diff --git a/.deepsource.toml b/.deepsource.toml index d6588c28b8..ba4d17057f 100644 --- a/.deepsource.toml +++ b/.deepsource.toml @@ -15,6 +15,11 @@ # version = 1 +exclude_patterns = [ + "**/*.pb.go", + "example/**" +] + [[analyzers]] name = "go" enabled = true diff --git a/.github/conflint.yaml b/.github/conflint.yaml index d815c5fa34..f4c2068f82 100644 --- a/.github/conflint.yaml +++ b/.github/conflint.yaml @@ -1,5 +1,5 @@ kubeval: - files: - - k8s/**/*.yaml + - k8s/**/*.yaml strict: true ignoreMissingSchemas: true diff --git a/.github/labeler.yml b/.github/labeler.yml index d94f9641d9..e5f8cd243c 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -1,16 +1,22 @@ area/agent/core: + - apis/proto/v1/agent/core/**/* + - apis/grpc/v1/agent/core/**/* - apis/proto/agent/core/**/* - apis/grpc/agent/core/**/* - cmd/agent/core/**/* - pkg/agent/core/**/* area/agent/sidecar: + - apis/proto/v1/agent/sidecar/**/* + - apis/grpc/v1/agent/sidecar/**/* - apis/proto/agent/sidecar/**/* - apis/grpc/agent/sidecar/**/* - cmd/agent/sidecar/**/* - pkg/agent/sidecar/**/* area/discoverer: + - apis/proto/v1/discoverer/**/* + - apis/grpc/v1/discoverer/**/* - apis/proto/discoverer/**/* - apis/grpc/discoverer/**/* - cmd/discoverer/**/* @@ -29,10 +35,40 @@ area/filter/ingress: - pkg/filter/ingress/**/* area/gateway: - - apis/proto/vald/**/* - - apis/grpc/vald/**/* - - cmd/gateway/**/* - - pkg/gateway/**/* + - apis/proto/v1/gateway/vald/**/* + - apis/grpc/v1/gateway/vald/**/* + - apis/proto/gateway/vald/**/* + - apis/grpc/gateway/vald/**/* + - cmd/gateway/vald/**/* + - pkg/gateway/vald/**/* + +area/gateway/lb: + - apis/proto/v1/payload/**/* + - apis/proto/v1/vald/**/* + - apis/grpc/v1/vald/**/* + - cmd/gateway/lb/**/* + - pkg/gateway/lb/**/* + +area/gateway/meta: + - apis/proto/v1/payload/**/* + - apis/proto/v1/vald/**/* + - apis/grpc/v1/vald/**/* + - cmd/gateway/meta/**/* + - pkg/gateway/meta/**/* + +area/gateway/backup: + - apis/proto/v1/payload/**/* + - apis/proto/v1/vald/**/* + - apis/grpc/v1/vald/**/* + - cmd/gateway/backup/**/* + - pkg/gateway/backup/**/* + +area/gateway/filter: + - apis/proto/v1/payload/**/* + - apis/proto/v1/vald/**/* + - apis/grpc/v1/vald/**/* + - cmd/gateway/filter/**/* + - pkg/gateway/filter/**/* area/manager/backup: - apis/proto/manager/backup/**/* diff --git a/.github/workflows/dockers-gateway-backup-image.yml b/.github/workflows/dockers-gateway-backup-image.yml new file mode 100755 index 0000000000..9469d834b2 --- /dev/null +++ b/.github/workflows/dockers-gateway-backup-image.yml @@ -0,0 +1,165 @@ +name: "Build docker image: gateway-backup" +on: + push: + branches: + - master + tags: + - "*.*.*" + - "v*.*.*" + - "*.*.*-*" + - "v*.*.*-*" + paths: + - "go.mod" + - "go.sum" + - "internal/**" + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - "apis/grpc/**" + - "pkg/gateway/backup/**" + - "cmd/gateway/backup/**" + - "pkg/gateway/internal/**" + - "dockers/base/Dockerfile" + - "dockers/gateway/backup/Dockerfile" + - "versions/GO_VERSION" + pull_request: + paths: + - "go.mod" + - "go.sum" + - "internal/**" + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - "apis/grpc/**" + - "pkg/gateway/backup/**" + - "cmd/gateway/backup/**" + - "pkg/gateway/internal/**" + - "dockers/base/Dockerfile" + - "dockers/gateway/backup/Dockerfile" + - "versions/GO_VERSION" + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Setup QEMU + uses: 
docker/setup-qemu-action@v1 + with: + platforms: all + - name: Setup Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v1 + with: + buildkitd-flags: "--debug" + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-vald-gateway-backup-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx-vald-gateway-backup- + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_PASS }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v1 + with: + registry: ghcr.io + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} + - name: Image name + id: image_name + run: | + image_name=`make docker/name/gateway-backup` + base_platforms=`make docker/platforms` + echo "IMAGE_NAME=${image_name}" >> $GITHUB_ENV + echo "::set-output name=IMAGE_NAME::${image_name}" + echo "::set-output name=BASE_PLATFORMS::${base_platforms}" + - name: Determine tag name (master) + if: github.ref == 'refs/heads/master' + run: | + echo "PRIMARY_TAG=nightly" >> $GITHUB_ENV + echo "PLATFORMS=${PLATFORMS}" >> $GITHUB_ENV + env: + PLATFORMS: ${{ steps.image_name.outputs.BASE_PLATFORMS }} + - name: Determine tag name (pull request) + if: github.event_name == 'pull_request' + run: | + pr_num=`cat $GITHUB_EVENT_PATH | jq -r ".number"` + echo "PR-${pr_num}" > versions/VALD_VERSION + echo "PRIMARY_TAG=pr-${pr_num}" >> $GITHUB_ENV + echo "PLATFORMS=${PLATFORMS}" >> $GITHUB_ENV + env: + PLATFORMS: linux/amd64 + - name: Determine tag name (tags) + if: startsWith( github.ref, 'refs/tags/') + id: determine_tag + run: | + tag_name=`echo $GITHUB_REF | sed -e 's:^refs/tags/::'` + echo "::set-output name=TAG_NAME::${tag_name}" + echo "PRIMARY_TAG=${tag_name}" >> $GITHUB_ENV + echo "PLATFORMS=${PLATFORMS}" >> $GITHUB_ENV + env: + PLATFORMS: ${{ steps.image_name.outputs.BASE_PLATFORMS }} + - name: Build and Push + run: | + make \ + DOCKER="docker buildx" \ + DOCKER_OPTS="--platform ${PLATFORMS} --builder ${BUILDER} ${CACHE_OPTS} ${LABEL_OPTS} --push" \ + TAG="${PRIMARY_TAG}" \ + docker/build/gateway-backup + make \ + REPO="ghcr.io/vdaas/vald" \ + DOCKER="docker buildx" \ + DOCKER_OPTS="--platform ${PLATFORMS} --builder ${BUILDER} ${CACHE_OPTS} ${LABEL_OPTS} --push" \ + TAG="${PRIMARY_TAG}" \ + docker/build/gateway-backup + env: + DOCKER_BUILDKIT: 1 + BUILDER: ${{ steps.buildx.outputs.name }} + CACHE_OPTS: "--cache-from=type=local,src=/tmp/.buildx-cache --cache-to=type=local,mode=max,dest=/tmp/.buildx-cache" + LABEL_OPTS: "--label org.opencontainers.image.url=${{ github.event.repository.html_url }} --label org.opencontainers.image.source=${{ github.event.repository.html_url }} --label org.opencontainers.image.revision=${{ github.sha }}" + - name: Initialize CodeQL + if: startsWith( github.ref, 'refs/tags/') + uses: github/codeql-action/init@v1 + - name: Run vulnerability scanner (table) + if: startsWith( github.ref, 'refs/tags/') + uses: aquasecurity/trivy-action@master + with: + image-ref: "${{ steps.image_name.outputs.IMAGE_NAME }}:${{ steps.determine_tag.outputs.TAG_NAME }}" + format: "table" + - name: Run vulnerability scanner (sarif) + if: startsWith( github.ref, 'refs/tags/') + uses: aquasecurity/trivy-action@master + with: + image-ref: "${{ steps.image_name.outputs.IMAGE_NAME }}:${{ steps.determine_tag.outputs.TAG_NAME }}" + format: "template" + template: "@/contrib/sarif.tpl" + output: "trivy-results.sarif" + - name: Upload Trivy 
scan results to Security tab + if: startsWith( github.ref, 'refs/tags/') + uses: github/codeql-action/upload-sarif@v1 + with: + sarif_file: "trivy-results.sarif" + slack: + name: Slack notification + needs: build + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/master' || startsWith( github.ref, 'refs/tags/') + steps: + - uses: technote-space/workflow-conclusion-action@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - uses: 8398a7/action-slack@v2 + with: + author_name: vald-backup-gateway image build + status: ${{ env.WORKFLOW_CONCLUSION }} + only_mention_fail: channel + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_NOTIFY_WEBHOOK_URL }} diff --git a/.github/workflows/dockers-gateway-lb-image.yml b/.github/workflows/dockers-gateway-lb-image.yml new file mode 100755 index 0000000000..bdaca9408a --- /dev/null +++ b/.github/workflows/dockers-gateway-lb-image.yml @@ -0,0 +1,165 @@ +name: "Build docker image: gateway-lb" +on: + push: + branches: + - master + tags: + - "*.*.*" + - "v*.*.*" + - "*.*.*-*" + - "v*.*.*-*" + paths: + - "go.mod" + - "go.sum" + - "internal/**" + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - "apis/grpc/**" + - "pkg/gateway/lb/**" + - "cmd/gateway/lb/**" + - "pkg/gateway/internal/**" + - "dockers/base/Dockerfile" + - "dockers/gateway/lb/Dockerfile" + - "versions/GO_VERSION" + pull_request: + paths: + - "go.mod" + - "go.sum" + - "internal/**" + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - "apis/grpc/**" + - "pkg/gateway/lb/**" + - "cmd/gateway/lb/**" + - "pkg/gateway/internal/**" + - "dockers/base/Dockerfile" + - "dockers/gateway/lb/Dockerfile" + - "versions/GO_VERSION" + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Setup QEMU + uses: docker/setup-qemu-action@v1 + with: + platforms: all + - name: Setup Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v1 + with: + buildkitd-flags: "--debug" + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-vald-gateway-lb-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx-vald-gateway-lb- + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_PASS }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v1 + with: + registry: ghcr.io + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} + - name: Image name + id: image_name + run: | + image_name=`make docker/name/gateway-lb` + base_platforms=`make docker/platforms` + echo "IMAGE_NAME=${image_name}" >> $GITHUB_ENV + echo "::set-output name=IMAGE_NAME::${image_name}" + echo "::set-output name=BASE_PLATFORMS::${base_platforms}" + - name: Determine tag name (master) + if: github.ref == 'refs/heads/master' + run: | + echo "PRIMARY_TAG=nightly" >> $GITHUB_ENV + echo "PLATFORMS=${PLATFORMS}" >> $GITHUB_ENV + env: + PLATFORMS: ${{ steps.image_name.outputs.BASE_PLATFORMS }} + - name: Determine tag name (pull request) + if: github.event_name == 'pull_request' + run: | + pr_num=`cat $GITHUB_EVENT_PATH | jq -r ".number"` + echo "PR-${pr_num}" > versions/VALD_VERSION + echo "PRIMARY_TAG=pr-${pr_num}" >> $GITHUB_ENV + echo "PLATFORMS=${PLATFORMS}" >> $GITHUB_ENV + env: + PLATFORMS: linux/amd64 + - name: Determine tag name (tags) + if: startsWith( github.ref, 
'refs/tags/') + id: determine_tag + run: | + tag_name=`echo $GITHUB_REF | sed -e 's:^refs/tags/::'` + echo "::set-output name=TAG_NAME::${tag_name}" + echo "PRIMARY_TAG=${tag_name}" >> $GITHUB_ENV + echo "PLATFORMS=${PLATFORMS}" >> $GITHUB_ENV + env: + PLATFORMS: ${{ steps.image_name.outputs.BASE_PLATFORMS }} + - name: Build and Push + run: | + make \ + DOCKER="docker buildx" \ + DOCKER_OPTS="--platform ${PLATFORMS} --builder ${BUILDER} ${CACHE_OPTS} ${LABEL_OPTS} --push" \ + TAG="${PRIMARY_TAG}" \ + docker/build/gateway-lb + make \ + REPO="ghcr.io/vdaas/vald" \ + DOCKER="docker buildx" \ + DOCKER_OPTS="--platform ${PLATFORMS} --builder ${BUILDER} ${CACHE_OPTS} ${LABEL_OPTS} --push" \ + TAG="${PRIMARY_TAG}" \ + docker/build/gateway-lb + env: + DOCKER_BUILDKIT: 1 + BUILDER: ${{ steps.buildx.outputs.name }} + CACHE_OPTS: "--cache-from=type=local,src=/tmp/.buildx-cache --cache-to=type=local,mode=max,dest=/tmp/.buildx-cache" + LABEL_OPTS: "--label org.opencontainers.image.url=${{ github.event.repository.html_url }} --label org.opencontainers.image.source=${{ github.event.repository.html_url }} --label org.opencontainers.image.revision=${{ github.sha }}" + - name: Initialize CodeQL + if: startsWith( github.ref, 'refs/tags/') + uses: github/codeql-action/init@v1 + - name: Run vulnerability scanner (table) + if: startsWith( github.ref, 'refs/tags/') + uses: aquasecurity/trivy-action@master + with: + image-ref: "${{ steps.image_name.outputs.IMAGE_NAME }}:${{ steps.determine_tag.outputs.TAG_NAME }}" + format: "table" + - name: Run vulnerability scanner (sarif) + if: startsWith( github.ref, 'refs/tags/') + uses: aquasecurity/trivy-action@master + with: + image-ref: "${{ steps.image_name.outputs.IMAGE_NAME }}:${{ steps.determine_tag.outputs.TAG_NAME }}" + format: "template" + template: "@/contrib/sarif.tpl" + output: "trivy-results.sarif" + - name: Upload Trivy scan results to Security tab + if: startsWith( github.ref, 'refs/tags/') + uses: github/codeql-action/upload-sarif@v1 + with: + sarif_file: "trivy-results.sarif" + slack: + name: Slack notification + needs: build + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/master' || startsWith( github.ref, 'refs/tags/') + steps: + - uses: technote-space/workflow-conclusion-action@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - uses: 8398a7/action-slack@v2 + with: + author_name: vald-lb-gateway image build + status: ${{ env.WORKFLOW_CONCLUSION }} + only_mention_fail: channel + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_NOTIFY_WEBHOOK_URL }} diff --git a/.github/workflows/dockers-gateway-meta-image.yml b/.github/workflows/dockers-gateway-meta-image.yml new file mode 100755 index 0000000000..ccab041e24 --- /dev/null +++ b/.github/workflows/dockers-gateway-meta-image.yml @@ -0,0 +1,165 @@ +name: "Build docker image: gateway-meta" +on: + push: + branches: + - master + tags: + - "*.*.*" + - "v*.*.*" + - "*.*.*-*" + - "v*.*.*-*" + paths: + - "go.mod" + - "go.sum" + - "internal/**" + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - "apis/grpc/**" + - "pkg/gateway/meta/**" + - "cmd/gateway/meta/**" + - "pkg/gateway/internal/**" + - "dockers/base/Dockerfile" + - "dockers/gateway/meta/Dockerfile" + - "versions/GO_VERSION" + pull_request: + paths: + - "go.mod" + - "go.sum" + - "internal/**" + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - "apis/grpc/**" + - "pkg/gateway/meta/**" + - 
"cmd/gateway/meta/**" + - "pkg/gateway/internal/**" + - "dockers/base/Dockerfile" + - "dockers/gateway/meta/Dockerfile" + - "versions/GO_VERSION" + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Setup QEMU + uses: docker/setup-qemu-action@v1 + with: + platforms: all + - name: Setup Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v1 + with: + buildkitd-flags: "--debug" + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-vald-gateway-meta-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx-vald-gateway-meta- + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_PASS }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v1 + with: + registry: ghcr.io + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} + - name: Image name + id: image_name + run: | + image_name=`make docker/name/gateway-meta` + base_platforms=`make docker/platforms` + echo "IMAGE_NAME=${image_name}" >> $GITHUB_ENV + echo "::set-output name=IMAGE_NAME::${image_name}" + echo "::set-output name=BASE_PLATFORMS::${base_platforms}" + - name: Determine tag name (master) + if: github.ref == 'refs/heads/master' + run: | + echo "PRIMARY_TAG=nightly" >> $GITHUB_ENV + echo "PLATFORMS=${PLATFORMS}" >> $GITHUB_ENV + env: + PLATFORMS: ${{ steps.image_name.outputs.BASE_PLATFORMS }} + - name: Determine tag name (pull request) + if: github.event_name == 'pull_request' + run: | + pr_num=`cat $GITHUB_EVENT_PATH | jq -r ".number"` + echo "PR-${pr_num}" > versions/VALD_VERSION + echo "PRIMARY_TAG=pr-${pr_num}" >> $GITHUB_ENV + echo "PLATFORMS=${PLATFORMS}" >> $GITHUB_ENV + env: + PLATFORMS: linux/amd64 + - name: Determine tag name (tags) + if: startsWith( github.ref, 'refs/tags/') + id: determine_tag + run: | + tag_name=`echo $GITHUB_REF | sed -e 's:^refs/tags/::'` + echo "::set-output name=TAG_NAME::${tag_name}" + echo "PRIMARY_TAG=${tag_name}" >> $GITHUB_ENV + echo "PLATFORMS=${PLATFORMS}" >> $GITHUB_ENV + env: + PLATFORMS: ${{ steps.image_name.outputs.BASE_PLATFORMS }} + - name: Build and Push + run: | + make \ + DOCKER="docker buildx" \ + DOCKER_OPTS="--platform ${PLATFORMS} --builder ${BUILDER} ${CACHE_OPTS} ${LABEL_OPTS} --push" \ + TAG="${PRIMARY_TAG}" \ + docker/build/gateway-meta + make \ + REPO="ghcr.io/vdaas/vald" \ + DOCKER="docker buildx" \ + DOCKER_OPTS="--platform ${PLATFORMS} --builder ${BUILDER} ${CACHE_OPTS} ${LABEL_OPTS} --push" \ + TAG="${PRIMARY_TAG}" \ + docker/build/gateway-meta + env: + DOCKER_BUILDKIT: 1 + BUILDER: ${{ steps.buildx.outputs.name }} + CACHE_OPTS: "--cache-from=type=local,src=/tmp/.buildx-cache --cache-to=type=local,mode=max,dest=/tmp/.buildx-cache" + LABEL_OPTS: "--label org.opencontainers.image.url=${{ github.event.repository.html_url }} --label org.opencontainers.image.source=${{ github.event.repository.html_url }} --label org.opencontainers.image.revision=${{ github.sha }}" + - name: Initialize CodeQL + if: startsWith( github.ref, 'refs/tags/') + uses: github/codeql-action/init@v1 + - name: Run vulnerability scanner (table) + if: startsWith( github.ref, 'refs/tags/') + uses: aquasecurity/trivy-action@master + with: + image-ref: "${{ steps.image_name.outputs.IMAGE_NAME }}:${{ steps.determine_tag.outputs.TAG_NAME }}" + format: "table" + - name: Run vulnerability scanner (sarif) + if: startsWith( github.ref, 'refs/tags/') + uses: 
aquasecurity/trivy-action@master + with: + image-ref: "${{ steps.image_name.outputs.IMAGE_NAME }}:${{ steps.determine_tag.outputs.TAG_NAME }}" + format: "template" + template: "@/contrib/sarif.tpl" + output: "trivy-results.sarif" + - name: Upload Trivy scan results to Security tab + if: startsWith( github.ref, 'refs/tags/') + uses: github/codeql-action/upload-sarif@v1 + with: + sarif_file: "trivy-results.sarif" + slack: + name: Slack notification + needs: build + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/master' || startsWith( github.ref, 'refs/tags/') + steps: + - uses: technote-space/workflow-conclusion-action@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - uses: 8398a7/action-slack@v2 + with: + author_name: vald-meta-gateway image build + status: ${{ env.WORKFLOW_CONCLUSION }} + only_mention_fail: channel + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_NOTIFY_WEBHOOK_URL }} diff --git a/Makefile b/Makefile index 249fc011b3..9db354e35d 100644 --- a/Makefile +++ b/Makefile @@ -22,38 +22,46 @@ TAG ?= latest BASE_IMAGE = $(NAME)-base AGENT_IMAGE = $(NAME)-agent-ngt AGENT_SIDECAR_IMAGE = $(NAME)-agent-sidecar -GATEWAY_IMAGE = $(NAME)-gateway -DISCOVERER_IMAGE = $(NAME)-discoverer-k8s -META_REDIS_IMAGE = $(NAME)-meta-redis -META_CASSANDRA_IMAGE = $(NAME)-meta-cassandra -MANAGER_BACKUP_MYSQL_IMAGE = $(NAME)-manager-backup-mysql -MANAGER_BACKUP_CASSANDRA_IMAGE = $(NAME)-manager-backup-cassandra -MANAGER_COMPRESSOR_IMAGE = $(NAME)-manager-compressor -MANAGER_INDEX_IMAGE = $(NAME)-manager-index +BACKUP_GATEWAY_IMAGE = $(NAME)-backup-gateway +BASE_IMAGE = $(NAME)-base CI_CONTAINER_IMAGE = $(NAME)-ci-container DEV_CONTAINER_IMAGE = $(NAME)-dev-container +DISCOVERER_IMAGE = $(NAME)-discoverer-k8s +FILTER_GATEWAY_IMAGE = $(NAME)-filter-gateway +GATEWAY_IMAGE = $(NAME)-gateway HELM_OPERATOR_IMAGE = $(NAME)-helm-operator +LB_GATEWAY_IMAGE = $(NAME)-lb-gateway LOADTEST_IMAGE = $(NAME)-loadtest +MANAGER_BACKUP_CASSANDRA_IMAGE = $(NAME)-manager-backup-cassandra +MANAGER_BACKUP_MYSQL_IMAGE = $(NAME)-manager-backup-mysql +MANAGER_COMPRESSOR_IMAGE = $(NAME)-manager-compressor +MANAGER_INDEX_IMAGE = $(NAME)-manager-index +META_CASSANDRA_IMAGE = $(NAME)-meta-cassandra +META_GATEWAY_IMAGE = $(NAME)-meta-gateway +META_REDIS_IMAGE = $(NAME)-meta-redis VERSION := $(eval VALD_VERSION := $(shell cat versions/VALD_VERSION))$(VALD_VERSION) NGT_VERSION := $(eval NGT_VERSION := $(shell cat versions/NGT_VERSION))$(NGT_VERSION) NGT_REPO = github.com/yahoojapan/NGT +GOPROXY=direct GO_VERSION := $(eval GO_VERSION := $(shell cat versions/GO_VERSION))$(GO_VERSION) GOOS := $(eval GOOS := $(shell go env GOOS))$(GOOS) GOARCH := $(eval GOARCH := $(shell go env GOARCH))$(GOARCH) GOPATH := $(eval GOPATH := $(shell go env GOPATH))$(GOPATH) GOCACHE := $(eval GOCACHE := $(shell go env GOCACHE))$(GOCACHE) +TEMP_DIR := $(eval TEMP_DIR := $(shell mktemp -d))$(TEMP_DIR) + TENSORFLOW_C_VERSION := $(eval TENSORFLOW_C_VERSION := $(shell cat versions/TENSORFLOW_C_VERSION))$(TENSORFLOW_C_VERSION) OPERATOR_SDK_VERSION := $(eval OPERATOR_SDK_VERSION := $(shell cat versions/OPERATOR_SDK_VERSION))$(OPERATOR_SDK_VERSION) KIND_VERSION ?= v0.9.0 -HELM_VERSION ?= v3.3.4 -HELM_DOCS_VERSION ?= 1.3.0 -VALDCLI_VERSION ?= v0.0.61 +HELM_VERSION ?= v3.4.0 +HELM_DOCS_VERSION ?= 1.4.0 +VALDCLI_VERSION ?= v0.0.62 TELEPRESENCE_VERSION ?= 0.108 SWAP_DEPLOYMENT_TYPE ?= deployment @@ -63,6 +71,7 @@ SWAP_TAG ?= latest BINDIR ?= /usr/local/bin UNAME := $(eval UNAME := $(shell uname))$(UNAME) +PWD := 
$(eval PWD := $(shell pwd))$(PWD) ifeq ($(UNAME),Linux) CPU_INFO_FLAGS := $(eval CPU_INFO_FLAGS := $(shell cat /proc/cpuinfo | grep flags | cut -d " " -f 2- | head -1))$(CPU_INFO_FLAGS) @@ -83,9 +92,11 @@ BENCH_DATASET_MD5_DIR = $(BENCH_DATASET_BASE_DIR)/$(BENCH_DATASET_MD5_DIR_NAME) BENCH_DATASET_HDF5_DIR = $(BENCH_DATASET_BASE_DIR)/$(BENCH_DATASET_HDF5_DIR_NAME) PROTOS := $(eval PROTOS := $(shell find apis/proto -type f -regex ".*\.proto"))$(PROTOS) +PROTOS_V0 := $(eval PROTOS_V0 := $(filter-out apis/proto/v%.proto,$(PROTOS)))$(PROTOS_V0) +PROTOS_V1 := $(eval PROTOS_V1 := $(filter apis/proto/v1/%.proto,$(PROTOS)))$(PROTOS_V1) PBGOS = $(PROTOS:apis/proto/%.proto=apis/grpc/%.pb.go) SWAGGERS = $(PROTOS:apis/proto/%.proto=apis/swagger/%.swagger.json) -PBDOCS = apis/docs/docs.md +PBDOCS = apis/docs/v0/docs.md apis/docs/v1/docs.md ifeq ($(GOARCH),amd64) CFLAGS ?= -mno-avx512f -mno-avx512dq -mno-avx512cd -mno-avx512bw -mno-avx512vl @@ -141,11 +152,10 @@ NUMPANES ?= 4 BODY = "" PROTO_PATHS = \ - $(PROTODIRS:%=./apis/proto/%) \ - $(GOPATH)/src/github.com/protocolbuffers/protobuf/src \ - $(GOPATH)/src/github.com/gogo/protobuf/protobuf \ - $(GOPATH)/src/github.com/googleapis/googleapis \ - $(GOPATH)/src/github.com/envoyproxy/protoc-gen-validate + $(PWD) \ + $(GOPATH)/src \ + $(GOPATH)/src/$(GOPKG) \ + $(GOPATH)/src/github.com/googleapis/googleapis GO_SOURCES = $(eval GO_SOURCES := $(shell find \ ./cmd \ @@ -153,7 +163,7 @@ GO_SOURCES = $(eval GO_SOURCES := $(shell find \ ./internal \ ./pkg \ -not -path './cmd/cli/*' \ - -not -path './internal/core/ngt/*' \ + -not -path './internal/core/algorithm/ngt/*' \ -not -path './internal/test/comparator/*' \ -not -path './internal/test/mock/*' \ -not -path './hack/benchmark/internal/client/ngtd/*' \ @@ -174,7 +184,7 @@ GO_OPTION_SOURCES = $(eval GO_OPTION_SOURCES := $(shell find \ ./internal \ ./pkg \ -not -path './cmd/cli/*' \ - -not -path './internal/core/ngt/*' \ + -not -path './internal/core/algorithm/ngt/*' \ -not -path './internal/test/comparator/*' \ -not -path './internal/test/mock/*' \ -not -path './hack/benchmark/internal/client/ngtd/*' \ @@ -235,18 +245,22 @@ all: clean deps ## clean clean: go clean -cache -modcache -testcache -i -r + mv ./apis/grpc/v1/vald/vald.go $(TEMP_DIR)/vald.go rm -rf \ /go/pkg \ ./*.log \ ./*.svg \ ./apis/docs \ ./apis/swagger \ + ./apis/grpc \ ./bench \ ./pprof \ ./libs \ $(GOCACHE) \ ./go.sum \ ./go.mod + mkdir -p ./apis/grpc/v1/vald + mv $(TEMP_DIR)/vald.go ./apis/grpc/v1/vald/vald.go cp ./hack/go.mod.default ./go.mod .PHONY: license @@ -254,6 +268,7 @@ clean: license: go run hack/license/gen/main.go ./ + .PHONY: init ## initialize development environment init: \ @@ -275,8 +290,8 @@ tools/install: \ ## update deps, license, and run goimports update: \ clean \ - deps \ proto/all \ + deps \ format \ go/deps @@ -324,7 +339,8 @@ go/deps: ./go.sum \ ./go.mod cp ./hack/go.mod.default ./go.mod - go mod tidy + GOPRIVATE=$(GOPKG) go mod tidy + go get -u all 2>/dev/null || true .PHONY: goimports/install @@ -372,13 +388,13 @@ version/telepresence: ngt/install: /usr/local/include/NGT/Capi.h /usr/local/include/NGT/Capi.h: curl -LO https://github.com/yahoojapan/NGT/archive/v$(NGT_VERSION).tar.gz - tar zxf v$(NGT_VERSION).tar.gz -C /tmp - cd /tmp/NGT-$(NGT_VERSION) && \ - cmake -DCMAKE_C_FLAGS="$(CFLAGS)" -DCMAKE_CXX_FLAGS="$(CXXFLAGS)" . 
- make -j -C /tmp/NGT-$(NGT_VERSION) - make install -C /tmp/NGT-$(NGT_VERSION) + tar zxf v$(NGT_VERSION).tar.gz -C $(TEMP_DIR)/ + cd $(TEMP_DIR)/NGT-$(NGT_VERSION) && \ + cmake -DCMAKE_C_FLAGS="$(CFLAGS)" -DCMAKE_CXX_FLAGS="$(CXXFLAGS)" . + make -j -C $(TEMP_DIR)/NGT-$(NGT_VERSION) + make install -C $(TEMP_DIR)/NGT-$(NGT_VERSION) rm -rf v$(NGT_VERSION).tar.gz - rm -rf /tmp/NGT-$(NGT_VERSION) + rm -rf $(TEMP_DIR)/NGT-$(NGT_VERSION) ldconfig .PHONY: tensorflow/install @@ -403,12 +419,12 @@ lint: .PHONY: changelog/update ## update changelog changelog/update: - echo "# CHANGELOG" > /tmp/CHANGELOG.md - echo "" >> /tmp/CHANGELOG.md - $(MAKE) -s changelog/next/print >> /tmp/CHANGELOG.md - echo "" >> /tmp/CHANGELOG.md - tail -n +2 CHANGELOG.md >> /tmp/CHANGELOG.md - mv -f /tmp/CHANGELOG.md CHANGELOG.md + echo "# CHANGELOG" > $(TEMP_DIR)/CHANGELOG.md + echo "" >> $(TEMP_DIR)/CHANGELOG.md + $(MAKE) -s changelog/next/print >> $(TEMP_DIR)/CHANGELOG.md + echo "" >> $(TEMP_DIR)/CHANGELOG.md + tail -n +2 CHANGELOG.md >> $(TEMP_DIR)/CHANGELOG.md + mv -f $(TEMP_DIR)/CHANGELOG.md CHANGELOG.md .PHONY: changelog/next/print ## print next changelog entry diff --git a/Makefile.d/build.mk b/Makefile.d/build.mk index 0a76e4b959..aae29b1ca7 100644 --- a/Makefile.d/build.mk +++ b/Makefile.d/build.mk @@ -21,6 +21,9 @@ binary/build: \ cmd/agent/sidecar/sidecar \ cmd/discoverer/k8s/discoverer \ cmd/gateway/vald/vald \ + cmd/gateway/lb/lb \ + cmd/gateway/meta/meta \ + cmd/gateway/backup/backup \ cmd/meta/redis/meta \ cmd/meta/cassandra/meta \ cmd/manager/backup/mysql/backup \ @@ -135,6 +138,81 @@ cmd/gateway/vald/vald: \ -o $@ \ $(dir $@)main.go +cmd/gateway/lb/lb: \ + $(GO_SOURCES_INTERNAL) \ + $(PBGOS) \ + $(shell find ./cmd/gateway/lb -type f -name '*.go' -not -name '*_test.go' -not -name 'doc.go') \ + $(shell find ./pkg/gateway/lb -type f -name '*.go' -not -name '*_test.go' -not -name 'doc.go') + export CGO_ENABLED=1 \ + && export GO111MODULE=on \ + && go build \ + --ldflags "-s -w -linkmode 'external' \ + -extldflags '-static' \ + -X '$(GOPKG)/internal/info.Version=$(VERSION)' \ + -X '$(GOPKG)/internal/info.GitCommit=$(GIT_COMMIT)' \ + -X '$(GOPKG)/internal/info.BuildTime=$(DATETIME)' \ + -X '$(GOPKG)/internal/info.GoVersion=$(GO_VERSION)' \ + -X '$(GOPKG)/internal/info.GoOS=$(GOOS)' \ + -X '$(GOPKG)/internal/info.GoArch=$(GOARCH)' \ + -X '$(GOPKG)/internal/info.CGOEnabled=$${CGO_ENABLED}' \ + -X '$(GOPKG)/internal/info.BuildCPUInfoFlags=$(CPU_INFO_FLAGS)'" \ + -a \ + -tags netgo \ + -installsuffix netgo \ + -trimpath \ + -o $@ \ + $(dir $@)main.go + +cmd/gateway/meta/meta: \ + $(GO_SOURCES_INTERNAL) \ + $(PBGOS) \ + $(shell find ./cmd/gateway/meta -type f -name '*.go' -not -name '*_test.go' -not -name 'doc.go') \ + $(shell find ./pkg/gateway/meta -type f -name '*.go' -not -name '*_test.go' -not -name 'doc.go') + export CGO_ENABLED=1 \ + && export GO111MODULE=on \ + && go build \ + --ldflags "-s -w -linkmode 'external' \ + -extldflags '-static' \ + -X '$(GOPKG)/internal/info.Version=$(VERSION)' \ + -X '$(GOPKG)/internal/info.GitCommit=$(GIT_COMMIT)' \ + -X '$(GOPKG)/internal/info.BuildTime=$(DATETIME)' \ + -X '$(GOPKG)/internal/info.GoVersion=$(GO_VERSION)' \ + -X '$(GOPKG)/internal/info.GoOS=$(GOOS)' \ + -X '$(GOPKG)/internal/info.GoArch=$(GOARCH)' \ + -X '$(GOPKG)/internal/info.CGOEnabled=$${CGO_ENABLED}' \ + -X '$(GOPKG)/internal/info.BuildCPUInfoFlags=$(CPU_INFO_FLAGS)'" \ + -a \ + -tags netgo \ + -installsuffix netgo \ + -trimpath \ + -o $@ \ + $(dir $@)main.go + +cmd/gateway/backup/backup: \ + 
$(GO_SOURCES_INTERNAL) \ + $(PBGOS) \ + $(shell find ./cmd/gateway/backup -type f -name '*.go' -not -name '*_test.go' -not -name 'doc.go') \ + $(shell find ./pkg/gateway/backup -type f -name '*.go' -not -name '*_test.go' -not -name 'doc.go') + export CGO_ENABLED=1 \ + && export GO111MODULE=on \ + && go build \ + --ldflags "-s -w -linkmode 'external' \ + -extldflags '-static' \ + -X '$(GOPKG)/internal/info.Version=$(VERSION)' \ + -X '$(GOPKG)/internal/info.GitCommit=$(GIT_COMMIT)' \ + -X '$(GOPKG)/internal/info.BuildTime=$(DATETIME)' \ + -X '$(GOPKG)/internal/info.GoVersion=$(GO_VERSION)' \ + -X '$(GOPKG)/internal/info.GoOS=$(GOOS)' \ + -X '$(GOPKG)/internal/info.GoArch=$(GOARCH)' \ + -X '$(GOPKG)/internal/info.CGOEnabled=$${CGO_ENABLED}' \ + -X '$(GOPKG)/internal/info.BuildCPUInfoFlags=$(CPU_INFO_FLAGS)'" \ + -a \ + -tags netgo \ + -installsuffix netgo \ + -trimpath \ + -o $@ \ + $(dir $@)main.go + cmd/meta/redis/meta: \ $(GO_SOURCES_INTERNAL) \ $(PBGOS) \ @@ -342,6 +420,9 @@ binary/build/zip: \ artifacts/vald-agent-sidecar-$(GOOS)-$(GOARCH).zip \ artifacts/vald-discoverer-k8s-$(GOOS)-$(GOARCH).zip \ artifacts/vald-gateway-$(GOOS)-$(GOARCH).zip \ + artifacts/vald-gateway-lb-$(GOOS)-$(GOARCH).zip \ + artifacts/vald-gateway-meta-$(GOOS)-$(GOARCH).zip \ + artifacts/vald-gateway-backup-$(GOOS)-$(GOARCH).zip \ artifacts/vald-meta-redis-$(GOOS)-$(GOARCH).zip \ artifacts/vald-meta-cassandra-$(GOOS)-$(GOARCH).zip \ artifacts/vald-manager-backup-mysql-$(GOOS)-$(GOARCH).zip \ @@ -365,6 +446,18 @@ artifacts/vald-gateway-$(GOOS)-$(GOARCH).zip: cmd/gateway/vald/vald $(call mkdir, $(dir $@)) zip --junk-paths $@ $< +artifacts/vald-gateway-lb-$(GOOS)-$(GOARCH).zip: cmd/gateway/lb/lb + $(call mkdir, $(dir $@)) + zip --junk-paths $@ $< + +artifacts/vald-gateway-meta-$(GOOS)-$(GOARCH).zip: cmd/gateway/meta/meta + $(call mkdir, $(dir $@)) + zip --junk-paths $@ $< + +artifacts/vald-gateway-backup-$(GOOS)-$(GOARCH).zip: cmd/gateway/backup/backup + $(call mkdir, $(dir $@)) + zip --junk-paths $@ $< + artifacts/vald-meta-redis-$(GOOS)-$(GOARCH).zip: cmd/meta/redis/meta $(call mkdir, $(dir $@)) zip --junk-paths $@ $< diff --git a/Makefile.d/docker.mk b/Makefile.d/docker.mk index b9a7b702c0..d57acd85dc 100644 --- a/Makefile.d/docker.mk +++ b/Makefile.d/docker.mk @@ -21,6 +21,9 @@ docker/build: \ docker/build/agent-sidecar \ docker/build/discoverer-k8s \ docker/build/gateway-vald \ + docker/build/gateway-lb \ + docker/build/gateway-meta \ + docker/build/gateway-backup \ docker/build/meta-redis \ docker/build/meta-cassandra \ docker/build/backup-manager-mysql \ @@ -110,6 +113,54 @@ docker/build/gateway-vald: --build-arg DISTROLESS_IMAGE_TAG=$(DISTROLESS_IMAGE_TAG) \ --build-arg UPX_OPTIONS=$(UPX_OPTIONS) +.PHONY: docker/name/gateway-lb +docker/name/gateway-lb: + @echo "$(REPO)/$(LB_GATEWAY_IMAGE)" + +.PHONY: docker/build/gateway-lb +## build gateway-lb image +docker/build/gateway-lb: + $(DOCKER) build \ + $(DOCKER_OPTS) \ + -f dockers/gateway/lb/Dockerfile \ + -t $(REPO)/$(LB_GATEWAY_IMAGE):$(TAG) . \ + --build-arg GO_VERSION=$(GO_VERSION) \ + --build-arg DISTROLESS_IMAGE=$(DISTROLESS_IMAGE) \ + --build-arg DISTROLESS_IMAGE_TAG=$(DISTROLESS_IMAGE_TAG) \ + --build-arg UPX_OPTIONS=$(UPX_OPTIONS) + +.PHONY: docker/name/gateway-meta +docker/name/gateway-meta: + @echo "$(REPO)/$(META_GATEWAY_IMAGE)" + +.PHONY: docker/build/gateway-meta +## build gateway-meta image +docker/build/gateway-meta: + $(DOCKER) build \ + $(DOCKER_OPTS) \ + -f dockers/gateway/meta/Dockerfile \ + -t $(REPO)/$(META_GATEWAY_IMAGE):$(TAG) . 
\ + --build-arg GO_VERSION=$(GO_VERSION) \ + --build-arg DISTROLESS_IMAGE=$(DISTROLESS_IMAGE) \ + --build-arg DISTROLESS_IMAGE_TAG=$(DISTROLESS_IMAGE_TAG) \ + --build-arg UPX_OPTIONS=$(UPX_OPTIONS) + +.PHONY: docker/name/gateway-backup +docker/name/gateway-backup: + @echo "$(REPO)/$(BACKUP_GATEWAY_IMAGE)" + +.PHONY: docker/build/gateway-backup +## build gateway-backup image +docker/build/gateway-backup: + $(DOCKER) build \ + $(DOCKER_OPTS) \ + -f dockers/gateway/backup/Dockerfile \ + -t $(REPO)/$(BACKUP_GATEWAY_IMAGE):$(TAG) . \ + --build-arg GO_VERSION=$(GO_VERSION) \ + --build-arg DISTROLESS_IMAGE=$(DISTROLESS_IMAGE) \ + --build-arg DISTROLESS_IMAGE_TAG=$(DISTROLESS_IMAGE_TAG) \ + --build-arg UPX_OPTIONS=$(UPX_OPTIONS) + .PHONY: docker/name/meta-redis docker/name/meta-redis: @echo "$(REPO)/$(META_REDIS_IMAGE)" diff --git a/Makefile.d/helm.mk b/Makefile.d/helm.mk index 406d128381..7894074f56 100644 --- a/Makefile.d/helm.mk +++ b/Makefile.d/helm.mk @@ -29,14 +29,14 @@ helm-docs/install: $(BINDIR)/helm-docs ifeq ($(UNAME),Darwin) $(BINDIR)/helm-docs: mkdir -p $(BINDIR) - cd $$(mktemp -d) \ + cd $(TEMP_DIR) \ && curl -LO https://github.com/norwoodj/helm-docs/releases/download/v$(HELM_DOCS_VERSION)/helm-docs_$(HELM_DOCS_VERSION)_Darwin_x86_64.tar.gz \ && tar xzvf helm-docs_$(HELM_DOCS_VERSION)_Darwin_x86_64.tar.gz \ && mv helm-docs $(BINDIR)/helm-docs else $(BINDIR)/helm-docs: mkdir -p $(BINDIR) - cd $$(mktemp -d) \ + cd $(TEMP_DIR) \ && curl -LO https://github.com/norwoodj/helm-docs/releases/download/v$(HELM_DOCS_VERSION)/helm-docs_$(HELM_DOCS_VERSION)_Linux_x86_64.tar.gz \ && tar xzvf helm-docs_$(HELM_DOCS_VERSION)_Linux_x86_64.tar.gz \ && mv helm-docs $(BINDIR)/helm-docs diff --git a/Makefile.d/kind.mk b/Makefile.d/kind.mk index 1d48796fca..2ff905fb71 100644 --- a/Makefile.d/kind.mk +++ b/Makefile.d/kind.mk @@ -50,12 +50,14 @@ kind/stop: ## start kind (kubernetes in docker) multi node cluster kind/cluster/start: kind create cluster --name $(NAME)-cluster --config $(ROOTDIR)/k8s/debug/kind/config.yaml - @make kind/login + kubectl apply -f https://projectcontour.io/quickstart/contour.yaml + kubectl patch daemonsets -n projectcontour envoy -p '{"spec":{"template":{"spec":{"nodeSelector":{"ingress-ready":"true"},"tolerations":[{"key":"node-role.kubernetes.io/master","operator":"Equal","effect":"NoSchedule"}]}}}}' .PHONY: kind/cluster/stop ## stop kind (kubernetes in docker) multi node cluster kind/cluster/stop: + kubectl delete -f https://projectcontour.io/quickstart/contour.yaml kind delete cluster --name $(NAME)-cluster .PHONY: kind/cluster/login diff --git a/Makefile.d/proto.mk b/Makefile.d/proto.mk index 28b0e627cf..7e25d1394b 100644 --- a/Makefile.d/proto.mk +++ b/Makefile.d/proto.mk @@ -16,6 +16,7 @@ .PHONY: proto/all ## build protobufs proto/all: \ + proto/deps \ pbgo \ pbdoc \ swagger @@ -161,7 +162,6 @@ $(SWAGGERS): \ $(call protoc-gen, $(patsubst apis/swagger/%.swagger.json,apis/proto/%.proto,$@), --swagger_out=json_names_for_fields=true:$(dir $@)) $(PBDOCS): \ - $(PROTOS) \ $(GOPATH)/bin/protoc-gen-doc \ $(GOPATH)/bin/protoc-gen-go \ $(GOPATH)/bin/protoc-gen-gogo \ @@ -177,6 +177,13 @@ $(PBDOCS): \ $(GOPATH)/src/github.com/protocolbuffers/protobuf \ $(GOPATH)/src/github.com/googleapis/googleapis \ $(GOPATH)/src/github.com/envoyproxy/protoc-gen-validate - @$(call green, "generating documents...") + +apis/docs/v0/docs.md: $(PROTOS_V0) + @$(call green, "generating documents for API v0...") + $(call mkdir, $(dir $@)) + $(call protoc-gen, $(PROTOS_V0), 
--plugin=protoc-gen-doc=$(GOPATH)/bin/protoc-gen-doc --doc_opt=markdown$(COMMA)docs.md --doc_out=$(dir $@)) + +apis/docs/v1/docs.md: $(PROTOS_V1) + @$(call green, "generating documents for API v1...") $(call mkdir, $(dir $@)) - $(call protoc-gen, $(PROTOS), --plugin=protoc-gen-doc=$(GOPATH)/bin/protoc-gen-doc --doc_opt=markdown$(COMMA)docs.md --doc_out=$(dir $@)) + $(call protoc-gen, $(PROTOS_V1), --plugin=protoc-gen-doc=$(GOPATH)/bin/protoc-gen-doc --doc_opt=markdown$(COMMA)docs.md --doc_out=$(dir $@)) diff --git a/Makefile.d/test.mk b/Makefile.d/test.mk index 7b4cca1c97..24f7b245b3 100644 --- a/Makefile.d/test.mk +++ b/Makefile.d/test.mk @@ -105,6 +105,7 @@ gotests/patch: \ @$(call green, "apply patches to go test files...") find $(ROOTDIR)/internal/k8s/* -name '*_test.go' | xargs sed -i -E "s%k8s.io/apimachinery/pkg/api/errors%github.com/vdaas/vald/internal/errors%g" find $(ROOTDIR)/* -name '*_test.go' | xargs sed -i -E "s%cockroachdb/errors%vdaas/vald/internal/errors%g" + find $(ROOTDIR)/* -name '*_test.go' | xargs sed -i -E "s%golang.org/x/sync/errgroup%github.com/vdaas/vald/internal/errgroup%g" find $(ROOTDIR)/* -name '*_test.go' | xargs sed -i -E "s%pkg/errors%vdaas/vald/internal/errors%g" find $(ROOTDIR)/* -name '*_test.go' | xargs sed -i -E "s%go-errors/errors%vdaas/vald/internal/errors%g" find $(ROOTDIR)/internal/errors -name '*_test.go' | xargs sed -i -E "s%\"github.com/vdaas/vald/internal/errors\"%%g" diff --git a/apis/docs/docs.md b/apis/docs/v0/docs.md similarity index 61% rename from apis/docs/docs.md rename to apis/docs/v0/docs.md index 673141e815..ba06c654a6 100644 --- a/apis/docs/docs.md +++ b/apis/docs/v0/docs.md @@ -3,29 +3,16 @@ ## Table of Contents -- [ingress/ingress_filter.proto](#ingress/ingress_filter.proto) - - [IngressFilter](#ingress_filter.IngressFilter) - -- [egress/egress_filter.proto](#egress/egress_filter.proto) +- [apis/proto/filter/egress/egress_filter.proto](#apis/proto/filter/egress/egress_filter.proto) - [EgressFilter](#egress_filter.EgressFilter) -- [meta.proto](#meta.proto) - - [Meta](#meta_manager.Meta) - -- [vald/vald.proto](#vald/vald.proto) +- [apis/proto/gateway/vald/vald.proto](#apis/proto/gateway/vald/vald.proto) - [Vald](#vald.Vald) -- [errors.proto](#errors.proto) - - [Errors](#errors.Errors) - - [Errors.RPC](#errors.Errors.RPC) - -- [sidecar/sidecar.proto](#sidecar/sidecar.proto) - - [Sidecar](#sidecar.Sidecar) +- [apis/proto/gateway/filter/filter.proto](#apis/proto/gateway/filter/filter.proto) + - [Filter](#filter.Filter) -- [core/agent.proto](#core/agent.proto) - - [Agent](#core.Agent) - -- [payload.proto](#payload.proto) +- [apis/proto/payload/payload.proto](#apis/proto/payload/payload.proto) - [Backup](#payload.Backup) - [Backup.Compressed](#payload.Backup.Compressed) - [Backup.Compressed.MetaVector](#payload.Backup.Compressed.MetaVector) @@ -71,9 +58,12 @@ - [Meta.Val](#payload.Meta.Val) - [Meta.Vals](#payload.Meta.Vals) - [Object](#payload.Object) + - [Object.Blob](#payload.Object.Blob) - [Object.Distance](#payload.Object.Distance) - [Object.ID](#payload.Object.ID) - [Object.IDs](#payload.Object.IDs) + - [Object.Location](#payload.Object.Location) + - [Object.Locations](#payload.Object.Locations) - [Object.Vector](#payload.Object.Vector) - [Object.Vectors](#payload.Object.Vectors) - [Replication](#payload.Replication) @@ -83,61 +73,21 @@ - [Search](#payload.Search) - [Search.Config](#payload.Search.Config) - [Search.IDRequest](#payload.Search.IDRequest) + - [Search.MultiIDRequest](#payload.Search.MultiIDRequest) + - 
[Search.MultiRequest](#payload.Search.MultiRequest) + - [Search.ObjectRequest](#payload.Search.ObjectRequest) - [Search.Request](#payload.Search.Request) - [Search.Response](#payload.Search.Response) - -- [discoverer.proto](#discoverer.proto) - - [Discoverer](#discoverer.Discoverer) - -- [backup/backup_manager.proto](#backup/backup_manager.proto) - - [Backup](#backup_manager.Backup) - -- [index/index_manager.proto](#index/index_manager.proto) - - [Index](#index_manager.Index) - -- [compressor/compressor.proto](#compressor/compressor.proto) - - [Backup](#compressor.Backup) - -- [traffic/traffic_manager.proto](#traffic/traffic_manager.proto) -- [replication/agent/replication_manager.proto](#replication/agent/replication_manager.proto) - - [Replication](#replication_manager.Replication) - -- [replication/controller/replication_manager.proto](#replication/controller/replication_manager.proto) - - [ReplicationController](#replication_manager.ReplicationController) + - [Search.Responses](#payload.Search.Responses) - [Scalar Value Types](#scalar-value-types) - -

<p align="right"><a href="#top">Top</a></p>

- -## ingress/ingress_filter.proto - - - - - - - - - - - -### IngressFilter - - -| Method Name | Request Type | Response Type | Description | -| ----------- | ------------ | ------------- | ------------| - - - - - - +

<p align="right"><a href="#top">Top</a></p>

-## egress/egress_filter.proto +## apis/proto/filter/egress/egress_filter.proto @@ -161,45 +111,10 @@ - +

<p align="right"><a href="#top">Top</a></p>

-## meta.proto - - - - - - - - - - - -### Meta - - -| Method Name | Request Type | Response Type | Description | -| ----------- | ------------ | ------------- | ------------| -| GetMeta | [.payload.Meta.Key](#payload.Meta.Key) | [.payload.Meta.Val](#payload.Meta.Val) | | -| GetMetas | [.payload.Meta.Keys](#payload.Meta.Keys) | [.payload.Meta.Vals](#payload.Meta.Vals) | | -| GetMetaInverse | [.payload.Meta.Val](#payload.Meta.Val) | [.payload.Meta.Key](#payload.Meta.Key) | | -| GetMetasInverse | [.payload.Meta.Vals](#payload.Meta.Vals) | [.payload.Meta.Keys](#payload.Meta.Keys) | | -| SetMeta | [.payload.Meta.KeyVal](#payload.Meta.KeyVal) | [.payload.Empty](#payload.Empty) | | -| SetMetas | [.payload.Meta.KeyVals](#payload.Meta.KeyVals) | [.payload.Empty](#payload.Empty) | | -| DeleteMeta | [.payload.Meta.Key](#payload.Meta.Key) | [.payload.Meta.Val](#payload.Meta.Val) | | -| DeleteMetas | [.payload.Meta.Keys](#payload.Meta.Keys) | [.payload.Meta.Vals](#payload.Meta.Vals) | | -| DeleteMetaInverse | [.payload.Meta.Val](#payload.Meta.Val) | [.payload.Meta.Key](#payload.Meta.Key) | | -| DeleteMetasInverse | [.payload.Meta.Vals](#payload.Meta.Vals) | [.payload.Meta.Keys](#payload.Meta.Keys) | | - - - - - - -

<p align="right"><a href="#top">Top</a></p>

- -## vald/vald.proto +## apis/proto/gateway/vald/vald.proto @@ -221,101 +136,29 @@ | SearchByID | [.payload.Search.IDRequest](#payload.Search.IDRequest) | [.payload.Search.Response](#payload.Search.Response) | | | StreamSearch | [.payload.Search.Request](#payload.Search.Request) stream | [.payload.Search.Response](#payload.Search.Response) stream | | | StreamSearchByID | [.payload.Search.IDRequest](#payload.Search.IDRequest) stream | [.payload.Search.Response](#payload.Search.Response) stream | | -| Insert | [.payload.Object.Vector](#payload.Object.Vector) | [.payload.Empty](#payload.Empty) | | -| StreamInsert | [.payload.Object.Vector](#payload.Object.Vector) stream | [.payload.Empty](#payload.Empty) stream | | -| MultiInsert | [.payload.Object.Vectors](#payload.Object.Vectors) | [.payload.Empty](#payload.Empty) | | -| Update | [.payload.Object.Vector](#payload.Object.Vector) | [.payload.Empty](#payload.Empty) | | -| StreamUpdate | [.payload.Object.Vector](#payload.Object.Vector) stream | [.payload.Empty](#payload.Empty) stream | | -| MultiUpdate | [.payload.Object.Vectors](#payload.Object.Vectors) | [.payload.Empty](#payload.Empty) | | -| Upsert | [.payload.Object.Vector](#payload.Object.Vector) | [.payload.Empty](#payload.Empty) | | -| StreamUpsert | [.payload.Object.Vector](#payload.Object.Vector) stream | [.payload.Empty](#payload.Empty) stream | | -| MultiUpsert | [.payload.Object.Vectors](#payload.Object.Vectors) | [.payload.Empty](#payload.Empty) | | -| Remove | [.payload.Object.ID](#payload.Object.ID) | [.payload.Empty](#payload.Empty) | | -| StreamRemove | [.payload.Object.ID](#payload.Object.ID) stream | [.payload.Empty](#payload.Empty) stream | | -| MultiRemove | [.payload.Object.IDs](#payload.Object.IDs) | [.payload.Empty](#payload.Empty) | | -| GetObject | [.payload.Object.ID](#payload.Object.ID) | [.payload.Backup.MetaVector](#payload.Backup.MetaVector) | | -| StreamGetObject | [.payload.Object.ID](#payload.Object.ID) stream | [.payload.Backup.MetaVector](#payload.Backup.MetaVector) stream | | - - - - - - -

<p align="right"><a href="#top">Top</a></p>

- -## errors.proto - - - - - -### Errors - - - - - - - - - -### Errors.RPC - - - -| Field | Type | Label | Description | -| ----- | ---- | ----- | ----------- | -| type | [string](#string) | | | -| msg | [string](#string) | | | -| details | [string](#string) | repeated | | -| instance | [string](#string) | | | -| status | [int64](#int64) | | | -| error | [string](#string) | | | -| roots | [Errors.RPC](#errors.Errors.RPC) | repeated | | - - - - - - - - - - - - - - - - -

<p align="right"><a href="#top">Top</a></p>

- -## sidecar/sidecar.proto - - - - - - - - - - - -### Sidecar - - -| Method Name | Request Type | Response Type | Description | -| ----------- | ------------ | ------------- | ------------| +| Insert | [.payload.Object.Vector](#payload.Object.Vector) | [.payload.Object.Location](#payload.Object.Location) | | +| StreamInsert | [.payload.Object.Vector](#payload.Object.Vector) stream | [.payload.Object.Location](#payload.Object.Location) stream | | +| MultiInsert | [.payload.Object.Vectors](#payload.Object.Vectors) | [.payload.Object.Locations](#payload.Object.Locations) | | +| Update | [.payload.Object.Vector](#payload.Object.Vector) | [.payload.Object.Location](#payload.Object.Location) | | +| StreamUpdate | [.payload.Object.Vector](#payload.Object.Vector) stream | [.payload.Object.Location](#payload.Object.Location) stream | | +| MultiUpdate | [.payload.Object.Vectors](#payload.Object.Vectors) | [.payload.Object.Locations](#payload.Object.Locations) | | +| Upsert | [.payload.Object.Vector](#payload.Object.Vector) | [.payload.Object.Location](#payload.Object.Location) | | +| StreamUpsert | [.payload.Object.Vector](#payload.Object.Vector) stream | [.payload.Object.Location](#payload.Object.Location) stream | | +| MultiUpsert | [.payload.Object.Vectors](#payload.Object.Vectors) | [.payload.Object.Locations](#payload.Object.Locations) | | +| Remove | [.payload.Object.ID](#payload.Object.ID) | [.payload.Object.Location](#payload.Object.Location) | | +| StreamRemove | [.payload.Object.ID](#payload.Object.ID) stream | [.payload.Object.Location](#payload.Object.Location) stream | | +| MultiRemove | [.payload.Object.IDs](#payload.Object.IDs) | [.payload.Object.Locations](#payload.Object.Locations) | | +| GetObject | [.payload.Object.ID](#payload.Object.ID) | [.payload.Object.Vector](#payload.Object.Vector) | | +| StreamGetObject | [.payload.Object.ID](#payload.Object.ID) stream | [.payload.Object.Vector](#payload.Object.Vector) stream | | - +
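
For orientation, a minimal client sketch against the updated gateway surface above, where Insert/Update/Upsert/Remove now return Object.Location instead of Empty and Search.Config/Search.Response carry a request_id. The import paths (apis/grpc/gateway/vald, apis/grpc/payload), the NewValdClient constructor, and the Go field names are assumptions based on standard protoc-gen-go naming, not confirmed by this diff.

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	// Assumed locations of the generated stubs; adjust to the actual packages.
	"github.com/vdaas/vald/apis/grpc/gateway/vald"
	"github.com/vdaas/vald/apis/grpc/payload"
)

func main() {
	// Endpoint is illustrative only.
	conn, err := grpc.Dial("vald-gateway:8081", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := vald.NewValdClient(conn) // assumed constructor name

	// Insert now reports where the vector landed (Object.Location: name/uuid/ips).
	loc, err := client.Insert(context.Background(), &payload.Object_Vector{
		Id:     "doc-1",
		Vector: []float32{0.1, 0.2, 0.3},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored as %s on %s (ips: %v)", loc.GetUuid(), loc.GetName(), loc.GetIps())

	// Search.Config gained request_id, which is echoed back in Search.Response.
	res, err := client.Search(context.Background(), &payload.Search_Request{
		Vector: []float32{0.1, 0.2, 0.3},
		Config: &payload.Search_Config{RequestId: "req-1", Num: 10, Radius: -1, Epsilon: 0.01},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("request %s returned %d results", res.GetRequestId(), len(res.GetResults()))
}
```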

<p align="right"><a href="#top">Top</a></p>

-## core/agent.proto +## apis/proto/gateway/filter/filter.proto @@ -325,42 +168,33 @@ - + -### Agent +### Filter | Method Name | Request Type | Response Type | Description | | ----------- | ------------ | ------------- | ------------| -| Exists | [.payload.Object.ID](#payload.Object.ID) | [.payload.Object.ID](#payload.Object.ID) | | -| Search | [.payload.Search.Request](#payload.Search.Request) | [.payload.Search.Response](#payload.Search.Response) | | -| SearchByID | [.payload.Search.IDRequest](#payload.Search.IDRequest) | [.payload.Search.Response](#payload.Search.Response) | | -| StreamSearch | [.payload.Search.Request](#payload.Search.Request) stream | [.payload.Search.Response](#payload.Search.Response) stream | | -| StreamSearchByID | [.payload.Search.IDRequest](#payload.Search.IDRequest) stream | [.payload.Search.Response](#payload.Search.Response) stream | | -| Insert | [.payload.Object.Vector](#payload.Object.Vector) | [.payload.Empty](#payload.Empty) | | -| StreamInsert | [.payload.Object.Vector](#payload.Object.Vector) stream | [.payload.Empty](#payload.Empty) stream | | -| MultiInsert | [.payload.Object.Vectors](#payload.Object.Vectors) | [.payload.Empty](#payload.Empty) | | -| Update | [.payload.Object.Vector](#payload.Object.Vector) | [.payload.Empty](#payload.Empty) | | -| StreamUpdate | [.payload.Object.Vector](#payload.Object.Vector) stream | [.payload.Empty](#payload.Empty) stream | | -| MultiUpdate | [.payload.Object.Vectors](#payload.Object.Vectors) | [.payload.Empty](#payload.Empty) | | -| Remove | [.payload.Object.ID](#payload.Object.ID) | [.payload.Empty](#payload.Empty) | | -| StreamRemove | [.payload.Object.ID](#payload.Object.ID) stream | [.payload.Empty](#payload.Empty) stream | | -| MultiRemove | [.payload.Object.IDs](#payload.Object.IDs) | [.payload.Empty](#payload.Empty) | | -| GetObject | [.payload.Object.ID](#payload.Object.ID) | [.payload.Object.Vector](#payload.Object.Vector) | | -| StreamGetObject | [.payload.Object.ID](#payload.Object.ID) stream | [.payload.Object.Vector](#payload.Object.Vector) stream | | -| CreateIndex | [.payload.Control.CreateIndexRequest](#payload.Control.CreateIndexRequest) | [.payload.Empty](#payload.Empty) | | -| SaveIndex | [.payload.Empty](#payload.Empty) | [.payload.Empty](#payload.Empty) | | -| CreateAndSaveIndex | [.payload.Control.CreateIndexRequest](#payload.Control.CreateIndexRequest) | [.payload.Empty](#payload.Empty) | | -| IndexInfo | [.payload.Empty](#payload.Empty) | [.payload.Info.Index.Count](#payload.Info.Index.Count) | | +| SearchObject | [.payload.Search.ObjectRequest](#payload.Search.ObjectRequest) | [.payload.Search.Response](#payload.Search.Response) | | +| StreamSearchObject | [.payload.Search.ObjectRequest](#payload.Search.ObjectRequest) stream | [.payload.Search.Response](#payload.Search.Response) stream | | +| InsertObject | [.payload.Object.Blob](#payload.Object.Blob) | [.payload.Object.Location](#payload.Object.Location) | | +| StreamInsertObject | [.payload.Object.Blob](#payload.Object.Blob) stream | [.payload.Object.Location](#payload.Object.Location) stream | | +| MultiInsertObject | [.payload.Object.Blob](#payload.Object.Blob) | [.payload.Object.Locations](#payload.Object.Locations) | | +| UpdateObject | [.payload.Object.Blob](#payload.Object.Blob) | [.payload.Object.Location](#payload.Object.Location) | | +| StreamUpdateObject | [.payload.Object.Blob](#payload.Object.Blob) stream | [.payload.Object.Location](#payload.Object.Location) stream | | +| MultiUpdateObject | 
[.payload.Object.Blob](#payload.Object.Blob) | [.payload.Object.Locations](#payload.Object.Locations) | | +| UpsertObject | [.payload.Object.Blob](#payload.Object.Blob) | [.payload.Object.Location](#payload.Object.Location) | | +| StreamUpsertObject | [.payload.Object.Blob](#payload.Object.Blob) stream | [.payload.Object.Location](#payload.Object.Location) stream | | +| MultiUpsertObject | [.payload.Object.Blob](#payload.Object.Blob) | [.payload.Object.Locations](#payload.Object.Locations) | | - +
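
The new Filter service above accepts raw objects (Object.Blob) rather than vectors, leaving vectorization to the configured filters. A hedged sketch of client usage follows; the stub package path (apis/grpc/gateway/filter) and the NewFilterClient constructor are assumptions for illustration only.

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	// Assumed stub locations; adjust to the actual generated packages.
	"github.com/vdaas/vald/apis/grpc/gateway/filter"
	"github.com/vdaas/vald/apis/grpc/payload"
)

func main() {
	conn, err := grpc.Dial("vald-filter-gateway:8081", grpc.WithInsecure()) // illustrative endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := filter.NewFilterClient(conn) // assumed constructor name

	// InsertObject sends the raw bytes and returns where the derived vector was stored.
	loc, err := client.InsertObject(context.Background(), &payload.Object_Blob{
		Id:     "img-42",
		Object: []byte("raw object bytes ..."),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("inserted as %s on %s", loc.GetUuid(), loc.GetName())

	// SearchObject pairs the same kind of blob with an ordinary Search.Config.
	res, err := client.SearchObject(context.Background(), &payload.Search_ObjectRequest{
		Object: []byte("raw object bytes ..."),
		Config: &payload.Search_Config{Num: 5},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d neighbours returned", len(res.GetResults()))
}
```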

<p align="right"><a href="#top">Top</a></p>

-## payload.proto +## apis/proto/payload/payload.proto @@ -393,7 +227,6 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | | uuid | [string](#string) | | | -| meta | [string](#string) | | | | vector | [bytes](#bytes) | | | | ips | [string](#string) | repeated | | @@ -552,7 +385,6 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | | uuid | [string](#string) | | | -| meta | [string](#string) | | | | vector | [float](#float) | repeated | | | ips | [string](#string) | repeated | | @@ -986,6 +818,22 @@ + + +### Object.Blob + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| id | [string](#string) | | | +| object | [bytes](#bytes) | | | + + + + + + ### Object.Distance @@ -1032,6 +880,38 @@ + + +### Object.Location + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| name | [string](#string) | | | +| uuid | [string](#string) | | | +| ips | [string](#string) | repeated | | + + + + + + + + +### Object.Locations + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| locations | [Object.Location](#payload.Object.Location) | repeated | | + + + + + + ### Object.Vector @@ -1139,6 +1019,7 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | +| request_id | [string](#string) | | | | num | [uint32](#uint32) | | | | radius | [float](#float) | | | | epsilon | [float](#float) | | | @@ -1165,213 +1046,96 @@ - + -### Search.Request +### Search.MultiIDRequest | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| vector | [float](#float) | repeated | | -| config | [Search.Config](#payload.Search.Config) | | | +| requests | [Search.IDRequest](#payload.Search.IDRequest) | repeated | | - + -### Search.Response +### Search.MultiRequest | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| results | [Object.Distance](#payload.Object.Distance) | repeated | | - - - - - - - - - - - - - - - - -

<p align="right"><a href="#top">Top</a></p>

- -## discoverer.proto - - - - - - - - - - - -### Discoverer - - -| Method Name | Request Type | Response Type | Description | -| ----------- | ------------ | ------------- | ------------| -| Pods | [.payload.Discoverer.Request](#payload.Discoverer.Request) | [.payload.Info.Pods](#payload.Info.Pods) | | -| Nodes | [.payload.Discoverer.Request](#payload.Discoverer.Request) | [.payload.Info.Nodes](#payload.Info.Nodes) | | - - - - - - -

<p align="right"><a href="#top">Top</a></p>

- -## backup/backup_manager.proto - - - - - - - - - - - -### Backup - - -| Method Name | Request Type | Response Type | Description | -| ----------- | ------------ | ------------- | ------------| -| GetVector | [.payload.Backup.GetVector.Request](#payload.Backup.GetVector.Request) | [.payload.Backup.Compressed.MetaVector](#payload.Backup.Compressed.MetaVector) | | -| Locations | [.payload.Backup.Locations.Request](#payload.Backup.Locations.Request) | [.payload.Info.IPs](#payload.Info.IPs) | | -| Register | [.payload.Backup.Compressed.MetaVector](#payload.Backup.Compressed.MetaVector) | [.payload.Empty](#payload.Empty) | | -| RegisterMulti | [.payload.Backup.Compressed.MetaVectors](#payload.Backup.Compressed.MetaVectors) | [.payload.Empty](#payload.Empty) | | -| Remove | [.payload.Backup.Remove.Request](#payload.Backup.Remove.Request) | [.payload.Empty](#payload.Empty) | | -| RemoveMulti | [.payload.Backup.Remove.RequestMulti](#payload.Backup.Remove.RequestMulti) | [.payload.Empty](#payload.Empty) | | -| RegisterIPs | [.payload.Backup.IP.Register.Request](#payload.Backup.IP.Register.Request) | [.payload.Empty](#payload.Empty) | | -| RemoveIPs | [.payload.Backup.IP.Remove.Request](#payload.Backup.IP.Remove.Request) | [.payload.Empty](#payload.Empty) | | - - - - - - -

<p align="right"><a href="#top">Top</a></p>

- -## index/index_manager.proto - - - - - - - +| requests | [Search.Request](#payload.Search.Request) | repeated | | - -### Index -| Method Name | Request Type | Response Type | Description | -| ----------- | ------------ | ------------- | ------------| -| IndexInfo | [.payload.Empty](#payload.Empty) | [.payload.Info.Index.Count](#payload.Info.Index.Count) | | - - - - - - -

<p align="right"><a href="#top">Top</a></p>

-## compressor/compressor.proto + +### Search.ObjectRequest - - - +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| object | [bytes](#bytes) | | | +| config | [Search.Config](#payload.Search.Config) | | | - -### Backup -| Method Name | Request Type | Response Type | Description | -| ----------- | ------------ | ------------- | ------------| -| GetVector | [.payload.Backup.GetVector.Request](#payload.Backup.GetVector.Request) | [.payload.Backup.MetaVector](#payload.Backup.MetaVector) | | -| Locations | [.payload.Backup.Locations.Request](#payload.Backup.Locations.Request) | [.payload.Info.IPs](#payload.Info.IPs) | | -| Register | [.payload.Backup.MetaVector](#payload.Backup.MetaVector) | [.payload.Empty](#payload.Empty) | | -| RegisterMulti | [.payload.Backup.MetaVectors](#payload.Backup.MetaVectors) | [.payload.Empty](#payload.Empty) | | -| Remove | [.payload.Backup.Remove.Request](#payload.Backup.Remove.Request) | [.payload.Empty](#payload.Empty) | | -| RemoveMulti | [.payload.Backup.Remove.RequestMulti](#payload.Backup.Remove.RequestMulti) | [.payload.Empty](#payload.Empty) | | -| RegisterIPs | [.payload.Backup.IP.Register.Request](#payload.Backup.IP.Register.Request) | [.payload.Empty](#payload.Empty) | | -| RemoveIPs | [.payload.Backup.IP.Remove.Request](#payload.Backup.IP.Remove.Request) | [.payload.Empty](#payload.Empty) | | - + +### Search.Request - -

<p align="right"><a href="#top">Top</a></p>

-## traffic/traffic_manager.proto +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| vector | [float](#float) | repeated | | +| config | [Search.Config](#payload.Search.Config) | | | - - - - + +### Search.Response - -

<p align="right"><a href="#top">Top</a></p>

-## replication/agent/replication_manager.proto +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| request_id | [string](#string) | | | +| results | [Object.Distance](#payload.Object.Distance) | repeated | | - - - - -### Replication + +### Search.Responses -| Method Name | Request Type | Response Type | Description | -| ----------- | ------------ | ------------- | ------------| -| Recover | [.payload.Replication.Recovery](#payload.Replication.Recovery) | [.payload.Empty](#payload.Empty) | | -| Rebalance | [.payload.Replication.Rebalance](#payload.Replication.Rebalance) | [.payload.Empty](#payload.Empty) | | -| AgentInfo | [.payload.Empty](#payload.Empty) | [.payload.Replication.Agents](#payload.Replication.Agents) | | - +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| responses | [Search.Response](#payload.Search.Response) | repeated | | - -
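
The batch payloads documented above compose straightforwardly: Search.MultiRequest wraps several Search.Request values, and the request_id carried in Search.Config is echoed in each Search.Response so entries of a Search.Responses batch can be matched to their originating requests. A minimal sketch using only the payload types; the import path of the generated payload package is an assumption.

```go
package main

import (
	"fmt"

	// Assumed location of the generated payload package.
	"github.com/vdaas/vald/apis/grpc/payload"
)

func main() {
	vectors := [][]float32{{0.1, 0.2}, {0.3, 0.4}}

	// Batch several ordinary Search.Request values into one Search.MultiRequest.
	multi := new(payload.Search_MultiRequest)
	for i, v := range vectors {
		multi.Requests = append(multi.Requests, &payload.Search_Request{
			Vector: v,
			Config: &payload.Search_Config{
				RequestId: fmt.Sprintf("req-%d", i), // echoed back in Search.Response.request_id
				Num:       10,
			},
		})
	}

	// Given a Search.Responses value returned by some batch search call,
	// request_id ties each response back to its request.
	var batch payload.Search_Responses
	for _, res := range batch.GetResponses() {
		fmt.Printf("request %s -> %d results\n", res.GetRequestId(), len(res.GetResults()))
	}
}
```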

<p align="right"><a href="#top">Top</a></p>

-## replication/controller/replication_manager.proto @@ -1380,16 +1144,6 @@ - - - -### ReplicationController - - -| Method Name | Request Type | Response Type | Description | -| ----------- | ------------ | ------------- | ------------| -| ReplicationInfo | [.payload.Empty](#payload.Empty) | [.payload.Replication.Agents](#payload.Replication.Agents) | | - diff --git a/apis/docs/v1/docs.md b/apis/docs/v1/docs.md new file mode 100644 index 0000000000..d97ade97dd --- /dev/null +++ b/apis/docs/v1/docs.md @@ -0,0 +1,2019 @@ +# Protocol Documentation + + +## Table of Contents + +- [apis/proto/v1/agent/core/agent.proto](#apis/proto/v1/agent/core/agent.proto) + - [Agent](#core.v1.Agent) + +- [apis/proto/v1/agent/sidecar/sidecar.proto](#apis/proto/v1/agent/sidecar/sidecar.proto) + - [Sidecar](#sidecar.v1.Sidecar) + +- [apis/proto/v1/discoverer/discoverer.proto](#apis/proto/v1/discoverer/discoverer.proto) + - [Discoverer](#discoverer.v1.Discoverer) + +- [apis/proto/v1/errors/errors.proto](#apis/proto/v1/errors/errors.proto) + - [Errors](#errors.v1.Errors) + - [Errors.RPC](#errors.v1.Errors.RPC) + +- [apis/proto/v1/filter/egress/egress_filter.proto](#apis/proto/v1/filter/egress/egress_filter.proto) + - [EgressFilter](#filter.egress.v1.EgressFilter) + +- [apis/proto/v1/filter/ingress/ingress_filter.proto](#apis/proto/v1/filter/ingress/ingress_filter.proto) + - [IngressFilter](#filter.ingress.v1.IngressFilter) + +- [apis/proto/v1/gateway/vald/vald.proto](#apis/proto/v1/gateway/vald/vald.proto) + - [Vald](#vald.v1.Vald) + +- [apis/proto/v1/manager/backup/backup_manager.proto](#apis/proto/v1/manager/backup/backup_manager.proto) + - [Backup](#manager.backup.v1.Backup) + +- [apis/proto/v1/manager/compressor/compressor.proto](#apis/proto/v1/manager/compressor/compressor.proto) + - [Backup](#manager.compressor.v1.Backup) + +- [apis/proto/v1/manager/index/index_manager.proto](#apis/proto/v1/manager/index/index_manager.proto) + - [Index](#manager.index.v1.Index) + +- [apis/proto/v1/manager/replication/agent/replication_manager.proto](#apis/proto/v1/manager/replication/agent/replication_manager.proto) + - [Replication](#manager.replication.agent.v1.Replication) + +- [apis/proto/v1/manager/replication/controller/replication_manager.proto](#apis/proto/v1/manager/replication/controller/replication_manager.proto) + - [ReplicationController](#manager.replication.controller.v1.ReplicationController) + +- [apis/proto/v1/meta/meta.proto](#apis/proto/v1/meta/meta.proto) + - [Meta](#meta.v1.Meta) + +- [apis/proto/v1/payload/payload.proto](#apis/proto/v1/payload/payload.proto) + - [Backup](#payload.v1.Backup) + - [Backup.Compressed](#payload.v1.Backup.Compressed) + - [Backup.Compressed.Vector](#payload.v1.Backup.Compressed.Vector) + - [Backup.Compressed.Vectors](#payload.v1.Backup.Compressed.Vectors) + - [Backup.GetVector](#payload.v1.Backup.GetVector) + - [Backup.GetVector.Owner](#payload.v1.Backup.GetVector.Owner) + - [Backup.GetVector.Request](#payload.v1.Backup.GetVector.Request) + - [Backup.IP](#payload.v1.Backup.IP) + - [Backup.IP.Register](#payload.v1.Backup.IP.Register) + - [Backup.IP.Register.Request](#payload.v1.Backup.IP.Register.Request) + - [Backup.IP.Remove](#payload.v1.Backup.IP.Remove) + - [Backup.IP.Remove.Request](#payload.v1.Backup.IP.Remove.Request) + - [Backup.Locations](#payload.v1.Backup.Locations) + - [Backup.Locations.Request](#payload.v1.Backup.Locations.Request) + - [Backup.Remove](#payload.v1.Backup.Remove) + - [Backup.Remove.Request](#payload.v1.Backup.Remove.Request) + - 
[Backup.Remove.RequestMulti](#payload.v1.Backup.Remove.RequestMulti) + - [Backup.Vector](#payload.v1.Backup.Vector) + - [Backup.Vectors](#payload.v1.Backup.Vectors) + - [Control](#payload.v1.Control) + - [Control.CreateIndexRequest](#payload.v1.Control.CreateIndexRequest) + - [Discoverer](#payload.v1.Discoverer) + - [Discoverer.Request](#payload.v1.Discoverer.Request) + - [Empty](#payload.v1.Empty) + - [Filter](#payload.v1.Filter) + - [Filter.Config](#payload.v1.Filter.Config) + - [Filter.Target](#payload.v1.Filter.Target) + - [Info](#payload.v1.Info) + - [Info.CPU](#payload.v1.Info.CPU) + - [Info.IPs](#payload.v1.Info.IPs) + - [Info.Index](#payload.v1.Info.Index) + - [Info.Index.Count](#payload.v1.Info.Index.Count) + - [Info.Index.UUID](#payload.v1.Info.Index.UUID) + - [Info.Index.UUID.Committed](#payload.v1.Info.Index.UUID.Committed) + - [Info.Index.UUID.Uncommitted](#payload.v1.Info.Index.UUID.Uncommitted) + - [Info.Memory](#payload.v1.Info.Memory) + - [Info.Node](#payload.v1.Info.Node) + - [Info.Nodes](#payload.v1.Info.Nodes) + - [Info.Pod](#payload.v1.Info.Pod) + - [Info.Pods](#payload.v1.Info.Pods) + - [Insert](#payload.v1.Insert) + - [Insert.Config](#payload.v1.Insert.Config) + - [Insert.MultiRequest](#payload.v1.Insert.MultiRequest) + - [Insert.Request](#payload.v1.Insert.Request) + - [Meta](#payload.v1.Meta) + - [Meta.Key](#payload.v1.Meta.Key) + - [Meta.KeyVal](#payload.v1.Meta.KeyVal) + - [Meta.KeyVals](#payload.v1.Meta.KeyVals) + - [Meta.Keys](#payload.v1.Meta.Keys) + - [Meta.Val](#payload.v1.Meta.Val) + - [Meta.Vals](#payload.v1.Meta.Vals) + - [Object](#payload.v1.Object) + - [Object.Blob](#payload.v1.Object.Blob) + - [Object.Distance](#payload.v1.Object.Distance) + - [Object.ID](#payload.v1.Object.ID) + - [Object.IDs](#payload.v1.Object.IDs) + - [Object.Location](#payload.v1.Object.Location) + - [Object.Locations](#payload.v1.Object.Locations) + - [Object.Vector](#payload.v1.Object.Vector) + - [Object.Vectors](#payload.v1.Object.Vectors) + - [Remove](#payload.v1.Remove) + - [Remove.Config](#payload.v1.Remove.Config) + - [Remove.MultiRequest](#payload.v1.Remove.MultiRequest) + - [Remove.Request](#payload.v1.Remove.Request) + - [Replication](#payload.v1.Replication) + - [Replication.Agents](#payload.v1.Replication.Agents) + - [Replication.Rebalance](#payload.v1.Replication.Rebalance) + - [Replication.Recovery](#payload.v1.Replication.Recovery) + - [Search](#payload.v1.Search) + - [Search.Config](#payload.v1.Search.Config) + - [Search.IDRequest](#payload.v1.Search.IDRequest) + - [Search.MultiIDRequest](#payload.v1.Search.MultiIDRequest) + - [Search.MultiRequest](#payload.v1.Search.MultiRequest) + - [Search.ObjectRequest](#payload.v1.Search.ObjectRequest) + - [Search.Request](#payload.v1.Search.Request) + - [Search.Response](#payload.v1.Search.Response) + - [Search.Responses](#payload.v1.Search.Responses) + - [Update](#payload.v1.Update) + - [Update.Config](#payload.v1.Update.Config) + - [Update.MultiRequest](#payload.v1.Update.MultiRequest) + - [Update.Request](#payload.v1.Update.Request) + - [Upsert](#payload.v1.Upsert) + - [Upsert.Config](#payload.v1.Upsert.Config) + - [Upsert.MultiRequest](#payload.v1.Upsert.MultiRequest) + - [Upsert.Request](#payload.v1.Upsert.Request) + +- [apis/proto/v1/vald/filter.proto](#apis/proto/v1/vald/filter.proto) + - [Filter](#vald.v1.Filter) + +- [apis/proto/v1/vald/insert.proto](#apis/proto/v1/vald/insert.proto) + - [Insert](#vald.v1.Insert) + +- [apis/proto/v1/vald/object.proto](#apis/proto/v1/vald/object.proto) + - [Object](#vald.v1.Object) + 
+- [apis/proto/v1/vald/remove.proto](#apis/proto/v1/vald/remove.proto) + - [Remove](#vald.v1.Remove) + +- [apis/proto/v1/vald/search.proto](#apis/proto/v1/vald/search.proto) + - [Search](#vald.v1.Search) + +- [apis/proto/v1/vald/update.proto](#apis/proto/v1/vald/update.proto) + - [Update](#vald.v1.Update) + +- [apis/proto/v1/vald/upsert.proto](#apis/proto/v1/vald/upsert.proto) + - [Upsert](#vald.v1.Upsert) + +- [Scalar Value Types](#scalar-value-types) + + + + +


+ +## apis/proto/v1/agent/core/agent.proto + + + + + + + + + + + +### Agent + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| CreateIndex | [.payload.v1.Control.CreateIndexRequest](#payload.v1.Control.CreateIndexRequest) | [.payload.v1.Empty](#payload.v1.Empty) | | +| SaveIndex | [.payload.v1.Empty](#payload.v1.Empty) | [.payload.v1.Empty](#payload.v1.Empty) | | +| CreateAndSaveIndex | [.payload.v1.Control.CreateIndexRequest](#payload.v1.Control.CreateIndexRequest) | [.payload.v1.Empty](#payload.v1.Empty) | | +| IndexInfo | [.payload.v1.Empty](#payload.v1.Empty) | [.payload.v1.Info.Index.Count](#payload.v1.Info.Index.Count) | | + + + + + + +
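For illustration only (this note is not generated from the proto files): the Agent table above maps directly onto a generated Go client. The import paths `github.com/vdaas/vald/apis/grpc/v1/agent/core` and `github.com/vdaas/vald/apis/grpc/v1/payload`, the package aliases, and the `vald-agent:8081` address are assumptions about the generated v1 stubs; only the method and message names come from the table.

```go
package main

import (
	"context"
	"log"

	agent "github.com/vdaas/vald/apis/grpc/v1/agent/core" // assumed location of the generated v1 Agent stubs
	payload "github.com/vdaas/vald/apis/grpc/v1/payload"  // assumed location of the generated payload types
	"google.golang.org/grpc"
)

func main() {
	// Connect to a single Vald agent (address is illustrative).
	conn, err := grpc.Dial("vald-agent:8081", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := agent.NewAgentClient(conn)
	ctx := context.Background()

	// CreateAndSaveIndex takes a Control.CreateIndexRequest and returns Empty.
	if _, err := client.CreateAndSaveIndex(ctx, &payload.Control_CreateIndexRequest{PoolSize: 10000}); err != nil {
		log.Fatal(err)
	}

	// IndexInfo reports stored/uncommitted vector counts and whether indexing is in progress.
	info, err := client.IndexInfo(ctx, &payload.Empty{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored=%d uncommitted=%d indexing=%v",
		info.GetStored(), info.GetUncommitted(), info.GetIndexing())
}
```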


+ +## apis/proto/v1/agent/sidecar/sidecar.proto + + + + + + + + + + + +### Sidecar + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| + + + + + + +


+ +## apis/proto/v1/discoverer/discoverer.proto + + + + + + + + + + + +### Discoverer + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| Pods | [.payload.v1.Discoverer.Request](#payload.v1.Discoverer.Request) | [.payload.v1.Info.Pods](#payload.v1.Info.Pods) | | +| Nodes | [.payload.v1.Discoverer.Request](#payload.v1.Discoverer.Request) | [.payload.v1.Info.Nodes](#payload.v1.Info.Nodes) | | + + + + + + +
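A minimal sketch of calling the Discoverer service above, under the same assumptions about generated import paths; the app name and namespace values are hypothetical.

```go
package example

import (
	"context"

	discoverer "github.com/vdaas/vald/apis/grpc/v1/discoverer" // assumed generated import path
	payload "github.com/vdaas/vald/apis/grpc/v1/payload"       // assumed generated import path
	"google.golang.org/grpc"
)

// listAgentPods asks the discoverer for pods matching the given selector.
func listAgentPods(ctx context.Context, conn *grpc.ClientConn) (*payload.Info_Pods, error) {
	client := discoverer.NewDiscovererClient(conn)
	return client.Pods(ctx, &payload.Discoverer_Request{
		Name:      "vald-agent-ngt", // hypothetical app name
		Namespace: "default",
		Node:      "", // empty matches any node
	})
}
```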


+ +## apis/proto/v1/errors/errors.proto + + + + + +### Errors + + + + + + + + + +### Errors.RPC + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| type | [string](#string) | | | +| msg | [string](#string) | | | +| details | [string](#string) | repeated | | +| instance | [string](#string) | | | +| status | [int64](#int64) | | | +| error | [string](#string) | | | +| roots | [Errors.RPC](#errors.v1.Errors.RPC) | repeated | | + + + + + + + + + + + + + + + + +


+ +## apis/proto/v1/filter/egress/egress_filter.proto + + + + + + + + + + + +### EgressFilter + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| Filter | [.payload.v1.Object.Distance](#payload.v1.Object.Distance) | [.payload.v1.Object.Distance](#payload.v1.Object.Distance) | | +| StreamFilter | [.payload.v1.Object.Distance](#payload.v1.Object.Distance) stream | [.payload.v1.Object.Distance](#payload.v1.Object.Distance) stream | | + + + + + + +


+ +## apis/proto/v1/filter/ingress/ingress_filter.proto + + + + + + + + + + + +### IngressFilter + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| GenVector | [.payload.v1.Object.Blob](#payload.v1.Object.Blob) | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) | | +| StreamGenVector | [.payload.v1.Object.Blob](#payload.v1.Object.Blob) stream | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) stream | | +| FilterVector | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) | | +| StreamFilterVector | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) stream | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) stream | | + + + + + + +


+ +## apis/proto/v1/gateway/vald/vald.proto + + + + + + + + + + + +### Vald + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| Exists | [.payload.v1.Object.ID](#payload.v1.Object.ID) | [.payload.v1.Object.ID](#payload.v1.Object.ID) | | +| Search | [.payload.v1.Search.Request](#payload.v1.Search.Request) | [.payload.v1.Search.Response](#payload.v1.Search.Response) | | +| SearchByID | [.payload.v1.Search.IDRequest](#payload.v1.Search.IDRequest) | [.payload.v1.Search.Response](#payload.v1.Search.Response) | | +| StreamSearch | [.payload.v1.Search.Request](#payload.v1.Search.Request) stream | [.payload.v1.Search.Response](#payload.v1.Search.Response) stream | | +| StreamSearchByID | [.payload.v1.Search.IDRequest](#payload.v1.Search.IDRequest) stream | [.payload.v1.Search.Response](#payload.v1.Search.Response) stream | | +| Insert | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) | [.payload.v1.Object.Location](#payload.v1.Object.Location) | | +| StreamInsert | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) stream | [.payload.v1.Object.Location](#payload.v1.Object.Location) stream | | +| MultiInsert | [.payload.v1.Object.Vectors](#payload.v1.Object.Vectors) | [.payload.v1.Object.Locations](#payload.v1.Object.Locations) | | +| Update | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) | [.payload.v1.Object.Location](#payload.v1.Object.Location) | | +| StreamUpdate | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) stream | [.payload.v1.Object.Location](#payload.v1.Object.Location) stream | | +| MultiUpdate | [.payload.v1.Object.Vectors](#payload.v1.Object.Vectors) | [.payload.v1.Object.Locations](#payload.v1.Object.Locations) | | +| Upsert | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) | [.payload.v1.Object.Location](#payload.v1.Object.Location) | | +| StreamUpsert | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) stream | [.payload.v1.Object.Location](#payload.v1.Object.Location) stream | | +| MultiUpsert | [.payload.v1.Object.Vectors](#payload.v1.Object.Vectors) | [.payload.v1.Object.Locations](#payload.v1.Object.Locations) | | +| Remove | [.payload.v1.Object.ID](#payload.v1.Object.ID) | [.payload.v1.Object.Location](#payload.v1.Object.Location) | | +| StreamRemove | [.payload.v1.Object.ID](#payload.v1.Object.ID) stream | [.payload.v1.Object.Location](#payload.v1.Object.Location) stream | | +| MultiRemove | [.payload.v1.Object.IDs](#payload.v1.Object.IDs) | [.payload.v1.Object.Locations](#payload.v1.Object.Locations) | | +| GetObject | [.payload.v1.Object.ID](#payload.v1.Object.ID) | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) | | +| StreamGetObject | [.payload.v1.Object.ID](#payload.v1.Object.ID) stream | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) stream | | + + + + + + +
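A usage sketch for the gateway's Vald service above, assuming the generated stubs live under `github.com/vdaas/vald/apis/grpc/v1/gateway/vald` and `github.com/vdaas/vald/apis/grpc/v1/payload` and follow the usual `New<Service>Client` naming; the search parameters are illustrative values only.

```go
package example

import (
	"context"

	vald "github.com/vdaas/vald/apis/grpc/v1/gateway/vald" // assumed generated import path
	payload "github.com/vdaas/vald/apis/grpc/v1/payload"   // assumed generated import path
	"google.golang.org/grpc"
)

// insertAndSearch registers one vector through the gateway and then searches for its neighbours.
func insertAndSearch(ctx context.Context, conn *grpc.ClientConn, id string, vec []float32) (*payload.Search_Response, error) {
	client := vald.NewValdClient(conn)

	// Insert takes an Object.Vector and returns the Object.Location it was stored at.
	if _, err := client.Insert(ctx, &payload.Object_Vector{Id: id, Vector: vec}); err != nil {
		return nil, err
	}

	// Search takes the raw vector plus a Search.Config and returns ranked Object.Distance results.
	return client.Search(ctx, &payload.Search_Request{
		Vector: vec,
		Config: &payload.Search_Config{
			Num:     10,   // number of neighbours to return
			Radius:  -1,   // negative radius conventionally means "unbounded" (assumption)
			Epsilon: 0.01, // illustrative search precision
		},
	})
}
```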


+ +## apis/proto/v1/manager/backup/backup_manager.proto + + + + + + + + + + + +### Backup + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| GetVector | [.payload.v1.Backup.GetVector.Request](#payload.v1.Backup.GetVector.Request) | [.payload.v1.Backup.Compressed.Vector](#payload.v1.Backup.Compressed.Vector) | | +| Locations | [.payload.v1.Backup.Locations.Request](#payload.v1.Backup.Locations.Request) | [.payload.v1.Info.IPs](#payload.v1.Info.IPs) | | +| Register | [.payload.v1.Backup.Compressed.Vector](#payload.v1.Backup.Compressed.Vector) | [.payload.v1.Empty](#payload.v1.Empty) | | +| RegisterMulti | [.payload.v1.Backup.Compressed.Vectors](#payload.v1.Backup.Compressed.Vectors) | [.payload.v1.Empty](#payload.v1.Empty) | | +| Remove | [.payload.v1.Backup.Remove.Request](#payload.v1.Backup.Remove.Request) | [.payload.v1.Empty](#payload.v1.Empty) | | +| RemoveMulti | [.payload.v1.Backup.Remove.RequestMulti](#payload.v1.Backup.Remove.RequestMulti) | [.payload.v1.Empty](#payload.v1.Empty) | | +| RegisterIPs | [.payload.v1.Backup.IP.Register.Request](#payload.v1.Backup.IP.Register.Request) | [.payload.v1.Empty](#payload.v1.Empty) | | +| RemoveIPs | [.payload.v1.Backup.IP.Remove.Request](#payload.v1.Backup.IP.Remove.Request) | [.payload.v1.Empty](#payload.v1.Empty) | | + + + + + + +


+ +## apis/proto/v1/manager/compressor/compressor.proto + + + + + + + + + + + +### Backup + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| GetVector | [.payload.v1.Backup.GetVector.Request](#payload.v1.Backup.GetVector.Request) | [.payload.v1.Backup.Vector](#payload.v1.Backup.Vector) | | +| Locations | [.payload.v1.Backup.Locations.Request](#payload.v1.Backup.Locations.Request) | [.payload.v1.Info.IPs](#payload.v1.Info.IPs) | | +| Register | [.payload.v1.Backup.Vector](#payload.v1.Backup.Vector) | [.payload.v1.Empty](#payload.v1.Empty) | | +| RegisterMulti | [.payload.v1.Backup.Vectors](#payload.v1.Backup.Vectors) | [.payload.v1.Empty](#payload.v1.Empty) | | +| Remove | [.payload.v1.Backup.Remove.Request](#payload.v1.Backup.Remove.Request) | [.payload.v1.Empty](#payload.v1.Empty) | | +| RemoveMulti | [.payload.v1.Backup.Remove.RequestMulti](#payload.v1.Backup.Remove.RequestMulti) | [.payload.v1.Empty](#payload.v1.Empty) | | +| RegisterIPs | [.payload.v1.Backup.IP.Register.Request](#payload.v1.Backup.IP.Register.Request) | [.payload.v1.Empty](#payload.v1.Empty) | | +| RemoveIPs | [.payload.v1.Backup.IP.Remove.Request](#payload.v1.Backup.IP.Remove.Request) | [.payload.v1.Empty](#payload.v1.Empty) | | + + + + + + +


+ +## apis/proto/v1/manager/index/index_manager.proto + + + + + + + + + + + +### Index + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| IndexInfo | [.payload.v1.Empty](#payload.v1.Empty) | [.payload.v1.Info.Index.Count](#payload.v1.Info.Index.Count) | | + + + + + + +


+ +## apis/proto/v1/manager/replication/agent/replication_manager.proto + + + + + + + + + + + +### Replication + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| Recover | [.payload.v1.Replication.Recovery](#payload.v1.Replication.Recovery) | [.payload.v1.Empty](#payload.v1.Empty) | | +| Rebalance | [.payload.v1.Replication.Rebalance](#payload.v1.Replication.Rebalance) | [.payload.v1.Empty](#payload.v1.Empty) | | +| AgentInfo | [.payload.v1.Empty](#payload.v1.Empty) | [.payload.v1.Replication.Agents](#payload.v1.Replication.Agents) | | + + + + + + +


+ +## apis/proto/v1/manager/replication/controller/replication_manager.proto + + + + + + + + + + + +### ReplicationController + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| ReplicationInfo | [.payload.v1.Empty](#payload.v1.Empty) | [.payload.v1.Replication.Agents](#payload.v1.Replication.Agents) | | + + + + + + +


+ +## apis/proto/v1/meta/meta.proto + + + + + + + + + + + +### Meta + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| GetMeta | [.payload.v1.Meta.Key](#payload.v1.Meta.Key) | [.payload.v1.Meta.Val](#payload.v1.Meta.Val) | | +| GetMetas | [.payload.v1.Meta.Keys](#payload.v1.Meta.Keys) | [.payload.v1.Meta.Vals](#payload.v1.Meta.Vals) | | +| GetMetaInverse | [.payload.v1.Meta.Val](#payload.v1.Meta.Val) | [.payload.v1.Meta.Key](#payload.v1.Meta.Key) | | +| GetMetasInverse | [.payload.v1.Meta.Vals](#payload.v1.Meta.Vals) | [.payload.v1.Meta.Keys](#payload.v1.Meta.Keys) | | +| SetMeta | [.payload.v1.Meta.KeyVal](#payload.v1.Meta.KeyVal) | [.payload.v1.Empty](#payload.v1.Empty) | | +| SetMetas | [.payload.v1.Meta.KeyVals](#payload.v1.Meta.KeyVals) | [.payload.v1.Empty](#payload.v1.Empty) | | +| DeleteMeta | [.payload.v1.Meta.Key](#payload.v1.Meta.Key) | [.payload.v1.Meta.Val](#payload.v1.Meta.Val) | | +| DeleteMetas | [.payload.v1.Meta.Keys](#payload.v1.Meta.Keys) | [.payload.v1.Meta.Vals](#payload.v1.Meta.Vals) | | +| DeleteMetaInverse | [.payload.v1.Meta.Val](#payload.v1.Meta.Val) | [.payload.v1.Meta.Key](#payload.v1.Meta.Key) | | +| DeleteMetasInverse | [.payload.v1.Meta.Vals](#payload.v1.Meta.Vals) | [.payload.v1.Meta.Keys](#payload.v1.Meta.Keys) | | + + + + + + +
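A small sketch of the Meta key/value API above, which maps user-facing keys to internal identifiers; the import paths and package aliases are assumptions about the generated stubs.

```go
package example

import (
	"context"

	meta "github.com/vdaas/vald/apis/grpc/v1/meta"       // assumed generated import path
	payload "github.com/vdaas/vald/apis/grpc/v1/payload" // assumed generated import path
	"google.golang.org/grpc"
)

// mapUserKey stores a key -> uuid mapping and reads it back.
func mapUserKey(ctx context.Context, conn *grpc.ClientConn, key, uuid string) (string, error) {
	client := meta.NewMetaClient(conn)

	// SetMeta registers the pair; GetMeta resolves key -> value,
	// GetMetaInverse resolves value -> key.
	if _, err := client.SetMeta(ctx, &payload.Meta_KeyVal{Key: key, Val: uuid}); err != nil {
		return "", err
	}
	val, err := client.GetMeta(ctx, &payload.Meta_Key{Key: key})
	if err != nil {
		return "", err
	}
	return val.GetVal(), nil
}
```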


+ +## apis/proto/v1/payload/payload.proto + + + + + +### Backup + + + + + + + + + +### Backup.Compressed + + + + + + + + + +### Backup.Compressed.Vector + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| uuid | [string](#string) | | | +| vector | [bytes](#bytes) | | | +| ips | [string](#string) | repeated | | + + + + + + + + +### Backup.Compressed.Vectors + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| vectors | [Backup.Compressed.Vector](#payload.v1.Backup.Compressed.Vector) | repeated | | + + + + + + + + +### Backup.GetVector + + + + + + + + + +### Backup.GetVector.Owner + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| ip | [string](#string) | | | + + + + + + + + +### Backup.GetVector.Request + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| uuid | [string](#string) | | | + + + + + + + + +### Backup.IP + + + + + + + + + +### Backup.IP.Register + + + + + + + + + +### Backup.IP.Register.Request + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| uuid | [string](#string) | | | +| ips | [string](#string) | repeated | | + + + + + + + + +### Backup.IP.Remove + + + + + + + + + +### Backup.IP.Remove.Request + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| ips | [string](#string) | repeated | | + + + + + + + + +### Backup.Locations + + + + + + + + + +### Backup.Locations.Request + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| uuid | [string](#string) | | | + + + + + + + + +### Backup.Remove + + + + + + + + + +### Backup.Remove.Request + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| uuid | [string](#string) | | | + + + + + + + + +### Backup.Remove.RequestMulti + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| uuids | [string](#string) | repeated | | + + + + + + + + +### Backup.Vector + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| uuid | [string](#string) | | | +| vector | [float](#float) | repeated | | +| ips | [string](#string) | repeated | | + + + + + + + + +### Backup.Vectors + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| vectors | [Backup.Vector](#payload.v1.Backup.Vector) | repeated | | + + + + + + + + +### Control + + + + + + + + + +### Control.CreateIndexRequest + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| pool_size | [uint32](#uint32) | | | + + + + + + + + +### Discoverer + + + + + + + + + +### Discoverer.Request + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| name | [string](#string) | | | +| namespace | [string](#string) | | | +| node | [string](#string) | | | + + + + + + + + +### Empty + + + + + + + + + +### Filter + + + + + + + + + +### Filter.Config + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| targets | [string](#string) | repeated | | + + + + + + + + +### Filter.Target + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| host | [string](#string) | | | +| port | [uint32](#uint32) | | | + + + + + + + + +### Info + + + + + + + + + +### Info.CPU + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| limit | [double](#double) | | | +| request | [double](#double) 
| | | +| usage | [double](#double) | | | + + + + + + + + +### Info.IPs + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| ip | [string](#string) | repeated | | + + + + + + + + +### Info.Index + + + + + + + + + +### Info.Index.Count + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| stored | [uint32](#uint32) | | | +| uncommitted | [uint32](#uint32) | | | +| indexing | [bool](#bool) | | | + + + + + + + + +### Info.Index.UUID + + + + + + + + + +### Info.Index.UUID.Committed + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| uuid | [string](#string) | | | + + + + + + + + +### Info.Index.UUID.Uncommitted + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| uuid | [string](#string) | | | + + + + + + + + +### Info.Memory + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| limit | [double](#double) | | | +| request | [double](#double) | | | +| usage | [double](#double) | | | + + + + + + + + +### Info.Node + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| name | [string](#string) | | | +| internal_addr | [string](#string) | | | +| external_addr | [string](#string) | | | +| cpu | [Info.CPU](#payload.v1.Info.CPU) | | | +| memory | [Info.Memory](#payload.v1.Info.Memory) | | | +| Pods | [Info.Pods](#payload.v1.Info.Pods) | | | + + + + + + + + +### Info.Nodes + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| nodes | [Info.Node](#payload.v1.Info.Node) | repeated | | + + + + + + + + +### Info.Pod + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| app_name | [string](#string) | | | +| name | [string](#string) | | | +| namespace | [string](#string) | | | +| ip | [string](#string) | | | +| cpu | [Info.CPU](#payload.v1.Info.CPU) | | | +| memory | [Info.Memory](#payload.v1.Info.Memory) | | | +| node | [Info.Node](#payload.v1.Info.Node) | | | + + + + + + + + +### Info.Pods + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| pods | [Info.Pod](#payload.v1.Info.Pod) | repeated | | + + + + + + + + +### Insert + + + + + + + + + +### Insert.Config + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| skip_strict_exist_check | [bool](#bool) | | | +| filters | [Filter.Config](#payload.v1.Filter.Config) | | | + + + + + + + + +### Insert.MultiRequest + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| requests | [Insert.Request](#payload.v1.Insert.Request) | repeated | | + + + + + + + + +### Insert.Request + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| vector | [Object.Vector](#payload.v1.Object.Vector) | | | +| config | [Insert.Config](#payload.v1.Insert.Config) | | | + + + + + + + + +### Meta + + + + + + + + + +### Meta.Key + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | + + + + + + + + +### Meta.KeyVal + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| key | [string](#string) | | | +| val | [string](#string) | | | + + + + + + + + +### Meta.KeyVals + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| kvs | [Meta.KeyVal](#payload.v1.Meta.KeyVal) | repeated | | + + + + + + + + +### Meta.Keys + + + +| Field | Type | Label | 
Description | +| ----- | ---- | ----- | ----------- | +| keys | [string](#string) | repeated | | + + + + + + + + +### Meta.Val + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| val | [string](#string) | | | + + + + + + + + +### Meta.Vals + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| vals | [string](#string) | repeated | | + + + + + + + + +### Object + + + + + + + + + +### Object.Blob + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| id | [string](#string) | | | +| object | [bytes](#bytes) | | | + + + + + + + + +### Object.Distance + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| id | [string](#string) | | | +| distance | [float](#float) | | | + + + + + + + + +### Object.ID + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| id | [string](#string) | | | + + + + + + + + +### Object.IDs + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| ids | [string](#string) | repeated | | + + + + + + + + +### Object.Location + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| name | [string](#string) | | | +| uuid | [string](#string) | | | +| ips | [string](#string) | repeated | | + + + + + + + + +### Object.Locations + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| locations | [Object.Location](#payload.v1.Object.Location) | repeated | | + + + + + + + + +### Object.Vector + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| id | [string](#string) | | | +| vector | [float](#float) | repeated | | + + + + + + + + +### Object.Vectors + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| vectors | [Object.Vector](#payload.v1.Object.Vector) | repeated | | + + + + + + + + +### Remove + + + + + + + + + +### Remove.Config + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| skip_strict_exist_check | [bool](#bool) | | | + + + + + + + + +### Remove.MultiRequest + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| requests | [Remove.Request](#payload.v1.Remove.Request) | repeated | | + + + + + + + + +### Remove.Request + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| id | [Object.ID](#payload.v1.Object.ID) | | | +| config | [Remove.Config](#payload.v1.Remove.Config) | | | + + + + + + + + +### Replication + + + + + + + + + +### Replication.Agents + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| agents | [string](#string) | repeated | | +| removed_agents | [string](#string) | repeated | | +| replicating_agent | [string](#string) | repeated | | + + + + + + + + +### Replication.Rebalance + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| high_usage_agents | [string](#string) | repeated | | +| low_usage_agents | [string](#string) | repeated | | + + + + + + + + +### Replication.Recovery + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| deleted_agents | [string](#string) | repeated | | + + + + + + + + +### Search + + + + + + + + + +### Search.Config + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| request_id | [string](#string) | | | +| num | [uint32](#uint32) | | | +| radius | 
[float](#float) | | | +| epsilon | [float](#float) | | | +| timeout | [int64](#int64) | | | +| filters | [Filter.Config](#payload.v1.Filter.Config) | | | + + + + + + + + +### Search.IDRequest + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| id | [string](#string) | | | +| config | [Search.Config](#payload.v1.Search.Config) | | | + + + + + + + + +### Search.MultiIDRequest + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| requests | [Search.IDRequest](#payload.v1.Search.IDRequest) | repeated | | + + + + + + + + +### Search.MultiRequest + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| requests | [Search.Request](#payload.v1.Search.Request) | repeated | | + + + + + + + + +### Search.ObjectRequest + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| object | [bytes](#bytes) | | | +| config | [Search.Config](#payload.v1.Search.Config) | | | + + + + + + + + +### Search.Request + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| vector | [float](#float) | repeated | | +| config | [Search.Config](#payload.v1.Search.Config) | | | + + + + + + + + +### Search.Response + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| request_id | [string](#string) | | | +| results | [Object.Distance](#payload.v1.Object.Distance) | repeated | | + + + + + + + + +### Search.Responses + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| responses | [Search.Response](#payload.v1.Search.Response) | repeated | | + + + + + + + + +### Update + + + + + + + + + +### Update.Config + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| skip_strict_exist_check | [bool](#bool) | | | +| filters | [Filter.Config](#payload.v1.Filter.Config) | | | + + + + + + + + +### Update.MultiRequest + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| requests | [Update.Request](#payload.v1.Update.Request) | repeated | | + + + + + + + + +### Update.Request + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| vector | [Object.Vector](#payload.v1.Object.Vector) | | | +| config | [Update.Config](#payload.v1.Update.Config) | | | + + + + + + + + +### Upsert + + + + + + + + + +### Upsert.Config + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| skip_strict_exist_check | [bool](#bool) | | | +| filters | [Filter.Config](#payload.v1.Filter.Config) | | | + + + + + + + + +### Upsert.MultiRequest + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| requests | [Upsert.Request](#payload.v1.Upsert.Request) | repeated | | + + + + + + + + +### Upsert.Request + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| vector | [Object.Vector](#payload.v1.Object.Vector) | | | +| config | [Upsert.Config](#payload.v1.Upsert.Config) | | | + + + + + + + + + + + + + + + + +
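The nested payload messages above become flat Go struct names in the generated code (for example `Insert.Request` becomes `Insert_Request`), following the usual protoc-gen-go mapping. A sketch of composing them, assuming the payload stubs are generated under `github.com/vdaas/vald/apis/grpc/v1/payload`:

```go
package example

import (
	payload "github.com/vdaas/vald/apis/grpc/v1/payload" // assumed generated import path
)

// buildMultiInsert composes an Insert.MultiRequest out of raw id/vector pairs.
func buildMultiInsert(vectors map[string][]float32) *payload.Insert_MultiRequest {
	reqs := make([]*payload.Insert_Request, 0, len(vectors))
	for id, vec := range vectors {
		reqs = append(reqs, &payload.Insert_Request{
			Vector: &payload.Object_Vector{Id: id, Vector: vec},
			Config: &payload.Insert_Config{
				SkipStrictExistCheck: true, // skip the duplicate-ID check on insert
			},
		})
	}
	return &payload.Insert_MultiRequest{Requests: reqs}
}
```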


+ +## apis/proto/v1/vald/filter.proto + + + + + + + + + + + +### Filter + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| SearchObject | [.payload.v1.Search.ObjectRequest](#payload.v1.Search.ObjectRequest) | [.payload.v1.Search.Response](#payload.v1.Search.Response) | | +| StreamSearchObject | [.payload.v1.Search.ObjectRequest](#payload.v1.Search.ObjectRequest) stream | [.payload.v1.Search.Response](#payload.v1.Search.Response) stream | | +| InsertObject | [.payload.v1.Object.Blob](#payload.v1.Object.Blob) | [.payload.v1.Object.Location](#payload.v1.Object.Location) | | +| StreamInsertObject | [.payload.v1.Object.Blob](#payload.v1.Object.Blob) stream | [.payload.v1.Object.Location](#payload.v1.Object.Location) stream | | +| MultiInsertObject | [.payload.v1.Object.Blob](#payload.v1.Object.Blob) | [.payload.v1.Object.Locations](#payload.v1.Object.Locations) | | +| UpdateObject | [.payload.v1.Object.Blob](#payload.v1.Object.Blob) | [.payload.v1.Object.Location](#payload.v1.Object.Location) | | +| StreamUpdateObject | [.payload.v1.Object.Blob](#payload.v1.Object.Blob) stream | [.payload.v1.Object.Location](#payload.v1.Object.Location) stream | | +| MultiUpdateObject | [.payload.v1.Object.Blob](#payload.v1.Object.Blob) | [.payload.v1.Object.Locations](#payload.v1.Object.Locations) | | +| UpsertObject | [.payload.v1.Object.Blob](#payload.v1.Object.Blob) | [.payload.v1.Object.Location](#payload.v1.Object.Location) | | +| StreamUpsertObject | [.payload.v1.Object.Blob](#payload.v1.Object.Blob) stream | [.payload.v1.Object.Location](#payload.v1.Object.Location) stream | | +| MultiUpsertObject | [.payload.v1.Object.Blob](#payload.v1.Object.Blob) | [.payload.v1.Object.Locations](#payload.v1.Object.Locations) | | + + + + + + +


+ +## apis/proto/v1/vald/insert.proto + + + + + + + + + + + +### Insert + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| Insert | [.payload.v1.Insert.Request](#payload.v1.Insert.Request) | [.payload.v1.Object.Location](#payload.v1.Object.Location) | | +| StreamInsert | [.payload.v1.Insert.Request](#payload.v1.Insert.Request) stream | [.payload.v1.Object.Location](#payload.v1.Object.Location) stream | | +| MultiInsert | [.payload.v1.Insert.MultiRequest](#payload.v1.Insert.MultiRequest) | [.payload.v1.Object.Locations](#payload.v1.Object.Locations) | | + + + + + + +
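A sketch of the v1 Insert service above: unlike the gateway Vald service, which takes a bare Object.Vector, the v1 services wrap the vector in an Insert.Request so a per-request Insert.Config can be attached. Import paths and the `NewInsertClient` constructor name are assumptions based on the standard codegen naming.

```go
package example

import (
	"context"

	payload "github.com/vdaas/vald/apis/grpc/v1/payload" // assumed generated import path
	vald "github.com/vdaas/vald/apis/grpc/v1/vald"       // assumed generated import path
	"google.golang.org/grpc"
)

// insertOne inserts a single vector through the v1 Insert service and
// returns the Object.Location it was stored at.
func insertOne(ctx context.Context, conn *grpc.ClientConn, id string, vec []float32) (*payload.Object_Location, error) {
	client := vald.NewInsertClient(conn)
	return client.Insert(ctx, &payload.Insert_Request{
		Vector: &payload.Object_Vector{Id: id, Vector: vec},
		Config: &payload.Insert_Config{SkipStrictExistCheck: false},
	})
}
```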


+ +## apis/proto/v1/vald/object.proto + + + + + + + + + + + +### Object + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| Exists | [.payload.v1.Object.ID](#payload.v1.Object.ID) | [.payload.v1.Object.ID](#payload.v1.Object.ID) | | +| GetObject | [.payload.v1.Object.ID](#payload.v1.Object.ID) | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) | | +| StreamGetObject | [.payload.v1.Object.ID](#payload.v1.Object.ID) stream | [.payload.v1.Object.Vector](#payload.v1.Object.Vector) stream | | + + + + + + +


+ +## apis/proto/v1/vald/remove.proto + + + + + + + + + + + +### Remove + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| Remove | [.payload.v1.Remove.Request](#payload.v1.Remove.Request) | [.payload.v1.Object.Location](#payload.v1.Object.Location) | | +| StreamRemove | [.payload.v1.Remove.Request](#payload.v1.Remove.Request) stream | [.payload.v1.Object.Location](#payload.v1.Object.Location) stream | | +| MultiRemove | [.payload.v1.Remove.MultiRequest](#payload.v1.Remove.MultiRequest) | [.payload.v1.Object.Locations](#payload.v1.Object.Locations) | | + + + + + + +


+ +## apis/proto/v1/vald/search.proto + + + + + + + + + + + +### Search + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| Search | [.payload.v1.Search.Request](#payload.v1.Search.Request) | [.payload.v1.Search.Response](#payload.v1.Search.Response) | | +| SearchByID | [.payload.v1.Search.IDRequest](#payload.v1.Search.IDRequest) | [.payload.v1.Search.Response](#payload.v1.Search.Response) | | +| StreamSearch | [.payload.v1.Search.Request](#payload.v1.Search.Request) stream | [.payload.v1.Search.Response](#payload.v1.Search.Response) stream | | +| StreamSearchByID | [.payload.v1.Search.IDRequest](#payload.v1.Search.IDRequest) stream | [.payload.v1.Search.Response](#payload.v1.Search.Response) stream | | +| MultiSearch | [.payload.v1.Search.MultiRequest](#payload.v1.Search.MultiRequest) | [.payload.v1.Search.Responses](#payload.v1.Search.Responses) | | +| MultiSearchByID | [.payload.v1.Search.MultiIDRequest](#payload.v1.Search.MultiIDRequest) | [.payload.v1.Search.Responses](#payload.v1.Search.Responses) | | + + + + + + +
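A sketch of the bidirectional StreamSearch RPC above, sending one Search.Request per query over a single stream; import paths and the `NewSearchClient` constructor name are assumptions, and the send-all-then-receive pattern is only suitable for small batches (interleave Send/Recv for large streams to respect flow control).

```go
package example

import (
	"context"
	"io"

	payload "github.com/vdaas/vald/apis/grpc/v1/payload" // assumed generated import path
	vald "github.com/vdaas/vald/apis/grpc/v1/vald"       // assumed generated import path
	"google.golang.org/grpc"
)

// streamSearch issues one Search.Request per query vector over a single
// bidirectional stream and collects the responses.
func streamSearch(ctx context.Context, conn *grpc.ClientConn, queries [][]float32) ([]*payload.Search_Response, error) {
	client := vald.NewSearchClient(conn)
	stream, err := client.StreamSearch(ctx)
	if err != nil {
		return nil, err
	}
	for _, q := range queries {
		if err := stream.Send(&payload.Search_Request{
			Vector: q,
			Config: &payload.Search_Config{Num: 10},
		}); err != nil {
			return nil, err
		}
	}
	if err := stream.CloseSend(); err != nil {
		return nil, err
	}
	var resps []*payload.Search_Response
	for {
		res, err := stream.Recv()
		if err == io.EOF {
			return resps, nil
		}
		if err != nil {
			return nil, err
		}
		resps = append(resps, res)
	}
}
```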


+ +## apis/proto/v1/vald/update.proto + + + + + + + + + + + +### Update + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| Update | [.payload.v1.Update.Request](#payload.v1.Update.Request) | [.payload.v1.Object.Location](#payload.v1.Object.Location) | | +| StreamUpdate | [.payload.v1.Update.Request](#payload.v1.Update.Request) stream | [.payload.v1.Object.Location](#payload.v1.Object.Location) stream | | +| MultiUpdate | [.payload.v1.Update.MultiRequest](#payload.v1.Update.MultiRequest) | [.payload.v1.Object.Locations](#payload.v1.Object.Locations) | | + + + + + + +


+ +## apis/proto/v1/vald/upsert.proto + + + + + + + + + + + +### Upsert + + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ------------| +| Upsert | [.payload.v1.Upsert.Request](#payload.v1.Upsert.Request) | [.payload.v1.Object.Location](#payload.v1.Object.Location) | | +| StreamUpsert | [.payload.v1.Upsert.Request](#payload.v1.Upsert.Request) stream | [.payload.v1.Object.Location](#payload.v1.Object.Location) stream | | +| MultiUpsert | [.payload.v1.Upsert.MultiRequest](#payload.v1.Upsert.MultiRequest) | [.payload.v1.Object.Locations](#payload.v1.Object.Locations) | | + + + + + +## Scalar Value Types + +| .proto Type | Notes | C++ | Java | Python | Go | C# | PHP | Ruby | +| ----------- | ----- | --- | ---- | ------ | -- | -- | --- | ---- | +| double | | double | double | float | float64 | double | float | Float | +| float | | float | float | float | float32 | float | float | Float | +| int32 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) | +| int64 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. | int64 | long | int/long | int64 | long | integer/string | Bignum | +| uint32 | Uses variable-length encoding. | uint32 | int | int/long | uint32 | uint | integer | Bignum or Fixnum (as required) | +| uint64 | Uses variable-length encoding. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum or Fixnum (as required) | +| sint32 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) | +| sint64 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. | int64 | long | int/long | int64 | long | integer/string | Bignum | +| fixed32 | Always four bytes. More efficient than uint32 if values are often greater than 2^28. | uint32 | int | int | uint32 | uint | integer | Bignum or Fixnum (as required) | +| fixed64 | Always eight bytes. More efficient than uint64 if values are often greater than 2^56. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum | +| sfixed32 | Always four bytes. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) | +| sfixed64 | Always eight bytes. | int64 | long | int/long | int64 | long | integer/string | Bignum | +| bool | | bool | boolean | boolean | bool | bool | boolean | TrueClass/FalseClass | +| string | A string must always contain UTF-8 encoded or 7-bit ASCII text. | string | String | str/unicode | string | string | string | String (UTF-8) | +| bytes | May contain any arbitrary sequence of bytes. | string | ByteString | str | []byte | ByteString | string | String (ASCII-8BIT) | + diff --git a/apis/grpc/agent/core/agent.pb.go b/apis/grpc/agent/core/agent.pb.go deleted file mode 100644 index 1f69fbcaae..0000000000 --- a/apis/grpc/agent/core/agent.pb.go +++ /dev/null @@ -1,1043 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package core - -import ( - context "context" - fmt "fmt" - math "math" - - _ "github.com/danielvladco/go-proto-gql/pb" - proto "github.com/gogo/protobuf/proto" - payload "github.com/vdaas/vald/apis/grpc/payload" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { proto.RegisterFile("core/agent.proto", fileDescriptor_30864f15308ac822) } - -var fileDescriptor_30864f15308ac822 = []byte{ - // 604 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x95, 0x4f, 0x6e, 0x13, 0x31, - 0x14, 0xc6, 0x99, 0xaa, 0x04, 0xc5, 0x49, 0xff, 0xc8, 0x2d, 0x69, 0x3b, 0xaa, 0x52, 0x29, 0x6c, - 0x50, 0x16, 0x36, 0x02, 0x16, 0x15, 0x62, 0x41, 0x93, 0xb4, 0x65, 0x84, 0xa2, 0xa2, 0x46, 0x54, - 0xc0, 0x0a, 0x67, 0xc6, 0x9d, 0x0e, 0x9a, 0xd8, 0x53, 0xdb, 0x89, 0x1a, 0x21, 0x36, 0x5c, 0x81, - 0x8b, 0x74, 0xcf, 0x05, 0x58, 0x22, 0x71, 0x81, 0x28, 0xe2, 0x20, 0xc8, 0xf6, 0x24, 0x4d, 0x32, - 0x41, 0x28, 0x59, 0xce, 0xf3, 0xfb, 0x7e, 0xfe, 0x3e, 0x4b, 0x6f, 0x1e, 0xd8, 0xf4, 0xb9, 0xa0, - 0x98, 0x84, 0x94, 0x29, 0x94, 0x08, 0xae, 0x38, 0x5c, 0xd5, 0x15, 0x77, 0x2d, 0x21, 0xfd, 0x98, - 0x93, 0xc0, 0x16, 0xdd, 0xfd, 0x90, 0xf3, 0x30, 0xa6, 0x98, 0x24, 0x11, 0x26, 0x8c, 0x71, 0x45, - 0x54, 0xc4, 0x99, 0x4c, 0x4f, 0x8b, 0x49, 0x1b, 0x87, 0xd7, 0xb1, 0xfd, 0x7a, 0xfa, 0xa3, 0x00, - 0xee, 0x1f, 0x69, 0x20, 0x3c, 0x01, 0xb9, 0xe3, 0x9b, 0x48, 0x2a, 0x09, 0x21, 0x1a, 0xf1, 0xce, - 0xda, 0x9f, 0xa9, 0xaf, 0x90, 0xd7, 0x70, 0xe7, 0xd4, 0x2a, 0xdb, 0xdf, 0x7e, 0xff, 0xf9, 0xbe, - 0xb2, 0x0e, 0x8b, 0x98, 0x1a, 0x21, 0xfe, 0x12, 0x05, 0x5f, 0xe1, 0x19, 0xc8, 0xb5, 0x28, 0x11, - 0xfe, 0x15, 0xdc, 0x19, 0x6b, 0x6c, 0x01, 0x9d, 0xd3, 0xeb, 0x2e, 0x95, 0xca, 0xdd, 0xcd, 0x1e, - 0xc8, 0x84, 0x33, 0x49, 0x2b, 0xd0, 0x20, 0x8b, 0x95, 0x07, 0x58, 0x9a, 0x93, 0x17, 0x4e, 0x15, - 0xbe, 0x07, 0xc0, 0xb6, 0xd5, 0xfa, 0x5e, 0x03, 0xee, 0xcd, 0x6a, 0xbd, 0xc6, 0xff, 0xb1, 0x0f, - 0x0d, 0x76, 0xa3, 0x02, 0x52, 0x2c, 0x8e, 0x02, 0x4d, 0x3e, 0x05, 0xc5, 0x96, 0x12, 0x94, 0x74, - 0x96, 0x37, 0x7c, 0xef, 0xb1, 0xf3, 0xc4, 0x81, 0x4d, 0xb0, 0x39, 0x09, 0x5a, 0xde, 0xa8, 0xc5, - 0xbd, 0x06, 0x39, 0x8f, 0x49, 0x2a, 0x14, 0x2c, 0xcd, 0x3e, 0xfb, 0x05, 0xf5, 0x15, 0x17, 0xee, - 0xfa, 0xb8, 0x7e, 0xdc, 0x49, 0x54, 0xbf, 0x52, 0xba, 0x1d, 0x1c, 0x38, 0xe3, 0xb7, 0x8b, 0x8c, - 0x58, 0x27, 0x7c, 0x39, 0x4a, 0xb8, 0x20, 0xcf, 0xfa, 0x38, 0x04, 0x85, 0x66, 0x37, 0x56, 0x51, - 0x2a, 0xde, 0x99, 0x2f, 0x96, 0x59, 0xb5, 0x4e, 0xf0, 0x2e, 0x09, 0x88, 0xa2, 0x4b, 0x26, 0xe8, - 0x1a, 0xf1, 0x54, 
0x82, 0x05, 0x79, 0xd3, 0x09, 0x52, 0xf1, 0x02, 0x09, 0x4e, 0x40, 0xee, 0x9c, - 0x76, 0x78, 0x8f, 0xce, 0x1d, 0x87, 0xd9, 0xfe, 0xdd, 0xb1, 0xfb, 0xf5, 0x6a, 0x11, 0x0b, 0x23, - 0xb4, 0xe3, 0x70, 0x38, 0xf2, 0xbf, 0x00, 0xcd, 0x7a, 0x7f, 0x9e, 0x7a, 0x4f, 0x85, 0x5b, 0x59, - 0xe1, 0x3c, 0xdf, 0x4d, 0x90, 0x3f, 0xa5, 0xca, 0xb6, 0xcc, 0xbd, 0xec, 0x1f, 0x0f, 0x38, 0x31, - 0xcd, 0xdc, 0xd4, 0xad, 0xfd, 0x3a, 0xd8, 0xb0, 0xf6, 0x97, 0x83, 0xda, 0x24, 0x1f, 0x40, 0xa1, - 0x2e, 0x28, 0x51, 0xd4, 0x63, 0x01, 0xbd, 0x81, 0x8f, 0xc6, 0xcd, 0x75, 0xce, 0x94, 0xe0, 0x31, - 0x9a, 0x38, 0x1d, 0xcd, 0xc8, 0x6c, 0xb2, 0x74, 0x84, 0xe1, 0x1a, 0x8e, 0x74, 0x1b, 0xf6, 0x8d, - 0x04, 0xbe, 0x02, 0xf9, 0x16, 0xe9, 0xa5, 0xe0, 0x19, 0x4d, 0x86, 0xb1, 0x65, 0x18, 0x6b, 0xb0, - 0x90, 0x32, 0x24, 0xe9, 0x51, 0x18, 0x02, 0x68, 0xaf, 0x3f, 0x62, 0xc1, 0x1d, 0x6a, 0x29, 0x8f, - 0xfb, 0x86, 0x5f, 0x82, 0xdb, 0x53, 0x1e, 0x09, 0x0b, 0xcc, 0x45, 0x6f, 0x40, 0xde, 0xa8, 0x3d, - 0x76, 0xc9, 0x33, 0x56, 0xef, 0xfe, 0x16, 0xfa, 0x18, 0x99, 0x46, 0x54, 0xe7, 0x5d, 0xa6, 0x32, - 0xae, 0x23, 0x76, 0xc9, 0xdd, 0xd5, 0xdb, 0xc1, 0xc1, 0x4a, 0xed, 0xd3, 0xcf, 0x61, 0xd9, 0xf9, - 0x35, 0x2c, 0x3b, 0x83, 0x61, 0xd9, 0x01, 0x7b, 0x5c, 0x84, 0xa8, 0x17, 0x10, 0x22, 0x51, 0x8f, - 0xc4, 0x01, 0xb2, 0x8b, 0x42, 0x6f, 0x88, 0x5a, 0xfe, 0x82, 0xc4, 0x81, 0xf9, 0xcf, 0xbf, 0x75, - 0x3e, 0x56, 0xc3, 0x48, 0x5d, 0x75, 0xdb, 0xc8, 0xe7, 0x1d, 0x6c, 0xda, 0xb1, 0x6e, 0xd7, 0xeb, - 0x42, 0xe2, 0x50, 0x24, 0xbe, 0xdd, 0x30, 0x58, 0x0b, 0xdb, 0x39, 0xb3, 0x26, 0x9e, 0xfd, 0x0d, - 0x00, 0x00, 0xff, 0xff, 0x1b, 0x3e, 0x24, 0x30, 0x7b, 0x06, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// AgentClient is the client API for Agent service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type AgentClient interface { - Exists(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_ID, error) - Search(ctx context.Context, in *payload.Search_Request, opts ...grpc.CallOption) (*payload.Search_Response, error) - SearchByID(ctx context.Context, in *payload.Search_IDRequest, opts ...grpc.CallOption) (*payload.Search_Response, error) - StreamSearch(ctx context.Context, opts ...grpc.CallOption) (Agent_StreamSearchClient, error) - StreamSearchByID(ctx context.Context, opts ...grpc.CallOption) (Agent_StreamSearchByIDClient, error) - Insert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Empty, error) - StreamInsert(ctx context.Context, opts ...grpc.CallOption) (Agent_StreamInsertClient, error) - MultiInsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) - Update(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Empty, error) - StreamUpdate(ctx context.Context, opts ...grpc.CallOption) (Agent_StreamUpdateClient, error) - MultiUpdate(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) - Remove(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Empty, error) - StreamRemove(ctx context.Context, opts ...grpc.CallOption) (Agent_StreamRemoveClient, error) - MultiRemove(ctx context.Context, in *payload.Object_IDs, opts ...grpc.CallOption) (*payload.Empty, error) - GetObject(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Vector, error) - StreamGetObject(ctx context.Context, opts ...grpc.CallOption) (Agent_StreamGetObjectClient, error) - CreateIndex(ctx context.Context, in *payload.Control_CreateIndexRequest, opts ...grpc.CallOption) (*payload.Empty, error) - SaveIndex(ctx context.Context, in *payload.Empty, opts ...grpc.CallOption) (*payload.Empty, error) - CreateAndSaveIndex(ctx context.Context, in *payload.Control_CreateIndexRequest, opts ...grpc.CallOption) (*payload.Empty, error) - IndexInfo(ctx context.Context, in *payload.Empty, opts ...grpc.CallOption) (*payload.Info_Index_Count, error) -} - -type agentClient struct { - cc *grpc.ClientConn -} - -func NewAgentClient(cc *grpc.ClientConn) AgentClient { - return &agentClient{cc} -} - -func (c *agentClient) Exists(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_ID, error) { - out := new(payload.Object_ID) - err := c.cc.Invoke(ctx, "/core.Agent/Exists", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) Search(ctx context.Context, in *payload.Search_Request, opts ...grpc.CallOption) (*payload.Search_Response, error) { - out := new(payload.Search_Response) - err := c.cc.Invoke(ctx, "/core.Agent/Search", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) SearchByID(ctx context.Context, in *payload.Search_IDRequest, opts ...grpc.CallOption) (*payload.Search_Response, error) { - out := new(payload.Search_Response) - err := c.cc.Invoke(ctx, "/core.Agent/SearchByID", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) StreamSearch(ctx context.Context, opts ...grpc.CallOption) (Agent_StreamSearchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Agent_serviceDesc.Streams[0], "/core.Agent/StreamSearch", opts...) 
- if err != nil { - return nil, err - } - x := &agentStreamSearchClient{stream} - return x, nil -} - -type Agent_StreamSearchClient interface { - Send(*payload.Search_Request) error - Recv() (*payload.Search_Response, error) - grpc.ClientStream -} - -type agentStreamSearchClient struct { - grpc.ClientStream -} - -func (x *agentStreamSearchClient) Send(m *payload.Search_Request) error { - return x.ClientStream.SendMsg(m) -} - -func (x *agentStreamSearchClient) Recv() (*payload.Search_Response, error) { - m := new(payload.Search_Response) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *agentClient) StreamSearchByID(ctx context.Context, opts ...grpc.CallOption) (Agent_StreamSearchByIDClient, error) { - stream, err := c.cc.NewStream(ctx, &_Agent_serviceDesc.Streams[1], "/core.Agent/StreamSearchByID", opts...) - if err != nil { - return nil, err - } - x := &agentStreamSearchByIDClient{stream} - return x, nil -} - -type Agent_StreamSearchByIDClient interface { - Send(*payload.Search_IDRequest) error - Recv() (*payload.Search_Response, error) - grpc.ClientStream -} - -type agentStreamSearchByIDClient struct { - grpc.ClientStream -} - -func (x *agentStreamSearchByIDClient) Send(m *payload.Search_IDRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *agentStreamSearchByIDClient) Recv() (*payload.Search_Response, error) { - m := new(payload.Search_Response) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *agentClient) Insert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/core.Agent/Insert", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) StreamInsert(ctx context.Context, opts ...grpc.CallOption) (Agent_StreamInsertClient, error) { - stream, err := c.cc.NewStream(ctx, &_Agent_serviceDesc.Streams[2], "/core.Agent/StreamInsert", opts...) - if err != nil { - return nil, err - } - x := &agentStreamInsertClient{stream} - return x, nil -} - -type Agent_StreamInsertClient interface { - Send(*payload.Object_Vector) error - Recv() (*payload.Empty, error) - grpc.ClientStream -} - -type agentStreamInsertClient struct { - grpc.ClientStream -} - -func (x *agentStreamInsertClient) Send(m *payload.Object_Vector) error { - return x.ClientStream.SendMsg(m) -} - -func (x *agentStreamInsertClient) Recv() (*payload.Empty, error) { - m := new(payload.Empty) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *agentClient) MultiInsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/core.Agent/MultiInsert", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) Update(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/core.Agent/Update", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) StreamUpdate(ctx context.Context, opts ...grpc.CallOption) (Agent_StreamUpdateClient, error) { - stream, err := c.cc.NewStream(ctx, &_Agent_serviceDesc.Streams[3], "/core.Agent/StreamUpdate", opts...) 
- if err != nil { - return nil, err - } - x := &agentStreamUpdateClient{stream} - return x, nil -} - -type Agent_StreamUpdateClient interface { - Send(*payload.Object_Vector) error - Recv() (*payload.Empty, error) - grpc.ClientStream -} - -type agentStreamUpdateClient struct { - grpc.ClientStream -} - -func (x *agentStreamUpdateClient) Send(m *payload.Object_Vector) error { - return x.ClientStream.SendMsg(m) -} - -func (x *agentStreamUpdateClient) Recv() (*payload.Empty, error) { - m := new(payload.Empty) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *agentClient) MultiUpdate(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/core.Agent/MultiUpdate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) Remove(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/core.Agent/Remove", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) StreamRemove(ctx context.Context, opts ...grpc.CallOption) (Agent_StreamRemoveClient, error) { - stream, err := c.cc.NewStream(ctx, &_Agent_serviceDesc.Streams[4], "/core.Agent/StreamRemove", opts...) - if err != nil { - return nil, err - } - x := &agentStreamRemoveClient{stream} - return x, nil -} - -type Agent_StreamRemoveClient interface { - Send(*payload.Object_ID) error - Recv() (*payload.Empty, error) - grpc.ClientStream -} - -type agentStreamRemoveClient struct { - grpc.ClientStream -} - -func (x *agentStreamRemoveClient) Send(m *payload.Object_ID) error { - return x.ClientStream.SendMsg(m) -} - -func (x *agentStreamRemoveClient) Recv() (*payload.Empty, error) { - m := new(payload.Empty) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *agentClient) MultiRemove(ctx context.Context, in *payload.Object_IDs, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/core.Agent/MultiRemove", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) GetObject(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Vector, error) { - out := new(payload.Object_Vector) - err := c.cc.Invoke(ctx, "/core.Agent/GetObject", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) StreamGetObject(ctx context.Context, opts ...grpc.CallOption) (Agent_StreamGetObjectClient, error) { - stream, err := c.cc.NewStream(ctx, &_Agent_serviceDesc.Streams[5], "/core.Agent/StreamGetObject", opts...) 
- if err != nil { - return nil, err - } - x := &agentStreamGetObjectClient{stream} - return x, nil -} - -type Agent_StreamGetObjectClient interface { - Send(*payload.Object_ID) error - Recv() (*payload.Object_Vector, error) - grpc.ClientStream -} - -type agentStreamGetObjectClient struct { - grpc.ClientStream -} - -func (x *agentStreamGetObjectClient) Send(m *payload.Object_ID) error { - return x.ClientStream.SendMsg(m) -} - -func (x *agentStreamGetObjectClient) Recv() (*payload.Object_Vector, error) { - m := new(payload.Object_Vector) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *agentClient) CreateIndex(ctx context.Context, in *payload.Control_CreateIndexRequest, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/core.Agent/CreateIndex", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) SaveIndex(ctx context.Context, in *payload.Empty, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/core.Agent/SaveIndex", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) CreateAndSaveIndex(ctx context.Context, in *payload.Control_CreateIndexRequest, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/core.Agent/CreateAndSaveIndex", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *agentClient) IndexInfo(ctx context.Context, in *payload.Empty, opts ...grpc.CallOption) (*payload.Info_Index_Count, error) { - out := new(payload.Info_Index_Count) - err := c.cc.Invoke(ctx, "/core.Agent/IndexInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// AgentServer is the server API for Agent service. -type AgentServer interface { - Exists(context.Context, *payload.Object_ID) (*payload.Object_ID, error) - Search(context.Context, *payload.Search_Request) (*payload.Search_Response, error) - SearchByID(context.Context, *payload.Search_IDRequest) (*payload.Search_Response, error) - StreamSearch(Agent_StreamSearchServer) error - StreamSearchByID(Agent_StreamSearchByIDServer) error - Insert(context.Context, *payload.Object_Vector) (*payload.Empty, error) - StreamInsert(Agent_StreamInsertServer) error - MultiInsert(context.Context, *payload.Object_Vectors) (*payload.Empty, error) - Update(context.Context, *payload.Object_Vector) (*payload.Empty, error) - StreamUpdate(Agent_StreamUpdateServer) error - MultiUpdate(context.Context, *payload.Object_Vectors) (*payload.Empty, error) - Remove(context.Context, *payload.Object_ID) (*payload.Empty, error) - StreamRemove(Agent_StreamRemoveServer) error - MultiRemove(context.Context, *payload.Object_IDs) (*payload.Empty, error) - GetObject(context.Context, *payload.Object_ID) (*payload.Object_Vector, error) - StreamGetObject(Agent_StreamGetObjectServer) error - CreateIndex(context.Context, *payload.Control_CreateIndexRequest) (*payload.Empty, error) - SaveIndex(context.Context, *payload.Empty) (*payload.Empty, error) - CreateAndSaveIndex(context.Context, *payload.Control_CreateIndexRequest) (*payload.Empty, error) - IndexInfo(context.Context, *payload.Empty) (*payload.Info_Index_Count, error) -} - -// UnimplementedAgentServer can be embedded to have forward compatible implementations. 
-type UnimplementedAgentServer struct { -} - -func (*UnimplementedAgentServer) Exists(ctx context.Context, req *payload.Object_ID) (*payload.Object_ID, error) { - return nil, status.Errorf(codes.Unimplemented, "method Exists not implemented") -} -func (*UnimplementedAgentServer) Search(ctx context.Context, req *payload.Search_Request) (*payload.Search_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") -} -func (*UnimplementedAgentServer) SearchByID(ctx context.Context, req *payload.Search_IDRequest) (*payload.Search_Response, error) { - return nil, status.Errorf(codes.Unimplemented, "method SearchByID not implemented") -} -func (*UnimplementedAgentServer) StreamSearch(srv Agent_StreamSearchServer) error { - return status.Errorf(codes.Unimplemented, "method StreamSearch not implemented") -} -func (*UnimplementedAgentServer) StreamSearchByID(srv Agent_StreamSearchByIDServer) error { - return status.Errorf(codes.Unimplemented, "method StreamSearchByID not implemented") -} -func (*UnimplementedAgentServer) Insert(ctx context.Context, req *payload.Object_Vector) (*payload.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Insert not implemented") -} -func (*UnimplementedAgentServer) StreamInsert(srv Agent_StreamInsertServer) error { - return status.Errorf(codes.Unimplemented, "method StreamInsert not implemented") -} -func (*UnimplementedAgentServer) MultiInsert(ctx context.Context, req *payload.Object_Vectors) (*payload.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method MultiInsert not implemented") -} -func (*UnimplementedAgentServer) Update(ctx context.Context, req *payload.Object_Vector) (*payload.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (*UnimplementedAgentServer) StreamUpdate(srv Agent_StreamUpdateServer) error { - return status.Errorf(codes.Unimplemented, "method StreamUpdate not implemented") -} -func (*UnimplementedAgentServer) MultiUpdate(ctx context.Context, req *payload.Object_Vectors) (*payload.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method MultiUpdate not implemented") -} -func (*UnimplementedAgentServer) Remove(ctx context.Context, req *payload.Object_ID) (*payload.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Remove not implemented") -} -func (*UnimplementedAgentServer) StreamRemove(srv Agent_StreamRemoveServer) error { - return status.Errorf(codes.Unimplemented, "method StreamRemove not implemented") -} -func (*UnimplementedAgentServer) MultiRemove(ctx context.Context, req *payload.Object_IDs) (*payload.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method MultiRemove not implemented") -} -func (*UnimplementedAgentServer) GetObject(ctx context.Context, req *payload.Object_ID) (*payload.Object_Vector, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented") -} -func (*UnimplementedAgentServer) StreamGetObject(srv Agent_StreamGetObjectServer) error { - return status.Errorf(codes.Unimplemented, "method StreamGetObject not implemented") -} -func (*UnimplementedAgentServer) CreateIndex(ctx context.Context, req *payload.Control_CreateIndexRequest) (*payload.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateIndex not implemented") -} -func (*UnimplementedAgentServer) SaveIndex(ctx context.Context, req *payload.Empty) (*payload.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, 
"method SaveIndex not implemented") -} -func (*UnimplementedAgentServer) CreateAndSaveIndex(ctx context.Context, req *payload.Control_CreateIndexRequest) (*payload.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateAndSaveIndex not implemented") -} -func (*UnimplementedAgentServer) IndexInfo(ctx context.Context, req *payload.Empty) (*payload.Info_Index_Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method IndexInfo not implemented") -} - -func RegisterAgentServer(s *grpc.Server, srv AgentServer) { - s.RegisterService(&_Agent_serviceDesc, srv) -} - -func _Agent_Exists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Object_ID) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).Exists(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/Exists", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).Exists(ctx, req.(*payload.Object_ID)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Search_Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).Search(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/Search", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).Search(ctx, req.(*payload.Search_Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_SearchByID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Search_IDRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).SearchByID(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/SearchByID", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).SearchByID(ctx, req.(*payload.Search_IDRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_StreamSearch_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(AgentServer).StreamSearch(&agentStreamSearchServer{stream}) -} - -type Agent_StreamSearchServer interface { - Send(*payload.Search_Response) error - Recv() (*payload.Search_Request, error) - grpc.ServerStream -} - -type agentStreamSearchServer struct { - grpc.ServerStream -} - -func (x *agentStreamSearchServer) Send(m *payload.Search_Response) error { - return x.ServerStream.SendMsg(m) -} - -func (x *agentStreamSearchServer) Recv() (*payload.Search_Request, error) { - m := new(payload.Search_Request) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Agent_StreamSearchByID_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(AgentServer).StreamSearchByID(&agentStreamSearchByIDServer{stream}) -} - -type Agent_StreamSearchByIDServer interface { - Send(*payload.Search_Response) error - Recv() (*payload.Search_IDRequest, error) - grpc.ServerStream -} - -type agentStreamSearchByIDServer struct { - grpc.ServerStream -} - 
-func (x *agentStreamSearchByIDServer) Send(m *payload.Search_Response) error { - return x.ServerStream.SendMsg(m) -} - -func (x *agentStreamSearchByIDServer) Recv() (*payload.Search_IDRequest, error) { - m := new(payload.Search_IDRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Agent_Insert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Object_Vector) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).Insert(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/Insert", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).Insert(ctx, req.(*payload.Object_Vector)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_StreamInsert_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(AgentServer).StreamInsert(&agentStreamInsertServer{stream}) -} - -type Agent_StreamInsertServer interface { - Send(*payload.Empty) error - Recv() (*payload.Object_Vector, error) - grpc.ServerStream -} - -type agentStreamInsertServer struct { - grpc.ServerStream -} - -func (x *agentStreamInsertServer) Send(m *payload.Empty) error { - return x.ServerStream.SendMsg(m) -} - -func (x *agentStreamInsertServer) Recv() (*payload.Object_Vector, error) { - m := new(payload.Object_Vector) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Agent_MultiInsert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Object_Vectors) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).MultiInsert(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/MultiInsert", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).MultiInsert(ctx, req.(*payload.Object_Vectors)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Object_Vector) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).Update(ctx, req.(*payload.Object_Vector)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_StreamUpdate_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(AgentServer).StreamUpdate(&agentStreamUpdateServer{stream}) -} - -type Agent_StreamUpdateServer interface { - Send(*payload.Empty) error - Recv() (*payload.Object_Vector, error) - grpc.ServerStream -} - -type agentStreamUpdateServer struct { - grpc.ServerStream -} - -func (x *agentStreamUpdateServer) Send(m *payload.Empty) error { - return x.ServerStream.SendMsg(m) -} - -func (x *agentStreamUpdateServer) Recv() (*payload.Object_Vector, error) { - m := new(payload.Object_Vector) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - 
-func _Agent_MultiUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Object_Vectors) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).MultiUpdate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/MultiUpdate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).MultiUpdate(ctx, req.(*payload.Object_Vectors)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_Remove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Object_ID) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).Remove(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/Remove", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).Remove(ctx, req.(*payload.Object_ID)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_StreamRemove_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(AgentServer).StreamRemove(&agentStreamRemoveServer{stream}) -} - -type Agent_StreamRemoveServer interface { - Send(*payload.Empty) error - Recv() (*payload.Object_ID, error) - grpc.ServerStream -} - -type agentStreamRemoveServer struct { - grpc.ServerStream -} - -func (x *agentStreamRemoveServer) Send(m *payload.Empty) error { - return x.ServerStream.SendMsg(m) -} - -func (x *agentStreamRemoveServer) Recv() (*payload.Object_ID, error) { - m := new(payload.Object_ID) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Agent_MultiRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Object_IDs) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).MultiRemove(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/MultiRemove", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).MultiRemove(ctx, req.(*payload.Object_IDs)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_GetObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Object_ID) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).GetObject(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/GetObject", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).GetObject(ctx, req.(*payload.Object_ID)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_StreamGetObject_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(AgentServer).StreamGetObject(&agentStreamGetObjectServer{stream}) -} - -type Agent_StreamGetObjectServer interface { - Send(*payload.Object_Vector) error - Recv() (*payload.Object_ID, error) - grpc.ServerStream -} - -type agentStreamGetObjectServer struct { - grpc.ServerStream -} - -func (x 
*agentStreamGetObjectServer) Send(m *payload.Object_Vector) error { - return x.ServerStream.SendMsg(m) -} - -func (x *agentStreamGetObjectServer) Recv() (*payload.Object_ID, error) { - m := new(payload.Object_ID) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Agent_CreateIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Control_CreateIndexRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).CreateIndex(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/CreateIndex", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).CreateIndex(ctx, req.(*payload.Control_CreateIndexRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_SaveIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).SaveIndex(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/SaveIndex", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).SaveIndex(ctx, req.(*payload.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_CreateAndSaveIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Control_CreateIndexRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).CreateAndSaveIndex(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/CreateAndSaveIndex", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).CreateAndSaveIndex(ctx, req.(*payload.Control_CreateIndexRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Agent_IndexInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AgentServer).IndexInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core.Agent/IndexInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentServer).IndexInfo(ctx, req.(*payload.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -var _Agent_serviceDesc = grpc.ServiceDesc{ - ServiceName: "core.Agent", - HandlerType: (*AgentServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Exists", - Handler: _Agent_Exists_Handler, - }, - { - MethodName: "Search", - Handler: _Agent_Search_Handler, - }, - { - MethodName: "SearchByID", - Handler: _Agent_SearchByID_Handler, - }, - { - MethodName: "Insert", - Handler: _Agent_Insert_Handler, - }, - { - MethodName: "MultiInsert", - Handler: _Agent_MultiInsert_Handler, - }, - { - MethodName: "Update", - Handler: _Agent_Update_Handler, - }, - { - MethodName: "MultiUpdate", - Handler: _Agent_MultiUpdate_Handler, - }, - { - MethodName: "Remove", - Handler: 
_Agent_Remove_Handler, - }, - { - MethodName: "MultiRemove", - Handler: _Agent_MultiRemove_Handler, - }, - { - MethodName: "GetObject", - Handler: _Agent_GetObject_Handler, - }, - { - MethodName: "CreateIndex", - Handler: _Agent_CreateIndex_Handler, - }, - { - MethodName: "SaveIndex", - Handler: _Agent_SaveIndex_Handler, - }, - { - MethodName: "CreateAndSaveIndex", - Handler: _Agent_CreateAndSaveIndex_Handler, - }, - { - MethodName: "IndexInfo", - Handler: _Agent_IndexInfo_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamSearch", - Handler: _Agent_StreamSearch_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "StreamSearchByID", - Handler: _Agent_StreamSearchByID_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "StreamInsert", - Handler: _Agent_StreamInsert_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "StreamUpdate", - Handler: _Agent_StreamUpdate_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "StreamRemove", - Handler: _Agent_StreamRemove_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "StreamGetObject", - Handler: _Agent_StreamGetObject_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "core/agent.proto", -} diff --git a/apis/grpc/filter/egress/egress_filter.pb.go b/apis/grpc/filter/egress/egress_filter.pb.go index 6e923b7c34..f50da87ec7 100644 --- a/apis/grpc/filter/egress/egress_filter.pb.go +++ b/apis/grpc/filter/egress/egress_filter.pb.go @@ -21,10 +21,8 @@ import ( fmt "fmt" math "math" - _ "github.com/danielvladco/go-proto-gql/pb" proto "github.com/gogo/protobuf/proto" payload "github.com/vdaas/vald/apis/grpc/payload" - _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -41,27 +39,27 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func init() { proto.RegisterFile("egress/egress_filter.proto", fileDescriptor_8d8e16edf70dd8e8) } - -var fileDescriptor_8d8e16edf70dd8e8 = []byte{ - // 262 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0x4d, 0x4a, 0x03, 0x31, - 0x14, 0x80, 0x1b, 0x91, 0x2e, 0xc2, 0x14, 0x64, 0x56, 0x12, 0xca, 0x08, 0x5d, 0xb9, 0x31, 0x11, - 0xdd, 0xbb, 0x28, 0x2a, 0xee, 0x14, 0x0b, 0x2e, 0xdc, 0xc8, 0x9b, 0x4c, 0x4c, 0x23, 0x99, 0xbc, - 0x98, 0xc4, 0x82, 0x97, 0xf1, 0x0c, 0x1e, 0xc3, 0xa5, 0x47, 0x28, 0x73, 0x12, 0xe9, 0x64, 0x84, - 0xaa, 0xb8, 0xca, 0xcf, 0xc7, 0xfb, 0xe0, 0x7d, 0x94, 0x29, 0x1d, 0x54, 0x8c, 0x22, 0x1f, 0x0f, - 0x8f, 0xc6, 0x26, 0x15, 0xb8, 0x0f, 0x98, 0xb0, 0x9c, 0xfc, 0xf8, 0x64, 0x13, 0x0f, 0xaf, 0x16, - 0xa1, 0xc9, 0x94, 0x4d, 0x35, 0xa2, 0xb6, 0x4a, 0x80, 0x37, 0x02, 0x9c, 0xc3, 0x04, 0xc9, 0xa0, - 0x8b, 0x03, 0x2d, 0x7c, 0x2d, 0xf4, 0xb3, 0xcd, 0xaf, 0x93, 0x37, 0x42, 0x8b, 0x8b, 0x5e, 0x76, - 0xd9, 0xbb, 0xca, 0x33, 0x3a, 0x1e, 0x6e, 0xfb, 0xfc, 0x5b, 0xbb, 0x50, 0x10, 0xe4, 0x92, 0xdf, - 0xaa, 0xe8, 0xd1, 0x45, 0xc5, 0xfe, 0x25, 0xb3, 0x51, 0x79, 0x45, 0x8b, 0x45, 0x0a, 0x0a, 0xda, - 0x3f, 0x96, 0xeb, 0xfa, 0x49, 0xc9, 0xc4, 0xcf, 0x4d, 0x4c, 0xe0, 0xe4, 0xb6, 0xe5, 0x17, 0x99, - 0x8d, 0x0e, 0xc9, 0x31, 0x61, 0xbb, 0xef, 0xeb, 0x83, 0x9d, 0x79, 0xfb, 0xd1, 0x55, 0xe4, 0xb3, - 0xab, 0xc8, 0xba, 0xab, 0x08, 0x9d, 0x62, 0xd0, 0x7c, 0xd5, 0x00, 0x44, 0xbe, 0x02, 0xdb, 0xf0, - 0xa1, 0x4a, 0xce, 0x31, 0xdf, 0xbb, 0x03, 0xdb, 0x6c, 0x6f, 0x73, 0x43, 0xee, 0x8f, 0xb4, 0x49, - 0xcb, 0x97, 0x9a, 0x4b, 0x6c, 0x45, 0x3f, 0x28, 0x36, 0x83, 0x9b, 0x36, 0x51, 0xe8, 0xe0, 0xa5, - 0xc8, 0x8a, 0x21, 0x73, 0x3d, 0xee, 0xb3, 0x9c, 0x7e, 0x05, 0x00, 0x00, 0xff, 0xff, 0xd5, 0x4a, - 0xa7, 0x4b, 0x7e, 0x01, 0x00, 0x00, +func init() { + proto.RegisterFile("apis/proto/filter/egress/egress_filter.proto", fileDescriptor_deef420cdb5157c4) +} + +var fileDescriptor_deef420cdb5157c4 = []byte{ + // 234 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x49, 0x2c, 0xc8, 0x2c, + 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x4f, 0xcb, 0xcc, 0x29, 0x49, 0x2d, 0xd2, 0x4f, 0x4d, + 0x2f, 0x4a, 0x2d, 0x2e, 0x86, 0x52, 0xf1, 0x10, 0x41, 0x3d, 0xb0, 0x0a, 0x21, 0x5e, 0x14, 0x41, + 0x29, 0x05, 0x24, 0xcd, 0x05, 0x89, 0x95, 0x39, 0xf9, 0x89, 0x29, 0x30, 0x1a, 0xa2, 0xc1, 0x68, + 0x06, 0x23, 0x17, 0x8f, 0x2b, 0x58, 0x8f, 0x1b, 0x58, 0x8b, 0x90, 0x1d, 0x17, 0x1b, 0x94, 0x25, + 0xa1, 0x07, 0x53, 0x1a, 0x9c, 0x9a, 0x58, 0x94, 0x9c, 0xa1, 0x17, 0x94, 0x5a, 0x5c, 0x90, 0x9f, + 0x57, 0x9c, 0x2a, 0x85, 0x53, 0x46, 0x89, 0x41, 0xc8, 0x83, 0x8b, 0x27, 0xb8, 0xa4, 0x28, 0x35, + 0x31, 0x17, 0xc3, 0x14, 0xff, 0xa4, 0xac, 0xd4, 0xe4, 0x12, 0x3d, 0x97, 0xcc, 0xe2, 0x92, 0xc4, + 0xbc, 0x64, 0x64, 0x53, 0xd0, 0x64, 0x94, 0x18, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x72, 0x4f, 0x3c, + 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0x46, 0x2e, 0x99, 0xfc, 0xa2, 0x74, + 0xbd, 0xb2, 0x94, 0xc4, 0xc4, 0x62, 0xbd, 0xb2, 0xc4, 0x9c, 0x14, 0x3d, 0xa8, 0xb7, 0x21, 0xfe, + 0x75, 0x12, 0x08, 0x4b, 0xcc, 0x49, 0x41, 0xf6, 0x47, 0x00, 0x63, 0x94, 0x6e, 0x7a, 0x66, 0x49, + 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0x58, 0xa3, 0x3e, 0x48, 0xa3, 0x3e, 0x38, 0x48, + 0xd2, 0x8b, 0x0a, 0x92, 0x51, 0x83, 0x33, 0x89, 0x0d, 0x1c, 0x20, 0xc6, 0x80, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xea, 0x47, 0x77, 0x84, 0x71, 0x01, 0x00, 0x00, } // Reference imports 
to suppress errors if they are not otherwise used. @@ -210,5 +208,5 @@ var _EgressFilter_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "egress/egress_filter.proto", + Metadata: "apis/proto/filter/egress/egress_filter.proto", } diff --git a/apis/grpc/filter/ingress/ingress_filter.pb.go b/apis/grpc/filter/ingress/ingress_filter.pb.go deleted file mode 100644 index 61734d018e..0000000000 --- a/apis/grpc/filter/ingress/ingress_filter.pb.go +++ /dev/null @@ -1,100 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ingress - -import ( - context "context" - fmt "fmt" - math "math" - - _ "github.com/danielvladco/go-proto-gql/pb" - proto "github.com/gogo/protobuf/proto" - _ "github.com/vdaas/vald/apis/grpc/payload" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { proto.RegisterFile("ingress/ingress_filter.proto", fileDescriptor_8f5342c46835d3ee) } - -var fileDescriptor_8f5342c46835d3ee = []byte{ - // 192 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xc9, 0xcc, 0x4b, 0x2f, - 0x4a, 0x2d, 0x2e, 0xd6, 0x87, 0xd2, 0xf1, 0x69, 0x99, 0x39, 0x25, 0xa9, 0x45, 0x7a, 0x05, 0x45, - 0xf9, 0x25, 0xf9, 0x42, 0x7c, 0xa8, 0xa2, 0x52, 0xbc, 0x05, 0x89, 0x95, 0x39, 0xf9, 0x89, 0x29, - 0x10, 0x69, 0x29, 0x99, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xfd, 0xc4, 0x82, 0x4c, 0xfd, 0xc4, - 0xbc, 0xbc, 0xfc, 0x92, 0xc4, 0x92, 0xcc, 0xfc, 0xbc, 0x62, 0xa8, 0x2c, 0x4f, 0x41, 0x92, 0x7e, - 0x7a, 0x61, 0x0e, 0x84, 0x67, 0xc4, 0xcf, 0xc5, 0xeb, 0x09, 0x31, 0xcc, 0x0d, 0x6c, 0x96, 0x53, - 0xc1, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xc8, 0x25, 0x9b, - 0x5f, 0x94, 0xae, 0x57, 0x96, 0x92, 0x98, 0x58, 0xac, 0x57, 0x96, 0x98, 0x93, 0xa2, 0x07, 0x75, - 0x06, 0xd4, 0x7e, 0x27, 0xc1, 0xb0, 0xc4, 0x9c, 0x14, 0x14, 0xfd, 0x01, 0x8c, 0x51, 0x7a, 0xe9, - 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x60, 0xad, 0xfa, 0x20, 0xad, 0x20, - 0xd7, 0x14, 0xeb, 0xa7, 0x17, 0x15, 0x24, 0xeb, 0x43, 0x0c, 0x81, 0x79, 0x2d, 0x89, 0x0d, 0xec, - 0x12, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x72, 0x78, 0xf1, 0xa6, 0xf4, 0x00, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// IngressFilterClient is the client API for IngressFilter service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type IngressFilterClient interface { -} - -type ingressFilterClient struct { - cc *grpc.ClientConn -} - -func NewIngressFilterClient(cc *grpc.ClientConn) IngressFilterClient { - return &ingressFilterClient{cc} -} - -// IngressFilterServer is the server API for IngressFilter service. -type IngressFilterServer interface { -} - -// UnimplementedIngressFilterServer can be embedded to have forward compatible implementations. -type UnimplementedIngressFilterServer struct { -} - -func RegisterIngressFilterServer(s *grpc.Server, srv IngressFilterServer) { - s.RegisterService(&_IngressFilter_serviceDesc, srv) -} - -var _IngressFilter_serviceDesc = grpc.ServiceDesc{ - ServiceName: "ingress_filter.IngressFilter", - HandlerType: (*IngressFilterServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{}, - Metadata: "ingress/ingress_filter.proto", -} diff --git a/apis/grpc/gateway/filter/filter.pb.go b/apis/grpc/gateway/filter/filter.pb.go new file mode 100644 index 0000000000..aa315cf400 --- /dev/null +++ b/apis/grpc/gateway/filter/filter.pb.go @@ -0,0 +1,642 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package filter + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + payload "github.com/vdaas/vald/apis/grpc/payload" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("apis/proto/gateway/filter/filter.proto", fileDescriptor_098434da7999ba4a) +} + +var fileDescriptor_098434da7999ba4a = []byte{ + // 378 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0x41, 0x4b, 0x2b, 0x31, + 0x10, 0xc7, 0x9b, 0x77, 0xe8, 0x83, 0x50, 0x1e, 0x74, 0xdf, 0x3b, 0xbc, 0x16, 0x5b, 0xa4, 0x07, + 0x11, 0x0f, 0x89, 0xe8, 0xcd, 0x63, 0x0f, 0x15, 0x45, 0x51, 0x5a, 0x14, 0xf4, 0x36, 0xbb, 0x1b, + 0xb7, 0x91, 0x74, 0x13, 0x93, 0x6c, 0xa5, 0x57, 0x3f, 0x80, 0x17, 0xbf, 0x94, 0x47, 0xc1, 0x2f, + 0x20, 0xc5, 0x0f, 0x22, 0x4d, 0xb6, 0xb2, 0x16, 0x8b, 0x85, 0xed, 0x29, 0xec, 0xfc, 0x33, 0xbf, + 0x99, 0xff, 0x0e, 0x19, 0xbc, 0x05, 0x8a, 0x1b, 0xaa, 0xb4, 0xb4, 0x92, 0x26, 0x60, 0xd9, 0x3d, + 0x4c, 0xe8, 0x0d, 0x17, 0x96, 0xe9, 0xfc, 0x20, 0x4e, 0x0b, 0xaa, 0xfe, 0xab, 0xb9, 0x59, 0xb8, + 0xaf, 0x60, 0x22, 0x24, 0xc4, 0xf3, 0xd3, 0xdf, 0x6c, 0x6e, 0x24, 0x52, 0x26, 0x82, 0x51, 0x50, + 0x9c, 0x42, 0x9a, 0x4a, 0x0b, 0x96, 0xcb, 0xd4, 0x78, 0x75, 0xef, 0xf1, 0x37, 0xae, 0xf6, 0x1c, + 0x2a, 0x08, 0x71, 0x6d, 0xc0, 0x40, 0x47, 0xc3, 0xb3, 0xf0, 0x96, 0x45, 0x36, 0x68, 0x91, 0x39, + 0xc8, 0x87, 0x89, 0x8f, 0xf7, 0xd9, 0x5d, 0xc6, 0x8c, 0x6d, 0xfe, 0x5f, 0x94, 0xfb, 0xcc, 0x28, + 0x99, 0x1a, 0xd6, 0x69, 0x3c, 0xbc, 0xbe, 0x3f, 0xfd, 0xfa, 0xdb, 0xf9, 0x43, 0x8d, 0x53, 0xa8, + 0x74, 0x89, 0x07, 0x68, 0x27, 0x18, 0xe0, 0x60, 0x60, 0x35, 0x83, 0xd1, 0x7a, 0x2a, 0x55, 0xb6, + 0xd1, 0x2e, 0x0a, 0xae, 0x70, 0xed, 0x28, 0x35, 0x4c, 0xdb, 0x1c, 0xf7, 0xef, 0xf3, 0xbe, 0x0f, + 0x90, 0xae, 0x90, 0x61, 0x81, 0x92, 0x47, 0x4f, 0x64, 0xe4, 0x7e, 0x45, 0xa1, 0x5f, 0xee, 0x30, + 0x85, 0x7e, 0x8f, 0xe7, 0xfd, 0x96, 0x2a, 0xe0, 0xdb, 0xec, 0xe1, 0xfa, 0x69, 0x26, 0x2c, 0x5f, + 0x01, 0xd5, 0x58, 0x86, 0x32, 0x9d, 0xca, 0xcc, 0xee, 0x85, 0x8a, 0xc1, 0xb2, 0xd2, 0x76, 0x33, + 0x87, 0xf9, 0xce, 0x6e, 0xa9, 0x02, 0x5f, 0xed, 0xae, 0x80, 0xfa, 0xd9, 0xee, 0x5a, 0xa6, 0x9b, + 0xa9, 0x65, 0xd3, 0x2d, 0x55, 0x60, 0xd1, 0x6e, 0xa9, 0xe9, 0x76, 0xd5, 0xf3, 0xb4, 0x8d, 0x5e, + 0xa6, 0x6d, 0xf4, 0x36, 0x6d, 0x23, 0xdc, 0x92, 0x3a, 0x21, 0xe3, 0x18, 0xc0, 0x90, 0x31, 0x88, + 0x98, 0xe4, 0x2b, 0x81, 0xf8, 0xd7, 0xdf, 0xad, 0x5f, 0x82, 0x88, 0xfd, 0xf3, 0x3d, 0xf4, 0xca, + 0x39, 0xba, 0x26, 0x09, 0xb7, 0xc3, 0x2c, 0x24, 0x91, 0x1c, 0x51, 0x97, 0x4a, 0x67, 0xa9, 0xd4, + 0x2d, 0x8a, 0x44, 0xab, 0x68, 0x61, 0xaf, 0x84, 0x55, 0xb7, 0x09, 0xf6, 0x3f, 0x02, 0x00, 0x00, + 0xff, 0xff, 0x1b, 0x2c, 0x52, 0xcf, 0x7b, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// FilterClient is the client API for Filter service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type FilterClient interface { + SearchObject(ctx context.Context, in *payload.Search_ObjectRequest, opts ...grpc.CallOption) (*payload.Search_Response, error) + StreamSearchObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamSearchObjectClient, error) + InsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamInsertObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamInsertObjectClient, error) + MultiInsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Locations, error) + UpdateObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamUpdateObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamUpdateObjectClient, error) + MultiUpdateObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Locations, error) + UpsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamUpsertObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamUpsertObjectClient, error) + MultiUpsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Locations, error) +} + +type filterClient struct { + cc *grpc.ClientConn +} + +func NewFilterClient(cc *grpc.ClientConn) FilterClient { + return &filterClient{cc} +} + +func (c *filterClient) SearchObject(ctx context.Context, in *payload.Search_ObjectRequest, opts ...grpc.CallOption) (*payload.Search_Response, error) { + out := new(payload.Search_Response) + err := c.cc.Invoke(ctx, "/filter.Filter/SearchObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *filterClient) StreamSearchObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamSearchObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Filter_serviceDesc.Streams[0], "/filter.Filter/StreamSearchObject", opts...) + if err != nil { + return nil, err + } + x := &filterStreamSearchObjectClient{stream} + return x, nil +} + +type Filter_StreamSearchObjectClient interface { + Send(*payload.Search_ObjectRequest) error + Recv() (*payload.Search_Response, error) + grpc.ClientStream +} + +type filterStreamSearchObjectClient struct { + grpc.ClientStream +} + +func (x *filterStreamSearchObjectClient) Send(m *payload.Search_ObjectRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *filterStreamSearchObjectClient) Recv() (*payload.Search_Response, error) { + m := new(payload.Search_Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *filterClient) InsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/filter.Filter/InsertObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *filterClient) StreamInsertObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamInsertObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Filter_serviceDesc.Streams[1], "/filter.Filter/StreamInsertObject", opts...) 
+ if err != nil { + return nil, err + } + x := &filterStreamInsertObjectClient{stream} + return x, nil +} + +type Filter_StreamInsertObjectClient interface { + Send(*payload.Object_Blob) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type filterStreamInsertObjectClient struct { + grpc.ClientStream +} + +func (x *filterStreamInsertObjectClient) Send(m *payload.Object_Blob) error { + return x.ClientStream.SendMsg(m) +} + +func (x *filterStreamInsertObjectClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *filterClient) MultiInsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/filter.Filter/MultiInsertObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *filterClient) UpdateObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/filter.Filter/UpdateObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *filterClient) StreamUpdateObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamUpdateObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Filter_serviceDesc.Streams[2], "/filter.Filter/StreamUpdateObject", opts...) + if err != nil { + return nil, err + } + x := &filterStreamUpdateObjectClient{stream} + return x, nil +} + +type Filter_StreamUpdateObjectClient interface { + Send(*payload.Object_Blob) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type filterStreamUpdateObjectClient struct { + grpc.ClientStream +} + +func (x *filterStreamUpdateObjectClient) Send(m *payload.Object_Blob) error { + return x.ClientStream.SendMsg(m) +} + +func (x *filterStreamUpdateObjectClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *filterClient) MultiUpdateObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/filter.Filter/MultiUpdateObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *filterClient) UpsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/filter.Filter/UpsertObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *filterClient) StreamUpsertObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamUpsertObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Filter_serviceDesc.Streams[3], "/filter.Filter/StreamUpsertObject", opts...) 
+ if err != nil { + return nil, err + } + x := &filterStreamUpsertObjectClient{stream} + return x, nil +} + +type Filter_StreamUpsertObjectClient interface { + Send(*payload.Object_Blob) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type filterStreamUpsertObjectClient struct { + grpc.ClientStream +} + +func (x *filterStreamUpsertObjectClient) Send(m *payload.Object_Blob) error { + return x.ClientStream.SendMsg(m) +} + +func (x *filterStreamUpsertObjectClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *filterClient) MultiUpsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/filter.Filter/MultiUpsertObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// FilterServer is the server API for Filter service. +type FilterServer interface { + SearchObject(context.Context, *payload.Search_ObjectRequest) (*payload.Search_Response, error) + StreamSearchObject(Filter_StreamSearchObjectServer) error + InsertObject(context.Context, *payload.Object_Blob) (*payload.Object_Location, error) + StreamInsertObject(Filter_StreamInsertObjectServer) error + MultiInsertObject(context.Context, *payload.Object_Blob) (*payload.Object_Locations, error) + UpdateObject(context.Context, *payload.Object_Blob) (*payload.Object_Location, error) + StreamUpdateObject(Filter_StreamUpdateObjectServer) error + MultiUpdateObject(context.Context, *payload.Object_Blob) (*payload.Object_Locations, error) + UpsertObject(context.Context, *payload.Object_Blob) (*payload.Object_Location, error) + StreamUpsertObject(Filter_StreamUpsertObjectServer) error + MultiUpsertObject(context.Context, *payload.Object_Blob) (*payload.Object_Locations, error) +} + +// UnimplementedFilterServer can be embedded to have forward compatible implementations. 
+type UnimplementedFilterServer struct { +} + +func (*UnimplementedFilterServer) SearchObject(ctx context.Context, req *payload.Search_ObjectRequest) (*payload.Search_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method SearchObject not implemented") +} +func (*UnimplementedFilterServer) StreamSearchObject(srv Filter_StreamSearchObjectServer) error { + return status.Errorf(codes.Unimplemented, "method StreamSearchObject not implemented") +} +func (*UnimplementedFilterServer) InsertObject(ctx context.Context, req *payload.Object_Blob) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method InsertObject not implemented") +} +func (*UnimplementedFilterServer) StreamInsertObject(srv Filter_StreamInsertObjectServer) error { + return status.Errorf(codes.Unimplemented, "method StreamInsertObject not implemented") +} +func (*UnimplementedFilterServer) MultiInsertObject(ctx context.Context, req *payload.Object_Blob) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiInsertObject not implemented") +} +func (*UnimplementedFilterServer) UpdateObject(ctx context.Context, req *payload.Object_Blob) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateObject not implemented") +} +func (*UnimplementedFilterServer) StreamUpdateObject(srv Filter_StreamUpdateObjectServer) error { + return status.Errorf(codes.Unimplemented, "method StreamUpdateObject not implemented") +} +func (*UnimplementedFilterServer) MultiUpdateObject(ctx context.Context, req *payload.Object_Blob) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiUpdateObject not implemented") +} +func (*UnimplementedFilterServer) UpsertObject(ctx context.Context, req *payload.Object_Blob) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpsertObject not implemented") +} +func (*UnimplementedFilterServer) StreamUpsertObject(srv Filter_StreamUpsertObjectServer) error { + return status.Errorf(codes.Unimplemented, "method StreamUpsertObject not implemented") +} +func (*UnimplementedFilterServer) MultiUpsertObject(ctx context.Context, req *payload.Object_Blob) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiUpsertObject not implemented") +} + +func RegisterFilterServer(s *grpc.Server, srv FilterServer) { + s.RegisterService(&_Filter_serviceDesc, srv) +} + +func _Filter_SearchObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Search_ObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).SearchObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filter.Filter/SearchObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).SearchObject(ctx, req.(*payload.Search_ObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Filter_StreamSearchObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FilterServer).StreamSearchObject(&filterStreamSearchObjectServer{stream}) +} + +type Filter_StreamSearchObjectServer interface { + Send(*payload.Search_Response) error + Recv() (*payload.Search_ObjectRequest, error) + grpc.ServerStream +} + +type 
filterStreamSearchObjectServer struct { + grpc.ServerStream +} + +func (x *filterStreamSearchObjectServer) Send(m *payload.Search_Response) error { + return x.ServerStream.SendMsg(m) +} + +func (x *filterStreamSearchObjectServer) Recv() (*payload.Search_ObjectRequest, error) { + m := new(payload.Search_ObjectRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Filter_InsertObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Blob) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).InsertObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filter.Filter/InsertObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).InsertObject(ctx, req.(*payload.Object_Blob)) + } + return interceptor(ctx, in, info, handler) +} + +func _Filter_StreamInsertObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FilterServer).StreamInsertObject(&filterStreamInsertObjectServer{stream}) +} + +type Filter_StreamInsertObjectServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Object_Blob, error) + grpc.ServerStream +} + +type filterStreamInsertObjectServer struct { + grpc.ServerStream +} + +func (x *filterStreamInsertObjectServer) Send(m *payload.Object_Location) error { + return x.ServerStream.SendMsg(m) +} + +func (x *filterStreamInsertObjectServer) Recv() (*payload.Object_Blob, error) { + m := new(payload.Object_Blob) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Filter_MultiInsertObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Blob) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).MultiInsertObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filter.Filter/MultiInsertObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).MultiInsertObject(ctx, req.(*payload.Object_Blob)) + } + return interceptor(ctx, in, info, handler) +} + +func _Filter_UpdateObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Blob) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).UpdateObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filter.Filter/UpdateObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).UpdateObject(ctx, req.(*payload.Object_Blob)) + } + return interceptor(ctx, in, info, handler) +} + +func _Filter_StreamUpdateObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FilterServer).StreamUpdateObject(&filterStreamUpdateObjectServer{stream}) +} + +type Filter_StreamUpdateObjectServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Object_Blob, error) + grpc.ServerStream +} + +type filterStreamUpdateObjectServer struct { + grpc.ServerStream +} + +func (x *filterStreamUpdateObjectServer) Send(m 
*payload.Object_Location) error { + return x.ServerStream.SendMsg(m) +} + +func (x *filterStreamUpdateObjectServer) Recv() (*payload.Object_Blob, error) { + m := new(payload.Object_Blob) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Filter_MultiUpdateObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Blob) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).MultiUpdateObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filter.Filter/MultiUpdateObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).MultiUpdateObject(ctx, req.(*payload.Object_Blob)) + } + return interceptor(ctx, in, info, handler) +} + +func _Filter_UpsertObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Blob) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).UpsertObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filter.Filter/UpsertObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).UpsertObject(ctx, req.(*payload.Object_Blob)) + } + return interceptor(ctx, in, info, handler) +} + +func _Filter_StreamUpsertObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FilterServer).StreamUpsertObject(&filterStreamUpsertObjectServer{stream}) +} + +type Filter_StreamUpsertObjectServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Object_Blob, error) + grpc.ServerStream +} + +type filterStreamUpsertObjectServer struct { + grpc.ServerStream +} + +func (x *filterStreamUpsertObjectServer) Send(m *payload.Object_Location) error { + return x.ServerStream.SendMsg(m) +} + +func (x *filterStreamUpsertObjectServer) Recv() (*payload.Object_Blob, error) { + m := new(payload.Object_Blob) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Filter_MultiUpsertObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Blob) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).MultiUpsertObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filter.Filter/MultiUpsertObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).MultiUpsertObject(ctx, req.(*payload.Object_Blob)) + } + return interceptor(ctx, in, info, handler) +} + +var _Filter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "filter.Filter", + HandlerType: (*FilterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SearchObject", + Handler: _Filter_SearchObject_Handler, + }, + { + MethodName: "InsertObject", + Handler: _Filter_InsertObject_Handler, + }, + { + MethodName: "MultiInsertObject", + Handler: _Filter_MultiInsertObject_Handler, + }, + { + MethodName: "UpdateObject", + Handler: _Filter_UpdateObject_Handler, + }, + { + MethodName: "MultiUpdateObject", + Handler: _Filter_MultiUpdateObject_Handler, + }, + { + 
MethodName: "UpsertObject", + Handler: _Filter_UpsertObject_Handler, + }, + { + MethodName: "MultiUpsertObject", + Handler: _Filter_MultiUpsertObject_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamSearchObject", + Handler: _Filter_StreamSearchObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamInsertObject", + Handler: _Filter_StreamInsertObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamUpdateObject", + Handler: _Filter_StreamUpdateObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamUpsertObject", + Handler: _Filter_StreamUpsertObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "apis/proto/gateway/filter/filter.proto", +} diff --git a/apis/grpc/gateway/vald/vald.pb.go b/apis/grpc/gateway/vald/vald.pb.go index 58ea02233f..9f1a719e9d 100644 --- a/apis/grpc/gateway/vald/vald.pb.go +++ b/apis/grpc/gateway/vald/vald.pb.go @@ -21,7 +21,6 @@ import ( fmt "fmt" math "math" - _ "github.com/danielvladco/go-proto-gql/pb" proto "github.com/gogo/protobuf/proto" payload "github.com/vdaas/vald/apis/grpc/payload" _ "google.golang.org/genproto/googleapis/api/annotations" @@ -41,43 +40,44 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func init() { proto.RegisterFile("vald/vald.proto", fileDescriptor_b17c9fbea32974eb) } - -var fileDescriptor_b17c9fbea32974eb = []byte{ - // 518 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xc1, 0x6e, 0xd3, 0x30, - 0x18, 0xc7, 0xc9, 0x54, 0x65, 0xc2, 0x84, 0x16, 0x19, 0xd8, 0x46, 0x40, 0x9d, 0x94, 0x13, 0x9a, - 0x50, 0x8c, 0x80, 0xc3, 0x84, 0xb8, 0x50, 0x75, 0x2b, 0x3d, 0x54, 0x83, 0x4d, 0x4c, 0x88, 0x9b, - 0x93, 0x58, 0x99, 0x21, 0x8d, 0xbd, 0xf8, 0x4b, 0xa1, 0x42, 0x5c, 0x78, 0x05, 0x5e, 0x64, 0xbc, - 0x05, 0x47, 0x24, 0x5e, 0xa0, 0xaa, 0x78, 0x10, 0x14, 0x3b, 0x8d, 0xb6, 0x36, 0x12, 0x4a, 0xb9, - 0x54, 0xf5, 0xe7, 0xef, 0xff, 0xcb, 0xff, 0x6f, 0xcb, 0x1f, 0xea, 0x4c, 0x68, 0x12, 0x91, 0xe2, - 0xc7, 0x97, 0x99, 0x00, 0x81, 0x5b, 0xc5, 0x7f, 0xf7, 0xa6, 0xa4, 0xd3, 0x44, 0xd0, 0xb2, 0xe8, - 0x3e, 0x88, 0x85, 0x88, 0x13, 0x46, 0xa8, 0xe4, 0x84, 0xa6, 0xa9, 0x00, 0x0a, 0x5c, 0xa4, 0xaa, - 0xdc, 0x75, 0x64, 0x40, 0xe2, 0xf3, 0xc4, 0xac, 0x9e, 0xfc, 0x40, 0xa8, 0x75, 0x4a, 0x93, 0x08, - 0x1f, 0x22, 0xfb, 0xe0, 0x33, 0x57, 0xa0, 0x30, 0xf6, 0x17, 0xb8, 0xa3, 0xe0, 0x03, 0x0b, 0xc1, - 0x1f, 0xf6, 0xdd, 0x9a, 0x9a, 0x77, 0xe7, 0xdb, 0xef, 0x3f, 0xdf, 0x37, 0xda, 0xd8, 0x21, 0x4c, - 0x0b, 0xc9, 0x17, 0x1e, 0x7d, 0xc5, 0x47, 0xc8, 0x3e, 0x61, 0x34, 0x0b, 0xcf, 0xf0, 0x76, 0xa5, - 0x31, 0x05, 0xff, 0x98, 0x9d, 0xe7, 0x4c, 0x81, 0xbb, 0xb3, 0xba, 0xa1, 0xa4, 0x48, 0x15, 0xf3, - 0xb0, 0x46, 0x3a, 0xde, 0x26, 0x51, 0x7a, 0xe7, 0xb9, 0xb5, 0x87, 0xdf, 0x21, 0x64, 0xda, 0x7a, - 0xd3, 0x61, 0x1f, 0xdf, 0x5b, 0xd6, 0x0e, 0xfb, 0xff, 0xc6, 0xde, 0xd5, 0xd8, 0x8e, 0x87, 0x4a, - 0x2c, 0xe1, 0x51, 0x41, 0x1e, 0x20, 0xe7, 0x04, 0x32, 0x46, 0xc7, 0xeb, 0x1b, 0xbe, 0xf6, 0xd0, - 0x7a, 0x6c, 0xe1, 0x11, 0xba, 0x75, 0x19, 0xb4, 0xbe, 0x51, 0x83, 0x7b, 0x85, 0xec, 0x61, 0xaa, - 0x58, 0x06, 0x78, 0x6b, 0xf9, 0xd8, 0x4f, 0x59, 0x08, 0x22, 0x73, 0xdb, 0x55, 0xfd, 0x60, 0x2c, - 0x61, 0xea, 0x6d, 0x5d, 0xcc, 0x76, 0xad, 0xea, 0xec, 0xb8, 0x16, 0x17, 0x09, 0x5f, 0x2c, 0x12, - 0x36, 0xe4, 0x19, 0x1f, 0xfb, 0xe8, 0xc6, 0x28, 0x4f, 0x80, 0x97, 0xe2, 0xed, 0x7a, 0xb1, 0x5a, - 
0x55, 0x17, 0x09, 0xde, 0xca, 0x88, 0x02, 0x5b, 0x33, 0x41, 0xae, 0xc5, 0x57, 0x12, 0x34, 0xe4, - 0x5d, 0x4d, 0x50, 0x8a, 0x9b, 0x26, 0xf8, 0x8f, 0x3b, 0xc8, 0xe5, 0xca, 0x1d, 0x34, 0xe4, 0x2d, - 0x27, 0x68, 0x7a, 0x07, 0x87, 0xc8, 0x3e, 0x66, 0x63, 0x31, 0x61, 0xb5, 0x0f, 0x7a, 0xb9, 0x7f, - 0xa7, 0x72, 0xdf, 0xde, 0x73, 0x48, 0xa6, 0x85, 0xe6, 0x41, 0xef, 0x2f, 0xfc, 0x37, 0xa0, 0x19, - 0xef, 0xcf, 0x4a, 0xef, 0xa5, 0xf0, 0xf6, 0xaa, 0xb0, 0xce, 0xf7, 0x1b, 0x74, 0x7d, 0xc0, 0xc0, - 0xb4, 0xd4, 0x7e, 0xcc, 0xad, 0x6a, 0x3d, 0x1a, 0x7e, 0xcc, 0xa5, 0x3f, 0x62, 0x40, 0xcd, 0x39, - 0x5c, 0x9a, 0x49, 0x42, 0xf7, 0x9b, 0x08, 0x03, 0xd4, 0x31, 0x11, 0xd6, 0x07, 0xeb, 0x44, 0x6e, - 0xeb, 0x62, 0xb6, 0xbb, 0xd1, 0x0b, 0x7e, 0xce, 0xbb, 0xd6, 0xaf, 0x79, 0xd7, 0x9a, 0xcd, 0xbb, - 0x16, 0xba, 0x2f, 0xb2, 0xd8, 0x9f, 0x44, 0x94, 0x2a, 0x5f, 0x0f, 0xe6, 0x98, 0x02, 0xfb, 0x44, - 0xa7, 0x7a, 0xd1, 0xdb, 0x2c, 0x66, 0xeb, 0x4b, 0xc9, 0x5f, 0x5b, 0xef, 0x1f, 0xc5, 0x1c, 0xce, - 0xf2, 0xc0, 0x0f, 0xc5, 0x98, 0xe8, 0x76, 0x3d, 0xc7, 0x8b, 0x21, 0xad, 0x48, 0x9c, 0xc9, 0x90, - 0x94, 0x42, 0x5d, 0x0e, 0x6c, 0x3d, 0x9e, 0x9f, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x95, 0xf1, - 0xaa, 0xd0, 0xf2, 0x05, 0x00, 0x00, +func init() { + proto.RegisterFile("apis/proto/gateway/vald/vald.proto", fileDescriptor_667acc189b85e014) +} + +var fileDescriptor_667acc189b85e014 = []byte{ + // 500 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0x4f, 0x6b, 0x13, 0x41, + 0x14, 0xc0, 0xbb, 0x52, 0xb6, 0x38, 0x0d, 0x56, 0xc6, 0x3f, 0xb5, 0xab, 0x04, 0xd9, 0x93, 0x14, + 0xd9, 0x11, 0xbd, 0x79, 0x11, 0xd3, 0xd8, 0x12, 0x68, 0xac, 0xb4, 0x58, 0xc4, 0xdb, 0x64, 0x77, + 0xd8, 0x8e, 0x6c, 0x76, 0xc6, 0x99, 0xd9, 0x68, 0x10, 0x2f, 0x7e, 0x01, 0x0f, 0x7e, 0x29, 0x8f, + 0x82, 0x5f, 0x40, 0x82, 0x1f, 0x44, 0xf6, 0xcd, 0x44, 0x36, 0xc9, 0x86, 0xc0, 0xee, 0x25, 0x7f, + 0xde, 0xcc, 0xfb, 0xf1, 0x7e, 0xf3, 0xe0, 0x3d, 0x14, 0x52, 0xc9, 0x35, 0x91, 0x4a, 0x18, 0x41, + 0x52, 0x6a, 0xd8, 0x27, 0x3a, 0x25, 0x13, 0x9a, 0x25, 0xf0, 0x11, 0x41, 0x1c, 0x6f, 0x97, 0xbf, + 0x83, 0x87, 0x95, 0x9b, 0x92, 0x4e, 0x33, 0x41, 0x93, 0xf9, 0xb7, 0xbd, 0x17, 0x3c, 0x48, 0x85, + 0x48, 0x33, 0x46, 0xa8, 0xe4, 0x84, 0xe6, 0xb9, 0x30, 0xd4, 0x70, 0x91, 0x6b, 0x7b, 0xfa, 0xf4, + 0xfb, 0x2e, 0xda, 0xbe, 0xa4, 0x59, 0x82, 0x8f, 0x91, 0xff, 0xea, 0x33, 0xd7, 0x46, 0x63, 0x1c, + 0xcd, 0x01, 0x67, 0xa3, 0x0f, 0x2c, 0x36, 0xd1, 0xa0, 0x1f, 0xd4, 0xc4, 0xc2, 0xdb, 0xdf, 0x7e, + 0xff, 0xfd, 0x71, 0xed, 0x06, 0xee, 0x10, 0x06, 0x89, 0xe4, 0x0b, 0x4f, 0xbe, 0xe2, 0x33, 0xe4, + 0x5f, 0x30, 0xaa, 0xe2, 0x2b, 0xbc, 0xff, 0x3f, 0xc7, 0x06, 0xa2, 0x73, 0xf6, 0xb1, 0x60, 0xda, + 0x04, 0xf7, 0x56, 0x0f, 0xb4, 0x14, 0xb9, 0x66, 0x21, 0x06, 0x64, 0x27, 0xdc, 0x21, 0x1a, 0x4e, + 0x9e, 0x7b, 0x87, 0xf8, 0x1d, 0x42, 0xf6, 0x5a, 0x6f, 0x3a, 0xe8, 0xe3, 0x83, 0xe5, 0xdc, 0x41, + 0x7f, 0x33, 0xf6, 0x0e, 0x60, 0xf7, 0x42, 0xe4, 0xb0, 0x84, 0x27, 0x25, 0xf9, 0x04, 0x75, 0x2e, + 0x8c, 0x62, 0x74, 0xdc, 0xbc, 0xe0, 0xad, 0x47, 0xde, 0x13, 0x0f, 0x0f, 0xd1, 0xcd, 0x2a, 0xa8, + 0x79, 0xa1, 0x16, 0xf7, 0x1a, 0xf9, 0x83, 0x5c, 0x33, 0x65, 0xf0, 0xdd, 0xe5, 0x67, 0xbf, 0x64, + 0xb1, 0x11, 0xaa, 0x42, 0x70, 0xf1, 0x53, 0x11, 0x43, 0x5b, 0x2b, 0x2f, 0xc8, 0x01, 0x51, 0x7a, + 0x1e, 0xcf, 0x3d, 0x1b, 0x53, 0x6d, 0x5d, 0x47, 0x68, 0x77, 0x58, 0x64, 0x86, 0x3b, 0xcc, 0x7e, + 0x3d, 0x46, 0x07, 0x07, 0xeb, 0x38, 0x3a, 0xdc, 0x2a, 0xe5, 0xde, 0xca, 0x84, 0x1a, 0xd6, 0x4a, + 0xae, 0x00, 0xc4, 0x82, 0x5c, 0x63, 0xea, 0xa2, 0x9c, 0xc3, 0xb4, 0x90, 
0x6b, 0xdd, 0xb9, 0x42, + 0xae, 0x74, 0xae, 0x31, 0x75, 0x59, 0xae, 0x45, 0xe7, 0x4e, 0x91, 0x7f, 0xce, 0xc6, 0x62, 0xc2, + 0x6a, 0x27, 0xc4, 0xfa, 0x12, 0xdc, 0x9c, 0x38, 0xec, 0x10, 0x05, 0xe9, 0x76, 0x4e, 0xf4, 0xe6, + 0x6a, 0x8d, 0x98, 0x56, 0xeb, 0x85, 0xd3, 0x72, 0x88, 0x5b, 0xab, 0x88, 0x0d, 0x4a, 0x43, 0x74, + 0xfd, 0x84, 0x19, 0x7b, 0x50, 0x5b, 0xc1, 0x9a, 0x07, 0xaf, 0xcc, 0x3e, 0x01, 0x71, 0xeb, 0x74, + 0x84, 0xf6, 0xac, 0x53, 0x33, 0x28, 0x48, 0xf5, 0x46, 0x3f, 0x67, 0x5d, 0xef, 0xd7, 0xac, 0xeb, + 0xfd, 0x99, 0x75, 0x3d, 0x74, 0x5f, 0xa8, 0x34, 0x9a, 0x24, 0x94, 0xea, 0x08, 0x66, 0xbf, 0xdb, + 0x06, 0xf0, 0xa7, 0xb7, 0x53, 0x4e, 0xee, 0x97, 0x92, 0xbf, 0xf1, 0xde, 0x3f, 0x4e, 0xb9, 0xb9, + 0x2a, 0x46, 0x51, 0x2c, 0xc6, 0x04, 0xae, 0xdb, 0x7d, 0x01, 0xdb, 0x21, 0x55, 0x32, 0x5e, 0x58, + 0x23, 0x23, 0x1f, 0x86, 0xff, 0xb3, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x4b, 0x07, 0xef, + 0x68, 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -97,19 +97,19 @@ type ValdClient interface { SearchByID(ctx context.Context, in *payload.Search_IDRequest, opts ...grpc.CallOption) (*payload.Search_Response, error) StreamSearch(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamSearchClient, error) StreamSearchByID(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamSearchByIDClient, error) - Insert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Empty, error) + Insert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Location, error) StreamInsert(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamInsertClient, error) - MultiInsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) - Update(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Empty, error) + MultiInsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Object_Locations, error) + Update(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Location, error) StreamUpdate(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamUpdateClient, error) - MultiUpdate(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) - Upsert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Empty, error) + MultiUpdate(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Object_Locations, error) + Upsert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Location, error) StreamUpsert(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamUpsertClient, error) - MultiUpsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) - Remove(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Empty, error) + MultiUpsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Object_Locations, error) + Remove(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Location, error) StreamRemove(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamRemoveClient, error) - MultiRemove(ctx context.Context, in *payload.Object_IDs, opts ...grpc.CallOption) (*payload.Empty, error) - GetObject(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Backup_MetaVector, error) + 
MultiRemove(ctx context.Context, in *payload.Object_IDs, opts ...grpc.CallOption) (*payload.Object_Locations, error) + GetObject(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Vector, error) StreamGetObject(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamGetObjectClient, error) } @@ -210,8 +210,8 @@ func (x *valdStreamSearchByIDClient) Recv() (*payload.Search_Response, error) { return m, nil } -func (c *valdClient) Insert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) +func (c *valdClient) Insert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) err := c.cc.Invoke(ctx, "/vald.Vald/Insert", in, out, opts...) if err != nil { return nil, err @@ -230,7 +230,7 @@ func (c *valdClient) StreamInsert(ctx context.Context, opts ...grpc.CallOption) type Vald_StreamInsertClient interface { Send(*payload.Object_Vector) error - Recv() (*payload.Empty, error) + Recv() (*payload.Object_Location, error) grpc.ClientStream } @@ -242,16 +242,16 @@ func (x *valdStreamInsertClient) Send(m *payload.Object_Vector) error { return x.ClientStream.SendMsg(m) } -func (x *valdStreamInsertClient) Recv() (*payload.Empty, error) { - m := new(payload.Empty) +func (x *valdStreamInsertClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -func (c *valdClient) MultiInsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) +func (c *valdClient) MultiInsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) err := c.cc.Invoke(ctx, "/vald.Vald/MultiInsert", in, out, opts...) if err != nil { return nil, err @@ -259,8 +259,8 @@ func (c *valdClient) MultiInsert(ctx context.Context, in *payload.Object_Vectors return out, nil } -func (c *valdClient) Update(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) +func (c *valdClient) Update(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) err := c.cc.Invoke(ctx, "/vald.Vald/Update", in, out, opts...) 
if err != nil { return nil, err @@ -279,7 +279,7 @@ func (c *valdClient) StreamUpdate(ctx context.Context, opts ...grpc.CallOption) type Vald_StreamUpdateClient interface { Send(*payload.Object_Vector) error - Recv() (*payload.Empty, error) + Recv() (*payload.Object_Location, error) grpc.ClientStream } @@ -291,16 +291,16 @@ func (x *valdStreamUpdateClient) Send(m *payload.Object_Vector) error { return x.ClientStream.SendMsg(m) } -func (x *valdStreamUpdateClient) Recv() (*payload.Empty, error) { - m := new(payload.Empty) +func (x *valdStreamUpdateClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -func (c *valdClient) MultiUpdate(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) +func (c *valdClient) MultiUpdate(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) err := c.cc.Invoke(ctx, "/vald.Vald/MultiUpdate", in, out, opts...) if err != nil { return nil, err @@ -308,8 +308,8 @@ func (c *valdClient) MultiUpdate(ctx context.Context, in *payload.Object_Vectors return out, nil } -func (c *valdClient) Upsert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) +func (c *valdClient) Upsert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) err := c.cc.Invoke(ctx, "/vald.Vald/Upsert", in, out, opts...) if err != nil { return nil, err @@ -328,7 +328,7 @@ func (c *valdClient) StreamUpsert(ctx context.Context, opts ...grpc.CallOption) type Vald_StreamUpsertClient interface { Send(*payload.Object_Vector) error - Recv() (*payload.Empty, error) + Recv() (*payload.Object_Location, error) grpc.ClientStream } @@ -340,16 +340,16 @@ func (x *valdStreamUpsertClient) Send(m *payload.Object_Vector) error { return x.ClientStream.SendMsg(m) } -func (x *valdStreamUpsertClient) Recv() (*payload.Empty, error) { - m := new(payload.Empty) +func (x *valdStreamUpsertClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -func (c *valdClient) MultiUpsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) +func (c *valdClient) MultiUpsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) err := c.cc.Invoke(ctx, "/vald.Vald/MultiUpsert", in, out, opts...) if err != nil { return nil, err @@ -357,8 +357,8 @@ func (c *valdClient) MultiUpsert(ctx context.Context, in *payload.Object_Vectors return out, nil } -func (c *valdClient) Remove(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) +func (c *valdClient) Remove(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) err := c.cc.Invoke(ctx, "/vald.Vald/Remove", in, out, opts...) 
if err != nil { return nil, err @@ -377,7 +377,7 @@ func (c *valdClient) StreamRemove(ctx context.Context, opts ...grpc.CallOption) type Vald_StreamRemoveClient interface { Send(*payload.Object_ID) error - Recv() (*payload.Empty, error) + Recv() (*payload.Object_Location, error) grpc.ClientStream } @@ -389,16 +389,16 @@ func (x *valdStreamRemoveClient) Send(m *payload.Object_ID) error { return x.ClientStream.SendMsg(m) } -func (x *valdStreamRemoveClient) Recv() (*payload.Empty, error) { - m := new(payload.Empty) +func (x *valdStreamRemoveClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -func (c *valdClient) MultiRemove(ctx context.Context, in *payload.Object_IDs, opts ...grpc.CallOption) (*payload.Empty, error) { - out := new(payload.Empty) +func (c *valdClient) MultiRemove(ctx context.Context, in *payload.Object_IDs, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) err := c.cc.Invoke(ctx, "/vald.Vald/MultiRemove", in, out, opts...) if err != nil { return nil, err @@ -406,8 +406,8 @@ func (c *valdClient) MultiRemove(ctx context.Context, in *payload.Object_IDs, op return out, nil } -func (c *valdClient) GetObject(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Backup_MetaVector, error) { - out := new(payload.Backup_MetaVector) +func (c *valdClient) GetObject(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Vector, error) { + out := new(payload.Object_Vector) err := c.cc.Invoke(ctx, "/vald.Vald/GetObject", in, out, opts...) if err != nil { return nil, err @@ -426,7 +426,7 @@ func (c *valdClient) StreamGetObject(ctx context.Context, opts ...grpc.CallOptio type Vald_StreamGetObjectClient interface { Send(*payload.Object_ID) error - Recv() (*payload.Backup_MetaVector, error) + Recv() (*payload.Object_Vector, error) grpc.ClientStream } @@ -438,8 +438,8 @@ func (x *valdStreamGetObjectClient) Send(m *payload.Object_ID) error { return x.ClientStream.SendMsg(m) } -func (x *valdStreamGetObjectClient) Recv() (*payload.Backup_MetaVector, error) { - m := new(payload.Backup_MetaVector) +func (x *valdStreamGetObjectClient) Recv() (*payload.Object_Vector, error) { + m := new(payload.Object_Vector) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } @@ -453,19 +453,19 @@ type ValdServer interface { SearchByID(context.Context, *payload.Search_IDRequest) (*payload.Search_Response, error) StreamSearch(Vald_StreamSearchServer) error StreamSearchByID(Vald_StreamSearchByIDServer) error - Insert(context.Context, *payload.Object_Vector) (*payload.Empty, error) + Insert(context.Context, *payload.Object_Vector) (*payload.Object_Location, error) StreamInsert(Vald_StreamInsertServer) error - MultiInsert(context.Context, *payload.Object_Vectors) (*payload.Empty, error) - Update(context.Context, *payload.Object_Vector) (*payload.Empty, error) + MultiInsert(context.Context, *payload.Object_Vectors) (*payload.Object_Locations, error) + Update(context.Context, *payload.Object_Vector) (*payload.Object_Location, error) StreamUpdate(Vald_StreamUpdateServer) error - MultiUpdate(context.Context, *payload.Object_Vectors) (*payload.Empty, error) - Upsert(context.Context, *payload.Object_Vector) (*payload.Empty, error) + MultiUpdate(context.Context, *payload.Object_Vectors) (*payload.Object_Locations, error) + Upsert(context.Context, *payload.Object_Vector) 
(*payload.Object_Location, error) StreamUpsert(Vald_StreamUpsertServer) error - MultiUpsert(context.Context, *payload.Object_Vectors) (*payload.Empty, error) - Remove(context.Context, *payload.Object_ID) (*payload.Empty, error) + MultiUpsert(context.Context, *payload.Object_Vectors) (*payload.Object_Locations, error) + Remove(context.Context, *payload.Object_ID) (*payload.Object_Location, error) StreamRemove(Vald_StreamRemoveServer) error - MultiRemove(context.Context, *payload.Object_IDs) (*payload.Empty, error) - GetObject(context.Context, *payload.Object_ID) (*payload.Backup_MetaVector, error) + MultiRemove(context.Context, *payload.Object_IDs) (*payload.Object_Locations, error) + GetObject(context.Context, *payload.Object_ID) (*payload.Object_Vector, error) StreamGetObject(Vald_StreamGetObjectServer) error } @@ -488,43 +488,43 @@ func (*UnimplementedValdServer) StreamSearch(srv Vald_StreamSearchServer) error func (*UnimplementedValdServer) StreamSearchByID(srv Vald_StreamSearchByIDServer) error { return status.Errorf(codes.Unimplemented, "method StreamSearchByID not implemented") } -func (*UnimplementedValdServer) Insert(ctx context.Context, req *payload.Object_Vector) (*payload.Empty, error) { +func (*UnimplementedValdServer) Insert(ctx context.Context, req *payload.Object_Vector) (*payload.Object_Location, error) { return nil, status.Errorf(codes.Unimplemented, "method Insert not implemented") } func (*UnimplementedValdServer) StreamInsert(srv Vald_StreamInsertServer) error { return status.Errorf(codes.Unimplemented, "method StreamInsert not implemented") } -func (*UnimplementedValdServer) MultiInsert(ctx context.Context, req *payload.Object_Vectors) (*payload.Empty, error) { +func (*UnimplementedValdServer) MultiInsert(ctx context.Context, req *payload.Object_Vectors) (*payload.Object_Locations, error) { return nil, status.Errorf(codes.Unimplemented, "method MultiInsert not implemented") } -func (*UnimplementedValdServer) Update(ctx context.Context, req *payload.Object_Vector) (*payload.Empty, error) { +func (*UnimplementedValdServer) Update(ctx context.Context, req *payload.Object_Vector) (*payload.Object_Location, error) { return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") } func (*UnimplementedValdServer) StreamUpdate(srv Vald_StreamUpdateServer) error { return status.Errorf(codes.Unimplemented, "method StreamUpdate not implemented") } -func (*UnimplementedValdServer) MultiUpdate(ctx context.Context, req *payload.Object_Vectors) (*payload.Empty, error) { +func (*UnimplementedValdServer) MultiUpdate(ctx context.Context, req *payload.Object_Vectors) (*payload.Object_Locations, error) { return nil, status.Errorf(codes.Unimplemented, "method MultiUpdate not implemented") } -func (*UnimplementedValdServer) Upsert(ctx context.Context, req *payload.Object_Vector) (*payload.Empty, error) { +func (*UnimplementedValdServer) Upsert(ctx context.Context, req *payload.Object_Vector) (*payload.Object_Location, error) { return nil, status.Errorf(codes.Unimplemented, "method Upsert not implemented") } func (*UnimplementedValdServer) StreamUpsert(srv Vald_StreamUpsertServer) error { return status.Errorf(codes.Unimplemented, "method StreamUpsert not implemented") } -func (*UnimplementedValdServer) MultiUpsert(ctx context.Context, req *payload.Object_Vectors) (*payload.Empty, error) { +func (*UnimplementedValdServer) MultiUpsert(ctx context.Context, req *payload.Object_Vectors) (*payload.Object_Locations, error) { return nil, status.Errorf(codes.Unimplemented, "method 
MultiUpsert not implemented") } -func (*UnimplementedValdServer) Remove(ctx context.Context, req *payload.Object_ID) (*payload.Empty, error) { +func (*UnimplementedValdServer) Remove(ctx context.Context, req *payload.Object_ID) (*payload.Object_Location, error) { return nil, status.Errorf(codes.Unimplemented, "method Remove not implemented") } func (*UnimplementedValdServer) StreamRemove(srv Vald_StreamRemoveServer) error { return status.Errorf(codes.Unimplemented, "method StreamRemove not implemented") } -func (*UnimplementedValdServer) MultiRemove(ctx context.Context, req *payload.Object_IDs) (*payload.Empty, error) { +func (*UnimplementedValdServer) MultiRemove(ctx context.Context, req *payload.Object_IDs) (*payload.Object_Locations, error) { return nil, status.Errorf(codes.Unimplemented, "method MultiRemove not implemented") } -func (*UnimplementedValdServer) GetObject(ctx context.Context, req *payload.Object_ID) (*payload.Backup_MetaVector, error) { +func (*UnimplementedValdServer) GetObject(ctx context.Context, req *payload.Object_ID) (*payload.Object_Vector, error) { return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented") } func (*UnimplementedValdServer) StreamGetObject(srv Vald_StreamGetObjectServer) error { @@ -664,7 +664,7 @@ func _Vald_StreamInsert_Handler(srv interface{}, stream grpc.ServerStream) error } type Vald_StreamInsertServer interface { - Send(*payload.Empty) error + Send(*payload.Object_Location) error Recv() (*payload.Object_Vector, error) grpc.ServerStream } @@ -673,7 +673,7 @@ type valdStreamInsertServer struct { grpc.ServerStream } -func (x *valdStreamInsertServer) Send(m *payload.Empty) error { +func (x *valdStreamInsertServer) Send(m *payload.Object_Location) error { return x.ServerStream.SendMsg(m) } @@ -726,7 +726,7 @@ func _Vald_StreamUpdate_Handler(srv interface{}, stream grpc.ServerStream) error } type Vald_StreamUpdateServer interface { - Send(*payload.Empty) error + Send(*payload.Object_Location) error Recv() (*payload.Object_Vector, error) grpc.ServerStream } @@ -735,7 +735,7 @@ type valdStreamUpdateServer struct { grpc.ServerStream } -func (x *valdStreamUpdateServer) Send(m *payload.Empty) error { +func (x *valdStreamUpdateServer) Send(m *payload.Object_Location) error { return x.ServerStream.SendMsg(m) } @@ -788,7 +788,7 @@ func _Vald_StreamUpsert_Handler(srv interface{}, stream grpc.ServerStream) error } type Vald_StreamUpsertServer interface { - Send(*payload.Empty) error + Send(*payload.Object_Location) error Recv() (*payload.Object_Vector, error) grpc.ServerStream } @@ -797,7 +797,7 @@ type valdStreamUpsertServer struct { grpc.ServerStream } -func (x *valdStreamUpsertServer) Send(m *payload.Empty) error { +func (x *valdStreamUpsertServer) Send(m *payload.Object_Location) error { return x.ServerStream.SendMsg(m) } @@ -850,7 +850,7 @@ func _Vald_StreamRemove_Handler(srv interface{}, stream grpc.ServerStream) error } type Vald_StreamRemoveServer interface { - Send(*payload.Empty) error + Send(*payload.Object_Location) error Recv() (*payload.Object_ID, error) grpc.ServerStream } @@ -859,7 +859,7 @@ type valdStreamRemoveServer struct { grpc.ServerStream } -func (x *valdStreamRemoveServer) Send(m *payload.Empty) error { +func (x *valdStreamRemoveServer) Send(m *payload.Object_Location) error { return x.ServerStream.SendMsg(m) } @@ -912,7 +912,7 @@ func _Vald_StreamGetObject_Handler(srv interface{}, stream grpc.ServerStream) er } type Vald_StreamGetObjectServer interface { - Send(*payload.Backup_MetaVector) error + 
Send(*payload.Object_Vector) error Recv() (*payload.Object_ID, error) grpc.ServerStream } @@ -921,7 +921,7 @@ type valdStreamGetObjectServer struct { grpc.ServerStream } -func (x *valdStreamGetObjectServer) Send(m *payload.Backup_MetaVector) error { +func (x *valdStreamGetObjectServer) Send(m *payload.Object_Vector) error { return x.ServerStream.SendMsg(m) } @@ -1030,5 +1030,5 @@ var _Vald_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "vald/vald.proto", + Metadata: "apis/proto/gateway/vald/vald.proto", } diff --git a/apis/grpc/manager/traffic/traffic_manager.pb.go b/apis/grpc/manager/traffic/traffic_manager.pb.go deleted file mode 100644 index 4202bbd940..0000000000 --- a/apis/grpc/manager/traffic/traffic_manager.pb.go +++ /dev/null @@ -1,53 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package traffic - -import ( - fmt "fmt" - math "math" - - _ "github.com/danielvladco/go-proto-gql/pb" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { proto.RegisterFile("traffic/traffic_manager.proto", fileDescriptor_fe442397473f4bc7) } - -var fileDescriptor_fe442397473f4bc7 = []byte{ - // 146 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2d, 0x29, 0x4a, 0x4c, - 0x4b, 0xcb, 0x4c, 0xd6, 0x87, 0xd2, 0xf1, 0xb9, 0x89, 0x79, 0x89, 0xe9, 0xa9, 0x45, 0x7a, 0x05, - 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xfc, 0x68, 0xc2, 0x52, 0x32, 0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, - 0xfa, 0x89, 0x05, 0x99, 0xfa, 0x89, 0x79, 0x79, 0xf9, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xc5, - 0x10, 0xe5, 0x52, 0x3c, 0x05, 0x49, 0xfa, 0xe9, 0x85, 0x39, 0x10, 0x9e, 0x93, 0xed, 0x89, 0x47, - 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x18, 0xa5, 0x9f, 0x9e, 0x59, 0x92, - 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x5f, 0x96, 0x92, 0x98, 0x58, 0xac, 0x5f, 0x96, 0x98, - 0x93, 0x02, 0x32, 0xa8, 0x58, 0x3f, 0xbd, 0xa8, 0x20, 0x59, 0x1f, 0x6a, 0x05, 0xcc, 0x25, 0x49, - 0x6c, 0x60, 0x53, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x90, 0x8d, 0xc1, 0x22, 0xa3, 0x00, - 0x00, 0x00, -} diff --git a/apis/grpc/payload/payload.pb.go b/apis/grpc/payload/payload.pb.go index fb94a103e6..4887ffcf48 100644 --- a/apis/grpc/payload/payload.pb.go +++ b/apis/grpc/payload/payload.pb.go @@ -48,7 +48,7 @@ func (m *Search) Reset() { *m = Search{} } func (m *Search) String() string { return proto.CompactTextString(m) } func (*Search) ProtoMessage() {} func (*Search) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{0} + return fileDescriptor_34c50f4952bdcbdd, []int{0} } func (m *Search) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -89,7 +89,7 @@ func (m *Search_Request) Reset() { *m = Search_Request{} } func (m *Search_Request) String() string { return proto.CompactTextString(m) } func (*Search_Request) ProtoMessage() {} func (*Search_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{0, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{0, 0} } func (m *Search_Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,6 +132,53 @@ func (m *Search_Request) GetConfig() *Search_Config { return nil } +type Search_MultiRequest struct { + Requests []*Search_Request `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Search_MultiRequest) Reset() { *m = Search_MultiRequest{} } +func (m *Search_MultiRequest) String() string { return proto.CompactTextString(m) } +func (*Search_MultiRequest) ProtoMessage() {} +func (*Search_MultiRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_34c50f4952bdcbdd, []int{0, 1} +} +func (m *Search_MultiRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Search_MultiRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Search_MultiRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Search_MultiRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Search_MultiRequest.Merge(m, src) +} +func (m *Search_MultiRequest) XXX_Size() int { + return m.Size() +} +func (m *Search_MultiRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_Search_MultiRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_Search_MultiRequest proto.InternalMessageInfo + +func (m *Search_MultiRequest) GetRequests() []*Search_Request { + if m != nil { + return m.Requests + } + return nil +} + type Search_IDRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Config *Search_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` @@ -144,7 +191,7 @@ func (m *Search_IDRequest) Reset() { *m = Search_IDRequest{} } func (m *Search_IDRequest) String() string { return proto.CompactTextString(m) } func (*Search_IDRequest) ProtoMessage() {} func (*Search_IDRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{0, 1} + return fileDescriptor_34c50f4952bdcbdd, []int{0, 2} } func (m *Search_IDRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,11 +234,114 @@ func (m *Search_IDRequest) GetConfig() *Search_Config { return nil } +type Search_MultiIDRequest struct { + Requests []*Search_IDRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Search_MultiIDRequest) Reset() { *m = Search_MultiIDRequest{} } +func (m *Search_MultiIDRequest) String() string { return proto.CompactTextString(m) } +func (*Search_MultiIDRequest) ProtoMessage() {} +func (*Search_MultiIDRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_34c50f4952bdcbdd, []int{0, 3} +} +func (m *Search_MultiIDRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Search_MultiIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Search_MultiIDRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Search_MultiIDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Search_MultiIDRequest.Merge(m, src) +} +func (m *Search_MultiIDRequest) XXX_Size() int { + return m.Size() +} +func (m *Search_MultiIDRequest) XXX_DiscardUnknown() { + xxx_messageInfo_Search_MultiIDRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_Search_MultiIDRequest proto.InternalMessageInfo + +func (m *Search_MultiIDRequest) GetRequests() []*Search_IDRequest { + if m != nil { + return m.Requests + } + return nil +} + +type Search_ObjectRequest struct { + Object []byte `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + Config *Search_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Search_ObjectRequest) Reset() { *m = Search_ObjectRequest{} } +func (m *Search_ObjectRequest) String() string { return proto.CompactTextString(m) } +func (*Search_ObjectRequest) ProtoMessage() {} +func (*Search_ObjectRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_34c50f4952bdcbdd, []int{0, 4} +} +func (m *Search_ObjectRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Search_ObjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Search_ObjectRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], 
nil + } +} +func (m *Search_ObjectRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Search_ObjectRequest.Merge(m, src) +} +func (m *Search_ObjectRequest) XXX_Size() int { + return m.Size() +} +func (m *Search_ObjectRequest) XXX_DiscardUnknown() { + xxx_messageInfo_Search_ObjectRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_Search_ObjectRequest proto.InternalMessageInfo + +func (m *Search_ObjectRequest) GetObject() []byte { + if m != nil { + return m.Object + } + return nil +} + +func (m *Search_ObjectRequest) GetConfig() *Search_Config { + if m != nil { + return m.Config + } + return nil +} + type Search_Config struct { - Num uint32 `protobuf:"varint,1,opt,name=num,proto3" json:"num,omitempty"` - Radius float32 `protobuf:"fixed32,2,opt,name=radius,proto3" json:"radius,omitempty"` - Epsilon float32 `protobuf:"fixed32,3,opt,name=epsilon,proto3" json:"epsilon,omitempty"` - Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Num uint32 `protobuf:"varint,2,opt,name=num,proto3" json:"num,omitempty"` + Radius float32 `protobuf:"fixed32,3,opt,name=radius,proto3" json:"radius,omitempty"` + Epsilon float32 `protobuf:"fixed32,4,opt,name=epsilon,proto3" json:"epsilon,omitempty"` + Timeout int64 `protobuf:"varint,5,opt,name=timeout,proto3" json:"timeout,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -201,7 +351,7 @@ func (m *Search_Config) Reset() { *m = Search_Config{} } func (m *Search_Config) String() string { return proto.CompactTextString(m) } func (*Search_Config) ProtoMessage() {} func (*Search_Config) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{0, 2} + return fileDescriptor_34c50f4952bdcbdd, []int{0, 5} } func (m *Search_Config) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -230,6 +380,13 @@ func (m *Search_Config) XXX_DiscardUnknown() { var xxx_messageInfo_Search_Config proto.InternalMessageInfo +func (m *Search_Config) GetRequestId() string { + if m != nil { + return m.RequestId + } + return "" +} + func (m *Search_Config) GetNum() uint32 { if m != nil { return m.Num @@ -259,7 +416,8 @@ func (m *Search_Config) GetTimeout() int64 { } type Search_Response struct { - Results []*Object_Distance `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Results []*Object_Distance `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -269,7 +427,7 @@ func (m *Search_Response) Reset() { *m = Search_Response{} } func (m *Search_Response) String() string { return proto.CompactTextString(m) } func (*Search_Response) ProtoMessage() {} func (*Search_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{0, 3} + return fileDescriptor_34c50f4952bdcbdd, []int{0, 6} } func (m *Search_Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,6 +456,13 @@ func (m *Search_Response) XXX_DiscardUnknown() { var xxx_messageInfo_Search_Response proto.InternalMessageInfo +func (m *Search_Response) GetRequestId() string { + if m != nil { + return m.RequestId + } + return "" +} + func (m *Search_Response) GetResults() []*Object_Distance { if m != nil { 
return m.Results @@ -305,6 +470,53 @@ func (m *Search_Response) GetResults() []*Object_Distance { return nil } +type Search_Responses struct { + Responses []*Search_Response `protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Search_Responses) Reset() { *m = Search_Responses{} } +func (m *Search_Responses) String() string { return proto.CompactTextString(m) } +func (*Search_Responses) ProtoMessage() {} +func (*Search_Responses) Descriptor() ([]byte, []int) { + return fileDescriptor_34c50f4952bdcbdd, []int{0, 7} +} +func (m *Search_Responses) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Search_Responses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Search_Responses.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Search_Responses) XXX_Merge(src proto.Message) { + xxx_messageInfo_Search_Responses.Merge(m, src) +} +func (m *Search_Responses) XXX_Size() int { + return m.Size() +} +func (m *Search_Responses) XXX_DiscardUnknown() { + xxx_messageInfo_Search_Responses.DiscardUnknown(m) +} + +var xxx_messageInfo_Search_Responses proto.InternalMessageInfo + +func (m *Search_Responses) GetResponses() []*Search_Response { + if m != nil { + return m.Responses + } + return nil +} + type Meta struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -315,7 +527,7 @@ func (m *Meta) Reset() { *m = Meta{} } func (m *Meta) String() string { return proto.CompactTextString(m) } func (*Meta) ProtoMessage() {} func (*Meta) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{1} + return fileDescriptor_34c50f4952bdcbdd, []int{1} } func (m *Meta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +567,7 @@ func (m *Meta_Key) Reset() { *m = Meta_Key{} } func (m *Meta_Key) String() string { return proto.CompactTextString(m) } func (*Meta_Key) ProtoMessage() {} func (*Meta_Key) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{1, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{1, 0} } func (m *Meta_Key) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -402,7 +614,7 @@ func (m *Meta_Keys) Reset() { *m = Meta_Keys{} } func (m *Meta_Keys) String() string { return proto.CompactTextString(m) } func (*Meta_Keys) ProtoMessage() {} func (*Meta_Keys) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{1, 1} + return fileDescriptor_34c50f4952bdcbdd, []int{1, 1} } func (m *Meta_Keys) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -449,7 +661,7 @@ func (m *Meta_Val) Reset() { *m = Meta_Val{} } func (m *Meta_Val) String() string { return proto.CompactTextString(m) } func (*Meta_Val) ProtoMessage() {} func (*Meta_Val) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{1, 2} + return fileDescriptor_34c50f4952bdcbdd, []int{1, 2} } func (m *Meta_Val) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -496,7 +708,7 @@ func (m *Meta_Vals) Reset() { *m = Meta_Vals{} } func (m *Meta_Vals) String() string { return proto.CompactTextString(m) } func (*Meta_Vals) ProtoMessage() {} func (*Meta_Vals) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{1, 3} + return 
fileDescriptor_34c50f4952bdcbdd, []int{1, 3} } func (m *Meta_Vals) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -544,7 +756,7 @@ func (m *Meta_KeyVal) Reset() { *m = Meta_KeyVal{} } func (m *Meta_KeyVal) String() string { return proto.CompactTextString(m) } func (*Meta_KeyVal) ProtoMessage() {} func (*Meta_KeyVal) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{1, 4} + return fileDescriptor_34c50f4952bdcbdd, []int{1, 4} } func (m *Meta_KeyVal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -598,7 +810,7 @@ func (m *Meta_KeyVals) Reset() { *m = Meta_KeyVals{} } func (m *Meta_KeyVals) String() string { return proto.CompactTextString(m) } func (*Meta_KeyVals) ProtoMessage() {} func (*Meta_KeyVals) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{1, 5} + return fileDescriptor_34c50f4952bdcbdd, []int{1, 5} } func (m *Meta_KeyVals) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -644,7 +856,7 @@ func (m *Object) Reset() { *m = Object{} } func (m *Object) String() string { return proto.CompactTextString(m) } func (*Object) ProtoMessage() {} func (*Object) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{2} + return fileDescriptor_34c50f4952bdcbdd, []int{2} } func (m *Object) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -685,7 +897,7 @@ func (m *Object_Distance) Reset() { *m = Object_Distance{} } func (m *Object_Distance) String() string { return proto.CompactTextString(m) } func (*Object_Distance) ProtoMessage() {} func (*Object_Distance) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{2, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{2, 0} } func (m *Object_Distance) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -739,7 +951,7 @@ func (m *Object_ID) Reset() { *m = Object_ID{} } func (m *Object_ID) String() string { return proto.CompactTextString(m) } func (*Object_ID) ProtoMessage() {} func (*Object_ID) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{2, 1} + return fileDescriptor_34c50f4952bdcbdd, []int{2, 1} } func (m *Object_ID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -786,7 +998,7 @@ func (m *Object_IDs) Reset() { *m = Object_IDs{} } func (m *Object_IDs) String() string { return proto.CompactTextString(m) } func (*Object_IDs) ProtoMessage() {} func (*Object_IDs) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{2, 2} + return fileDescriptor_34c50f4952bdcbdd, []int{2, 2} } func (m *Object_IDs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -834,7 +1046,7 @@ func (m *Object_Vector) Reset() { *m = Object_Vector{} } func (m *Object_Vector) String() string { return proto.CompactTextString(m) } func (*Object_Vector) ProtoMessage() {} func (*Object_Vector) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{2, 3} + return fileDescriptor_34c50f4952bdcbdd, []int{2, 3} } func (m *Object_Vector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -888,7 +1100,7 @@ func (m *Object_Vectors) Reset() { *m = Object_Vectors{} } func (m *Object_Vectors) String() string { return proto.CompactTextString(m) } func (*Object_Vectors) ProtoMessage() {} func (*Object_Vectors) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{2, 4} + return fileDescriptor_34c50f4952bdcbdd, []int{2, 4} } func (m *Object_Vectors) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -924,6 +1136,171 @@ func (m 
*Object_Vectors) GetVectors() []*Object_Vector { return nil } +type Object_Blob struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Object []byte `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object_Blob) Reset() { *m = Object_Blob{} } +func (m *Object_Blob) String() string { return proto.CompactTextString(m) } +func (*Object_Blob) ProtoMessage() {} +func (*Object_Blob) Descriptor() ([]byte, []int) { + return fileDescriptor_34c50f4952bdcbdd, []int{2, 5} +} +func (m *Object_Blob) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Object_Blob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Object_Blob.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Object_Blob) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object_Blob.Merge(m, src) +} +func (m *Object_Blob) XXX_Size() int { + return m.Size() +} +func (m *Object_Blob) XXX_DiscardUnknown() { + xxx_messageInfo_Object_Blob.DiscardUnknown(m) +} + +var xxx_messageInfo_Object_Blob proto.InternalMessageInfo + +func (m *Object_Blob) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Object_Blob) GetObject() []byte { + if m != nil { + return m.Object + } + return nil +} + +type Object_Location struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` + Ips []string `protobuf:"bytes,3,rep,name=ips,proto3" json:"ips,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object_Location) Reset() { *m = Object_Location{} } +func (m *Object_Location) String() string { return proto.CompactTextString(m) } +func (*Object_Location) ProtoMessage() {} +func (*Object_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_34c50f4952bdcbdd, []int{2, 6} +} +func (m *Object_Location) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Object_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Object_Location.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Object_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object_Location.Merge(m, src) +} +func (m *Object_Location) XXX_Size() int { + return m.Size() +} +func (m *Object_Location) XXX_DiscardUnknown() { + xxx_messageInfo_Object_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_Object_Location proto.InternalMessageInfo + +func (m *Object_Location) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Object_Location) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + +func (m *Object_Location) GetIps() []string { + if m != nil { + return m.Ips + } + return nil +} + +type Object_Locations struct { + Locations []*Object_Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object_Locations) Reset() { 
*m = Object_Locations{} } +func (m *Object_Locations) String() string { return proto.CompactTextString(m) } +func (*Object_Locations) ProtoMessage() {} +func (*Object_Locations) Descriptor() ([]byte, []int) { + return fileDescriptor_34c50f4952bdcbdd, []int{2, 7} +} +func (m *Object_Locations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Object_Locations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Object_Locations.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Object_Locations) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object_Locations.Merge(m, src) +} +func (m *Object_Locations) XXX_Size() int { + return m.Size() +} +func (m *Object_Locations) XXX_DiscardUnknown() { + xxx_messageInfo_Object_Locations.DiscardUnknown(m) +} + +var xxx_messageInfo_Object_Locations proto.InternalMessageInfo + +func (m *Object_Locations) GetLocations() []*Object_Location { + if m != nil { + return m.Locations + } + return nil +} + type Control struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -934,7 +1311,7 @@ func (m *Control) Reset() { *m = Control{} } func (m *Control) String() string { return proto.CompactTextString(m) } func (*Control) ProtoMessage() {} func (*Control) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{3} + return fileDescriptor_34c50f4952bdcbdd, []int{3} } func (m *Control) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -974,7 +1351,7 @@ func (m *Control_CreateIndexRequest) Reset() { *m = Control_CreateIndexR func (m *Control_CreateIndexRequest) String() string { return proto.CompactTextString(m) } func (*Control_CreateIndexRequest) ProtoMessage() {} func (*Control_CreateIndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{3, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{3, 0} } func (m *Control_CreateIndexRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1020,7 +1397,7 @@ func (m *Replication) Reset() { *m = Replication{} } func (m *Replication) String() string { return proto.CompactTextString(m) } func (*Replication) ProtoMessage() {} func (*Replication) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{4} + return fileDescriptor_34c50f4952bdcbdd, []int{4} } func (m *Replication) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1437,7 @@ func (m *Replication_Recovery) Reset() { *m = Replication_Recovery{} } func (m *Replication_Recovery) String() string { return proto.CompactTextString(m) } func (*Replication_Recovery) ProtoMessage() {} func (*Replication_Recovery) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{4, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{4, 0} } func (m *Replication_Recovery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1108,7 +1485,7 @@ func (m *Replication_Rebalance) Reset() { *m = Replication_Rebalance{} } func (m *Replication_Rebalance) String() string { return proto.CompactTextString(m) } func (*Replication_Rebalance) ProtoMessage() {} func (*Replication_Rebalance) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{4, 1} + return fileDescriptor_34c50f4952bdcbdd, []int{4, 1} } func (m *Replication_Rebalance) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1164,7 +1541,7 @@ func (m 
*Replication_Agents) Reset() { *m = Replication_Agents{} } func (m *Replication_Agents) String() string { return proto.CompactTextString(m) } func (*Replication_Agents) ProtoMessage() {} func (*Replication_Agents) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{4, 2} + return fileDescriptor_34c50f4952bdcbdd, []int{4, 2} } func (m *Replication_Agents) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1224,7 +1601,7 @@ func (m *Discoverer) Reset() { *m = Discoverer{} } func (m *Discoverer) String() string { return proto.CompactTextString(m) } func (*Discoverer) ProtoMessage() {} func (*Discoverer) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{5} + return fileDescriptor_34c50f4952bdcbdd, []int{5} } func (m *Discoverer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1266,7 +1643,7 @@ func (m *Discoverer_Request) Reset() { *m = Discoverer_Request{} } func (m *Discoverer_Request) String() string { return proto.CompactTextString(m) } func (*Discoverer_Request) ProtoMessage() {} func (*Discoverer_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{5, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{5, 0} } func (m *Discoverer_Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1326,7 +1703,7 @@ func (m *Backup) Reset() { *m = Backup{} } func (m *Backup) String() string { return proto.CompactTextString(m) } func (*Backup) ProtoMessage() {} func (*Backup) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6} + return fileDescriptor_34c50f4952bdcbdd, []int{6} } func (m *Backup) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1365,7 +1742,7 @@ func (m *Backup_GetVector) Reset() { *m = Backup_GetVector{} } func (m *Backup_GetVector) String() string { return proto.CompactTextString(m) } func (*Backup_GetVector) ProtoMessage() {} func (*Backup_GetVector) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 0} } func (m *Backup_GetVector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1405,7 +1782,7 @@ func (m *Backup_GetVector_Request) Reset() { *m = Backup_GetVector_Reque func (m *Backup_GetVector_Request) String() string { return proto.CompactTextString(m) } func (*Backup_GetVector_Request) ProtoMessage() {} func (*Backup_GetVector_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 0, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 0, 0} } func (m *Backup_GetVector_Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1829,7 @@ func (m *Backup_GetVector_Owner) Reset() { *m = Backup_GetVector_Owner{} func (m *Backup_GetVector_Owner) String() string { return proto.CompactTextString(m) } func (*Backup_GetVector_Owner) ProtoMessage() {} func (*Backup_GetVector_Owner) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 0, 1} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 0, 1} } func (m *Backup_GetVector_Owner) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1498,7 +1875,7 @@ func (m *Backup_Locations) Reset() { *m = Backup_Locations{} } func (m *Backup_Locations) String() string { return proto.CompactTextString(m) } func (*Backup_Locations) ProtoMessage() {} func (*Backup_Locations) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 1} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 1} } func (m 
*Backup_Locations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1538,7 +1915,7 @@ func (m *Backup_Locations_Request) Reset() { *m = Backup_Locations_Reque func (m *Backup_Locations_Request) String() string { return proto.CompactTextString(m) } func (*Backup_Locations_Request) ProtoMessage() {} func (*Backup_Locations_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 1, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 1, 0} } func (m *Backup_Locations_Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1584,7 +1961,7 @@ func (m *Backup_Remove) Reset() { *m = Backup_Remove{} } func (m *Backup_Remove) String() string { return proto.CompactTextString(m) } func (*Backup_Remove) ProtoMessage() {} func (*Backup_Remove) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 2} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 2} } func (m *Backup_Remove) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1624,7 +2001,7 @@ func (m *Backup_Remove_Request) Reset() { *m = Backup_Remove_Request{} } func (m *Backup_Remove_Request) String() string { return proto.CompactTextString(m) } func (*Backup_Remove_Request) ProtoMessage() {} func (*Backup_Remove_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 2, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 2, 0} } func (m *Backup_Remove_Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1671,7 +2048,7 @@ func (m *Backup_Remove_RequestMulti) Reset() { *m = Backup_Remove_Reques func (m *Backup_Remove_RequestMulti) String() string { return proto.CompactTextString(m) } func (*Backup_Remove_RequestMulti) ProtoMessage() {} func (*Backup_Remove_RequestMulti) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 2, 1} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 2, 1} } func (m *Backup_Remove_RequestMulti) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1717,7 +2094,7 @@ func (m *Backup_IP) Reset() { *m = Backup_IP{} } func (m *Backup_IP) String() string { return proto.CompactTextString(m) } func (*Backup_IP) ProtoMessage() {} func (*Backup_IP) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 3} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 3} } func (m *Backup_IP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1756,7 +2133,7 @@ func (m *Backup_IP_Register) Reset() { *m = Backup_IP_Register{} } func (m *Backup_IP_Register) String() string { return proto.CompactTextString(m) } func (*Backup_IP_Register) ProtoMessage() {} func (*Backup_IP_Register) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 3, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 3, 0} } func (m *Backup_IP_Register) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1797,7 +2174,7 @@ func (m *Backup_IP_Register_Request) Reset() { *m = Backup_IP_Register_R func (m *Backup_IP_Register_Request) String() string { return proto.CompactTextString(m) } func (*Backup_IP_Register_Request) ProtoMessage() {} func (*Backup_IP_Register_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 3, 0, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 3, 0, 0} } func (m *Backup_IP_Register_Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1850,7 +2227,7 @@ func (m *Backup_IP_Remove) Reset() { *m = Backup_IP_Remove{} } func (m *Backup_IP_Remove) String() string { 
return proto.CompactTextString(m) } func (*Backup_IP_Remove) ProtoMessage() {} func (*Backup_IP_Remove) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 3, 1} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 3, 1} } func (m *Backup_IP_Remove) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1890,7 +2267,7 @@ func (m *Backup_IP_Remove_Request) Reset() { *m = Backup_IP_Remove_Reque func (m *Backup_IP_Remove_Request) String() string { return proto.CompactTextString(m) } func (*Backup_IP_Remove_Request) ProtoMessage() {} func (*Backup_IP_Remove_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 3, 1, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 3, 1, 0} } func (m *Backup_IP_Remove_Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1928,7 +2305,6 @@ func (m *Backup_IP_Remove_Request) GetIps() []string { type Backup_MetaVector struct { Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` - Meta string `protobuf:"bytes,2,opt,name=meta,proto3" json:"meta,omitempty"` Vector []float32 `protobuf:"fixed32,3,rep,packed,name=vector,proto3" json:"vector,omitempty"` Ips []string `protobuf:"bytes,4,rep,name=ips,proto3" json:"ips,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1940,7 +2316,7 @@ func (m *Backup_MetaVector) Reset() { *m = Backup_MetaVector{} } func (m *Backup_MetaVector) String() string { return proto.CompactTextString(m) } func (*Backup_MetaVector) ProtoMessage() {} func (*Backup_MetaVector) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 4} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 4} } func (m *Backup_MetaVector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1976,13 +2352,6 @@ func (m *Backup_MetaVector) GetUuid() string { return "" } -func (m *Backup_MetaVector) GetMeta() string { - if m != nil { - return m.Meta - } - return "" -} - func (m *Backup_MetaVector) GetVector() []float32 { if m != nil { return m.Vector @@ -2008,7 +2377,7 @@ func (m *Backup_MetaVectors) Reset() { *m = Backup_MetaVectors{} } func (m *Backup_MetaVectors) String() string { return proto.CompactTextString(m) } func (*Backup_MetaVectors) ProtoMessage() {} func (*Backup_MetaVectors) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 5} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 5} } func (m *Backup_MetaVectors) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2054,7 +2423,7 @@ func (m *Backup_Compressed) Reset() { *m = Backup_Compressed{} } func (m *Backup_Compressed) String() string { return proto.CompactTextString(m) } func (*Backup_Compressed) ProtoMessage() {} func (*Backup_Compressed) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 6} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 6} } func (m *Backup_Compressed) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2085,7 +2454,6 @@ var xxx_messageInfo_Backup_Compressed proto.InternalMessageInfo type Backup_Compressed_MetaVector struct { Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` - Meta string `protobuf:"bytes,2,opt,name=meta,proto3" json:"meta,omitempty"` Vector []byte `protobuf:"bytes,3,opt,name=vector,proto3" json:"vector,omitempty"` Ips []string `protobuf:"bytes,4,rep,name=ips,proto3" json:"ips,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -2097,7 +2465,7 @@ func (m *Backup_Compressed_MetaVector) Reset() { *m = Backup_Compressed_ func 
(m *Backup_Compressed_MetaVector) String() string { return proto.CompactTextString(m) } func (*Backup_Compressed_MetaVector) ProtoMessage() {} func (*Backup_Compressed_MetaVector) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 6, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 6, 0} } func (m *Backup_Compressed_MetaVector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2133,13 +2501,6 @@ func (m *Backup_Compressed_MetaVector) GetUuid() string { return "" } -func (m *Backup_Compressed_MetaVector) GetMeta() string { - if m != nil { - return m.Meta - } - return "" -} - func (m *Backup_Compressed_MetaVector) GetVector() []byte { if m != nil { return m.Vector @@ -2165,7 +2526,7 @@ func (m *Backup_Compressed_MetaVectors) Reset() { *m = Backup_Compressed func (m *Backup_Compressed_MetaVectors) String() string { return proto.CompactTextString(m) } func (*Backup_Compressed_MetaVectors) ProtoMessage() {} func (*Backup_Compressed_MetaVectors) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{6, 6, 1} + return fileDescriptor_34c50f4952bdcbdd, []int{6, 6, 1} } func (m *Backup_Compressed_MetaVectors) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2211,7 +2572,7 @@ func (m *Info) Reset() { *m = Info{} } func (m *Info) String() string { return proto.CompactTextString(m) } func (*Info) ProtoMessage() {} func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{7} + return fileDescriptor_34c50f4952bdcbdd, []int{7} } func (m *Info) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2250,7 +2611,7 @@ func (m *Info_Index) Reset() { *m = Info_Index{} } func (m *Info_Index) String() string { return proto.CompactTextString(m) } func (*Info_Index) ProtoMessage() {} func (*Info_Index) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{7, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{7, 0} } func (m *Info_Index) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,7 +2653,7 @@ func (m *Info_Index_Count) Reset() { *m = Info_Index_Count{} } func (m *Info_Index_Count) String() string { return proto.CompactTextString(m) } func (*Info_Index_Count) ProtoMessage() {} func (*Info_Index_Count) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{7, 0, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{7, 0, 0} } func (m *Info_Index_Count) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2352,7 +2713,7 @@ func (m *Info_Index_UUID) Reset() { *m = Info_Index_UUID{} } func (m *Info_Index_UUID) String() string { return proto.CompactTextString(m) } func (*Info_Index_UUID) ProtoMessage() {} func (*Info_Index_UUID) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{7, 0, 1} + return fileDescriptor_34c50f4952bdcbdd, []int{7, 0, 1} } func (m *Info_Index_UUID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2392,7 +2753,7 @@ func (m *Info_Index_UUID_Committed) Reset() { *m = Info_Index_UUID_Commi func (m *Info_Index_UUID_Committed) String() string { return proto.CompactTextString(m) } func (*Info_Index_UUID_Committed) ProtoMessage() {} func (*Info_Index_UUID_Committed) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{7, 0, 1, 0} + return fileDescriptor_34c50f4952bdcbdd, []int{7, 0, 1, 0} } func (m *Info_Index_UUID_Committed) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2439,7 +2800,7 @@ func (m *Info_Index_UUID_Uncommitted) Reset() { *m = 
Info_Index_UUID_Unc func (m *Info_Index_UUID_Uncommitted) String() string { return proto.CompactTextString(m) } func (*Info_Index_UUID_Uncommitted) ProtoMessage() {} func (*Info_Index_UUID_Uncommitted) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{7, 0, 1, 1} + return fileDescriptor_34c50f4952bdcbdd, []int{7, 0, 1, 1} } func (m *Info_Index_UUID_Uncommitted) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2492,7 +2853,7 @@ func (m *Info_Pod) Reset() { *m = Info_Pod{} } func (m *Info_Pod) String() string { return proto.CompactTextString(m) } func (*Info_Pod) ProtoMessage() {} func (*Info_Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{7, 1} + return fileDescriptor_34c50f4952bdcbdd, []int{7, 1} } func (m *Info_Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2586,7 +2947,7 @@ func (m *Info_Node) Reset() { *m = Info_Node{} } func (m *Info_Node) String() string { return proto.CompactTextString(m) } func (*Info_Node) ProtoMessage() {} func (*Info_Node) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{7, 2} + return fileDescriptor_34c50f4952bdcbdd, []int{7, 2} } func (m *Info_Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2670,7 +3031,7 @@ func (m *Info_CPU) Reset() { *m = Info_CPU{} } func (m *Info_CPU) String() string { return proto.CompactTextString(m) } func (*Info_CPU) ProtoMessage() {} func (*Info_CPU) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{7, 3} + return fileDescriptor_34c50f4952bdcbdd, []int{7, 3} } func (m *Info_CPU) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2733,7 +3094,7 @@ func (m *Info_Memory) Reset() { *m = Info_Memory{} } func (m *Info_Memory) String() string { return proto.CompactTextString(m) } func (*Info_Memory) ProtoMessage() {} func (*Info_Memory) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{7, 4} + return fileDescriptor_34c50f4952bdcbdd, []int{7, 4} } func (m *Info_Memory) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2794,7 +3155,7 @@ func (m *Info_Pods) Reset() { *m = Info_Pods{} } func (m *Info_Pods) String() string { return proto.CompactTextString(m) } func (*Info_Pods) ProtoMessage() {} func (*Info_Pods) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{7, 5} + return fileDescriptor_34c50f4952bdcbdd, []int{7, 5} } func (m *Info_Pods) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2841,7 +3202,7 @@ func (m *Info_Nodes) Reset() { *m = Info_Nodes{} } func (m *Info_Nodes) String() string { return proto.CompactTextString(m) } func (*Info_Nodes) ProtoMessage() {} func (*Info_Nodes) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{7, 6} + return fileDescriptor_34c50f4952bdcbdd, []int{7, 6} } func (m *Info_Nodes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2888,7 +3249,7 @@ func (m *Info_IPs) Reset() { *m = Info_IPs{} } func (m *Info_IPs) String() string { return proto.CompactTextString(m) } func (*Info_IPs) ProtoMessage() {} func (*Info_IPs) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{7, 7} + return fileDescriptor_34c50f4952bdcbdd, []int{7, 7} } func (m *Info_IPs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2934,7 +3295,7 @@ func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} func (*Empty) Descriptor() ([]byte, []int) { 
- return fileDescriptor_678c914f1bee6d56, []int{8} + return fileDescriptor_34c50f4952bdcbdd, []int{8} } func (m *Empty) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2966,9 +3327,13 @@ var xxx_messageInfo_Empty proto.InternalMessageInfo func init() { proto.RegisterType((*Search)(nil), "payload.Search") proto.RegisterType((*Search_Request)(nil), "payload.Search.Request") + proto.RegisterType((*Search_MultiRequest)(nil), "payload.Search.MultiRequest") proto.RegisterType((*Search_IDRequest)(nil), "payload.Search.IDRequest") + proto.RegisterType((*Search_MultiIDRequest)(nil), "payload.Search.MultiIDRequest") + proto.RegisterType((*Search_ObjectRequest)(nil), "payload.Search.ObjectRequest") proto.RegisterType((*Search_Config)(nil), "payload.Search.Config") proto.RegisterType((*Search_Response)(nil), "payload.Search.Response") + proto.RegisterType((*Search_Responses)(nil), "payload.Search.Responses") proto.RegisterType((*Meta)(nil), "payload.Meta") proto.RegisterType((*Meta_Key)(nil), "payload.Meta.Key") proto.RegisterType((*Meta_Keys)(nil), "payload.Meta.Keys") @@ -2982,6 +3347,9 @@ func init() { proto.RegisterType((*Object_IDs)(nil), "payload.Object.IDs") proto.RegisterType((*Object_Vector)(nil), "payload.Object.Vector") proto.RegisterType((*Object_Vectors)(nil), "payload.Object.Vectors") + proto.RegisterType((*Object_Blob)(nil), "payload.Object.Blob") + proto.RegisterType((*Object_Location)(nil), "payload.Object.Location") + proto.RegisterType((*Object_Locations)(nil), "payload.Object.Locations") proto.RegisterType((*Control)(nil), "payload.Control") proto.RegisterType((*Control_CreateIndexRequest)(nil), "payload.Control.CreateIndexRequest") proto.RegisterType((*Replication)(nil), "payload.Replication") @@ -3025,93 +3393,104 @@ func init() { proto.RegisterType((*Empty)(nil), "payload.Empty") } -func init() { proto.RegisterFile("payload.proto", fileDescriptor_678c914f1bee6d56) } - -var fileDescriptor_678c914f1bee6d56 = []byte{ - // 1327 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xdd, 0x6e, 0x1b, 0xc5, - 0x17, 0xff, 0xef, 0x87, 0xd7, 0xf6, 0x49, 0xdc, 0x7f, 0x32, 0x2a, 0xa9, 0x3b, 0x40, 0x30, 0x2e, - 0x2d, 0x56, 0xa9, 0x6c, 0x9a, 0x22, 0x90, 0xa8, 0x04, 0xaa, 0x9d, 0x0a, 0x99, 0xd0, 0xd4, 0x9a, - 0xca, 0x11, 0xe2, 0x2b, 0x4c, 0xbc, 0x53, 0x67, 0xc9, 0x7a, 0x67, 0xd9, 0x9d, 0x75, 0xeb, 0xde, - 0xf3, 0x02, 0xdc, 0xf1, 0x16, 0x88, 0x6b, 0x1e, 0x80, 0x4b, 0x78, 0x02, 0x50, 0x91, 0xb8, 0xe2, - 0x09, 0x7a, 0x85, 0xe6, 0x63, 0xed, 0xb5, 0x93, 0xa2, 0x80, 0xb8, 0xca, 0x9c, 0x33, 0xbf, 0x73, - 0xe6, 0xfc, 0xce, 0x97, 0x37, 0x50, 0x8b, 0xe9, 0x2c, 0xe4, 0xd4, 0x6f, 0xc7, 0x09, 0x17, 0x1c, - 0x95, 0x8d, 0x88, 0x2f, 0x4d, 0x69, 0x18, 0xf8, 0x54, 0xb0, 0x4e, 0x7e, 0xd0, 0x88, 0xe6, 0xef, - 0x36, 0x78, 0x0f, 0x18, 0x4d, 0x46, 0xc7, 0xf8, 0x53, 0x28, 0x13, 0xf6, 0x75, 0xc6, 0x52, 0x81, - 0x1a, 0xe0, 0x4d, 0xd9, 0x48, 0xf0, 0xa4, 0x6e, 0x35, 0x9c, 0x96, 0xdd, 0xad, 0x3c, 0xeb, 0x96, - 0xbe, 0xb5, 0xec, 0x8a, 0x4d, 0x8c, 0x1e, 0xb5, 0xc1, 0x1b, 0xf1, 0xe8, 0x61, 0x30, 0xae, 0xdb, - 0x0d, 0xab, 0xb5, 0xb6, 0xb3, 0xd5, 0xce, 0x5f, 0xd6, 0xde, 0xda, 0x3d, 0x75, 0x4b, 0x0c, 0x0a, - 0xef, 0x41, 0xb5, 0xbf, 0x9b, 0xbb, 0xbf, 0x00, 0x76, 0xe0, 0xd7, 0xad, 0x86, 0xd5, 0xaa, 0x12, - 0x3b, 0xf0, 0xff, 0xb1, 0x33, 0x0e, 0x9e, 0xd6, 0xa0, 0xcb, 0xe0, 0x44, 0xd9, 0x44, 0xb9, 0xaa, - 0x75, 0xcb, 0xcf, 0xba, 0xee, 0x75, 0xbb, 0x65, 0x11, 0xa9, 0x43, 0x5b, 0xe0, 0x25, 0xd4, 0x0f, - 0xb2, 0x54, 0x39, 0xb5, 0x89, 0x91, 0x50, 0x1d, 0xca, 0x2c, 
0x4e, 0x83, 0x90, 0x47, 0x75, 0x47, - 0x5d, 0xe4, 0xa2, 0xbc, 0x11, 0xc1, 0x84, 0xf1, 0x4c, 0xd4, 0xdd, 0x86, 0xd5, 0x72, 0x48, 0x2e, - 0xe2, 0xf7, 0xa0, 0x42, 0x58, 0x1a, 0xf3, 0x28, 0x65, 0x68, 0x07, 0xca, 0x09, 0x4b, 0xb3, 0x50, - 0xa4, 0x2a, 0x39, 0x6b, 0x3b, 0xf5, 0x79, 0xb4, 0xf7, 0x8f, 0xbe, 0x62, 0x23, 0xd1, 0xde, 0x0d, - 0x52, 0x41, 0xa3, 0x11, 0x23, 0x39, 0xb0, 0xf9, 0x8b, 0x05, 0xee, 0x3d, 0x26, 0x28, 0xbe, 0x04, - 0xce, 0x1e, 0x9b, 0xa1, 0x0d, 0x70, 0x4e, 0xd8, 0xcc, 0x64, 0x40, 0x1e, 0x31, 0x06, 0x77, 0x8f, - 0xcd, 0x52, 0x84, 0xc0, 0x3d, 0x61, 0x33, 0xed, 0xba, 0x4a, 0xd4, 0x59, 0x1a, 0x1d, 0xd0, 0x50, - 0x1a, 0x4d, 0x69, 0x98, 0x1b, 0x4d, 0x69, 0x28, 0x8d, 0x0e, 0x68, 0xa8, 0x8c, 0xa6, 0x34, 0x9c, - 0x1b, 0xc9, 0x33, 0xbe, 0x01, 0xde, 0x1e, 0x9b, 0x19, 0xbb, 0xe5, 0xc7, 0x72, 0x4f, 0xf6, 0xc2, - 0xd3, 0x4d, 0x28, 0x6b, 0x74, 0x8a, 0xae, 0x81, 0x73, 0x32, 0xcd, 0xb9, 0x5d, 0x9c, 0x73, 0x93, - 0xe1, 0xb7, 0x35, 0x86, 0x48, 0x40, 0xf3, 0x99, 0x05, 0x9e, 0x26, 0x8c, 0xdf, 0x86, 0x4a, 0xce, - 0xf9, 0x54, 0x6d, 0x31, 0x54, 0x7c, 0x73, 0x67, 0x0a, 0x31, 0x97, 0xf1, 0xcb, 0x60, 0xf7, 0x77, - 0xd1, 0xa5, 0x85, 0x85, 0x2a, 0x61, 0x62, 0x6f, 0x58, 0xd2, 0x54, 0xf2, 0xee, 0xef, 0xa6, 0x32, - 0xda, 0xc0, 0xcf, 0xc9, 0xc9, 0x23, 0xee, 0x81, 0x77, 0xa0, 0xdb, 0xf0, 0x79, 0xb6, 0x85, 0x0e, - 0xb6, 0xcf, 0xee, 0x60, 0x7c, 0x1b, 0xca, 0xda, 0x49, 0x8a, 0xde, 0x84, 0xb2, 0x56, 0xe6, 0xb4, - 0xb7, 0x56, 0x4b, 0xaa, 0x91, 0x24, 0x87, 0x35, 0xef, 0x42, 0xb9, 0xc7, 0x23, 0x91, 0xf0, 0x10, - 0xbf, 0x0b, 0xa8, 0x97, 0x30, 0x2a, 0x58, 0x3f, 0xf2, 0xd9, 0xe3, 0xbc, 0xc5, 0x5f, 0x83, 0x6a, - 0xcc, 0x79, 0x78, 0x98, 0x06, 0x4f, 0xd8, 0x72, 0x7b, 0xfe, 0x8f, 0x54, 0xe4, 0xcd, 0x83, 0xe0, - 0x09, 0x6b, 0x7e, 0x67, 0xc3, 0x1a, 0x61, 0x71, 0x18, 0x8c, 0xa8, 0x08, 0x78, 0x84, 0x6f, 0xca, - 0x3e, 0x1b, 0xf1, 0x29, 0x4b, 0x66, 0xe8, 0x2a, 0x5c, 0xf0, 0x59, 0xc8, 0x04, 0xf3, 0x0f, 0xe9, - 0x98, 0x45, 0x22, 0xcf, 0x40, 0xcd, 0x68, 0xef, 0x28, 0x25, 0xa6, 0x50, 0x25, 0xec, 0x88, 0x86, - 0x2a, 0xf9, 0xd7, 0x61, 0xf3, 0x38, 0x18, 0x1f, 0x1f, 0x66, 0x29, 0x1d, 0xb3, 0x65, 0xb3, 0xff, - 0xcb, 0x8b, 0xa1, 0xd4, 0x6b, 0x43, 0xd4, 0x82, 0x8d, 0x90, 0x3f, 0x5a, 0x86, 0xda, 0x0a, 0x7a, - 0x21, 0xe4, 0x8f, 0x0a, 0x48, 0x2c, 0xc0, 0x33, 0x36, 0x5b, 0xe0, 0x2d, 0x39, 0x35, 0x92, 0x8c, - 0x35, 0x61, 0x13, 0x3e, 0x5d, 0xc4, 0xaa, 0x3d, 0xd5, 0x8c, 0xd6, 0x98, 0xbf, 0x01, 0x9b, 0x49, - 0xce, 0x36, 0x1a, 0x6b, 0x68, 0xdd, 0x51, 0xc8, 0x8d, 0xc2, 0x85, 0x42, 0x37, 0x1f, 0x02, 0xec, - 0x06, 0xa9, 0x4a, 0x06, 0x4b, 0xf0, 0xc7, 0x8b, 0xe5, 0xf4, 0x22, 0xb8, 0x11, 0x9d, 0xb0, 0xd5, - 0xaa, 0x2b, 0x25, 0x7a, 0x09, 0xaa, 0xf2, 0x6f, 0x1a, 0x53, 0xd3, 0x6f, 0x55, 0xb2, 0x50, 0xc8, - 0x41, 0x89, 0xb8, 0xcf, 0xd4, 0xe0, 0x57, 0x89, 0x3a, 0x37, 0xff, 0x2c, 0x81, 0xd7, 0xa5, 0xa3, - 0x93, 0x2c, 0xc6, 0x43, 0xa8, 0x7e, 0xc0, 0x84, 0xae, 0x35, 0xbe, 0xb6, 0xf4, 0x62, 0x96, 0x9d, - 0xee, 0x33, 0xa5, 0xc4, 0x0d, 0x28, 0xdd, 0x7f, 0x14, 0x31, 0xdd, 0x8b, 0xf1, 0xe9, 0x5e, 0x8c, - 0xf1, 0x2d, 0xa8, 0x7e, 0xc4, 0x75, 0x85, 0xd3, 0x73, 0xbb, 0xfd, 0x12, 0x3c, 0xa2, 0x92, 0x77, - 0x6e, 0x8b, 0x36, 0xac, 0x1b, 0xdc, 0xbd, 0x2c, 0x14, 0x01, 0xda, 0x86, 0x92, 0xd4, 0x9b, 0x5a, - 0xcd, 0x27, 0xc0, 0x22, 0x5a, 0x8d, 0xbf, 0xb1, 0xc0, 0xee, 0x0f, 0xf0, 0xbe, 0xec, 0xb9, 0x71, - 0x90, 0x0a, 0x96, 0xe0, 0xee, 0xf9, 0x9e, 0x42, 0x18, 0x9c, 0x20, 0x36, 0x45, 0x2e, 0x38, 0x96, - 0x4a, 0xdc, 0x99, 0x07, 0x7e, 0x75, 0xe1, 0xcd, 0x18, 0x58, 0x67, 0x19, 0x1c, 0x03, 0xc8, 0xe5, - 0x62, 0x26, 0x1a, 0x15, 0xdf, 0x35, 0xcf, 0x21, 0x70, 0x27, 0x4c, 0x50, 0x53, 0x4f, 
0x75, 0x2e, - 0x0c, 0xb8, 0xf3, 0x9c, 0x9f, 0xa8, 0x0d, 0xfd, 0xa6, 0x6b, 0xf6, 0x46, 0x2c, 0xf7, 0xc6, 0xda, - 0xe2, 0xa5, 0x14, 0xbd, 0xb5, 0x3a, 0xf6, 0x78, 0x3e, 0xf6, 0xba, 0x21, 0xda, 0x0b, 0xf4, 0x7c, - 0xf4, 0xf1, 0x8f, 0x16, 0x40, 0x8f, 0x4f, 0xe2, 0x84, 0xa5, 0x29, 0xf3, 0xf1, 0x17, 0xff, 0x2a, - 0xfa, 0xad, 0x42, 0xf4, 0x56, 0x6b, 0xfd, 0x6f, 0x62, 0xde, 0x5f, 0x8e, 0xf9, 0xfd, 0xd5, 0x98, - 0xaf, 0xae, 0xc6, 0xbc, 0x88, 0xed, 0xac, 0xf0, 0x9b, 0xdf, 0x97, 0xc1, 0xed, 0x47, 0x0f, 0x39, - 0xfe, 0xc1, 0x82, 0x92, 0x5a, 0x59, 0xf8, 0x73, 0x28, 0xf5, 0x78, 0x16, 0x09, 0x19, 0x55, 0x2a, - 0x78, 0xc2, 0x74, 0xfc, 0x35, 0x62, 0x24, 0xd4, 0x80, 0xb5, 0x2c, 0x1a, 0xf1, 0xc9, 0x24, 0x10, - 0x82, 0xf9, 0x8a, 0x48, 0x8d, 0x14, 0x55, 0x72, 0xcb, 0x07, 0xd2, 0x57, 0x10, 0x8d, 0x15, 0xa3, - 0x0a, 0x99, 0xcb, 0xf8, 0x43, 0x70, 0x87, 0xc3, 0xfe, 0x2e, 0x7e, 0x05, 0xaa, 0xbd, 0xb9, 0xc1, - 0x19, 0x89, 0xc2, 0xaf, 0xc2, 0xda, 0xb0, 0xe0, 0xf3, 0x2c, 0xc8, 0x1f, 0x16, 0x38, 0x03, 0xee, - 0xa3, 0xcb, 0x50, 0xa1, 0x71, 0x7c, 0xb8, 0xd8, 0x03, 0xa4, 0x4c, 0xe3, 0x78, 0x5f, 0x6e, 0x00, - 0x64, 0xd6, 0x83, 0x49, 0xf7, 0xe9, 0xad, 0xe0, 0xac, 0x6e, 0x05, 0x3d, 0xb8, 0x6e, 0xa1, 0xd1, - 0x1f, 0xab, 0xc1, 0x45, 0x57, 0xc0, 0x19, 0xc5, 0x59, 0xbd, 0xa4, 0x3e, 0x4a, 0x36, 0xe7, 0x89, - 0x96, 0xe9, 0x6b, 0xf7, 0x06, 0x43, 0x22, 0x6f, 0xd1, 0x0d, 0xf0, 0x26, 0x6c, 0xc2, 0x93, 0x59, - 0xdd, 0x53, 0xb8, 0x8b, 0xcb, 0xb8, 0x7b, 0xea, 0x8e, 0x18, 0x0c, 0xba, 0x66, 0x36, 0x50, 0x59, - 0x61, 0xd1, 0x32, 0x76, 0x9f, 0xfb, 0x4c, 0x6f, 0x25, 0xfc, 0xab, 0x05, 0xae, 0x14, 0xe7, 0x74, - 0xac, 0x02, 0x9d, 0x2b, 0x50, 0x0b, 0x22, 0xc1, 0x92, 0x88, 0x86, 0x87, 0xd4, 0xf7, 0x13, 0xc3, - 0x75, 0x3d, 0x57, 0xde, 0xf1, 0xfd, 0x44, 0x82, 0xd8, 0xe3, 0x22, 0x48, 0xf3, 0x5e, 0xcf, 0x95, - 0x06, 0xa4, 0x18, 0xba, 0xe7, 0x64, 0x58, 0x3a, 0x1f, 0xc3, 0x01, 0xf7, 0x53, 0x93, 0x8d, 0x15, - 0x86, 0xf2, 0x86, 0xa8, 0x7b, 0xbc, 0x07, 0x4e, 0x6f, 0x30, 0x44, 0x17, 0xa1, 0x14, 0x06, 0x93, - 0x40, 0x28, 0x82, 0x16, 0xd1, 0x82, 0xfc, 0x14, 0x4b, 0xf4, 0xea, 0x50, 0xdc, 0x2c, 0x92, 0x8b, - 0x12, 0xaf, 0x7e, 0xb2, 0x14, 0x1d, 0xb9, 0xcb, 0xa4, 0x80, 0xf7, 0xc1, 0xd3, 0x61, 0xfc, 0x47, - 0xfe, 0xde, 0xd1, 0x24, 0x50, 0x07, 0xdc, 0x98, 0xfb, 0xf9, 0xac, 0x6d, 0x9e, 0x22, 0x53, 0xd8, - 0x65, 0x0a, 0x88, 0x6f, 0x43, 0x49, 0x96, 0x2d, 0x45, 0x3b, 0x50, 0x92, 0x85, 0xcc, 0x4d, 0xcf, - 0xa8, 0x74, 0x71, 0x23, 0x2b, 0x28, 0x7e, 0x01, 0x9c, 0xfe, 0x20, 0x55, 0x9f, 0x50, 0xb1, 0xf9, - 0x85, 0xb5, 0x83, 0xb8, 0x59, 0x86, 0xd2, 0xdd, 0x49, 0x2c, 0x66, 0xdd, 0xcf, 0x7e, 0x7a, 0xba, - 0x6d, 0xfd, 0xfc, 0x74, 0xdb, 0xfa, 0xed, 0xe9, 0xb6, 0x05, 0x5b, 0x3c, 0x19, 0xb7, 0xa7, 0x3e, - 0xa5, 0x69, 0x7b, 0x4a, 0x43, 0x3f, 0x7f, 0xa0, 0xbb, 0x76, 0x40, 0x43, 0x7f, 0xa0, 0x85, 0x81, - 0xf5, 0xc9, 0xeb, 0xe3, 0x40, 0x1c, 0x67, 0x47, 0xed, 0x11, 0x9f, 0x74, 0x14, 0x5a, 0xfe, 0x0b, - 0xe0, 0x77, 0x68, 0x1c, 0xa4, 0x9d, 0x71, 0x12, 0x8f, 0x3a, 0xc6, 0xee, 0xc8, 0x53, 0xff, 0x11, - 0xdc, 0xfa, 0x2b, 0x00, 0x00, 0xff, 0xff, 0xa2, 0x9a, 0x9b, 0x8a, 0x44, 0x0c, 0x00, 0x00, +func init() { proto.RegisterFile("apis/proto/payload/payload.proto", fileDescriptor_34c50f4952bdcbdd) } + +var fileDescriptor_34c50f4952bdcbdd = []byte{ + // 1497 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x4f, 0x73, 0x1b, 0xc5, + 0x12, 0x7f, 0xbb, 0x92, 0x56, 0x52, 0xcb, 0xf6, 0xb3, 0xb7, 0xf2, 0x1c, 0x65, 0xde, 0x8b, 0x9f, + 0x50, 0x48, 0x70, 0x85, 0x44, 0x26, 0x0e, 0x24, 0x55, 0xe4, 0x40, 0x45, 0x72, 0x2a, 0xa5, 0x98, + 0x38, 0xaa, 
0x09, 0x36, 0x14, 0x90, 0x32, 0x63, 0xed, 0x44, 0x5e, 0xbc, 0xda, 0x59, 0x76, 0x56, + 0x8a, 0x95, 0x3b, 0x57, 0x0e, 0xdc, 0xf8, 0x06, 0x1c, 0x29, 0x3e, 0x00, 0x27, 0x0e, 0x9c, 0x28, + 0xf8, 0x04, 0x50, 0x39, 0xf0, 0x21, 0x72, 0xa2, 0xe6, 0xdf, 0xee, 0x4a, 0x96, 0x2b, 0x4e, 0x15, + 0x27, 0x4d, 0xf7, 0xfc, 0xba, 0xe7, 0xd7, 0x3d, 0xdd, 0xbd, 0x23, 0x68, 0x90, 0xc8, 0xe7, 0x1b, + 0x51, 0xcc, 0x12, 0xb6, 0x11, 0x91, 0x49, 0xc0, 0x88, 0x67, 0x7e, 0x5b, 0x52, 0xeb, 0x96, 0xb5, + 0x88, 0xee, 0x0e, 0xfc, 0xe4, 0x70, 0x74, 0xd0, 0xea, 0xb3, 0xe1, 0x06, 0x0d, 0xc7, 0x6c, 0x12, + 0xc5, 0xec, 0x78, 0xa2, 0x6c, 0xfb, 0xd7, 0x07, 0x34, 0xbc, 0x3e, 0x26, 0x81, 0xef, 0x91, 0x84, + 0x6e, 0x9c, 0x58, 0x28, 0x5f, 0xcd, 0x5f, 0x4b, 0xe0, 0x3c, 0xa6, 0x24, 0xee, 0x1f, 0xa2, 0xcf, + 0xa0, 0x8c, 0xe9, 0x57, 0x23, 0xca, 0x13, 0xb7, 0x01, 0xce, 0x98, 0xf6, 0x13, 0x16, 0xd7, 0xad, + 0x46, 0x61, 0xdd, 0x6e, 0x57, 0x5e, 0xb6, 0x4b, 0xdf, 0x5a, 0x76, 0xc5, 0xc6, 0x5a, 0xef, 0xb6, + 0xc0, 0xe9, 0xb3, 0xf0, 0xa9, 0x3f, 0xa8, 0xdb, 0x0d, 0x6b, 0xbd, 0xb6, 0xb9, 0xda, 0x32, 0x1c, + 0x95, 0xb7, 0x56, 0x47, 0xee, 0x62, 0x8d, 0x42, 0x1d, 0x58, 0x78, 0x38, 0x0a, 0x12, 0xdf, 0x9c, + 0x70, 0x13, 0x2a, 0xb1, 0x5a, 0x72, 0x79, 0x46, 0x6d, 0xf3, 0xfc, 0xac, 0x07, 0x0d, 0xc5, 0x29, + 0x10, 0x6d, 0x43, 0xb5, 0xbb, 0x65, 0x3c, 0x2c, 0x81, 0xed, 0x7b, 0x75, 0xab, 0x61, 0xad, 0x57, + 0xb1, 0xed, 0x7b, 0xaf, 0xcd, 0xe8, 0x3e, 0x2c, 0x49, 0x46, 0x99, 0xc7, 0xf7, 0x4e, 0x70, 0xba, + 0x30, 0xeb, 0x23, 0x05, 0xe7, 0x58, 0x7d, 0x0c, 0x8b, 0x8f, 0x0e, 0xbe, 0xa4, 0xfd, 0xc4, 0xf8, + 0x59, 0x05, 0x87, 0x49, 0x85, 0x64, 0xb7, 0x80, 0xb5, 0xf4, 0xda, 0x0c, 0xbf, 0xb1, 0xc0, 0x51, + 0x2a, 0xf7, 0x22, 0x80, 0x3e, 0x6f, 0x3f, 0x0d, 0xba, 0xaa, 0x35, 0x5d, 0xcf, 0xbd, 0x00, 0x85, + 0x70, 0x34, 0x94, 0x6e, 0x17, 0xdb, 0xe5, 0x97, 0xed, 0xe2, 0x55, 0x7b, 0xdd, 0xc2, 0x42, 0x27, + 0xc8, 0xc4, 0xc4, 0xf3, 0x47, 0xbc, 0x5e, 0x68, 0x58, 0xeb, 0x36, 0xd6, 0x92, 0x5b, 0x87, 0x32, + 0x8d, 0xb8, 0x1f, 0xb0, 0xb0, 0x5e, 0x94, 0x1b, 0x46, 0x14, 0x3b, 0x89, 0x3f, 0xa4, 0x6c, 0x94, + 0xd4, 0x4b, 0x0d, 0x6b, 0xbd, 0x80, 0x8d, 0x88, 0x9e, 0x40, 0x05, 0x53, 0x1e, 0xb1, 0x90, 0xd3, + 0x57, 0x31, 0xda, 0x84, 0x72, 0x4c, 0xf9, 0x28, 0x48, 0x78, 0xdd, 0x96, 0xa9, 0xac, 0xa7, 0xc1, + 0xaa, 0x64, 0xb5, 0xb6, 0x7c, 0x9e, 0x90, 0xb0, 0x4f, 0xb1, 0x01, 0xa2, 0x0e, 0x54, 0x8d, 0x7b, + 0xee, 0xde, 0x82, 0x6a, 0x6c, 0x04, 0x7d, 0x1b, 0xf5, 0x93, 0x15, 0xa2, 0x00, 0x38, 0x83, 0x36, + 0x7f, 0xb7, 0xa0, 0xf8, 0x90, 0x26, 0x04, 0x9d, 0x87, 0xc2, 0x36, 0x9d, 0xb8, 0xcb, 0x50, 0x38, + 0xa2, 0x13, 0x4d, 0x50, 0x2c, 0x11, 0x82, 0xe2, 0x36, 0x9d, 0x70, 0xd7, 0x85, 0xe2, 0x11, 0x9d, + 0x28, 0xe7, 0x55, 0x2c, 0xd7, 0xc2, 0x68, 0x8f, 0x04, 0xc2, 0x68, 0x4c, 0x02, 0x63, 0x34, 0x26, + 0x81, 0x30, 0xda, 0x23, 0x81, 0x34, 0x1a, 0x93, 0x20, 0x35, 0x12, 0x6b, 0x74, 0x0d, 0x9c, 0x6d, + 0x3a, 0xd1, 0x76, 0xd3, 0x87, 0x19, 0x4f, 0x76, 0xe6, 0xe9, 0x06, 0x94, 0x15, 0x9a, 0xbb, 0x57, + 0xa0, 0x70, 0x34, 0x36, 0xd1, 0x9d, 0x4b, 0xa3, 0x13, 0xf4, 0x5b, 0x0a, 0x83, 0x05, 0xa0, 0xf9, + 0x53, 0x01, 0x1c, 0x95, 0x35, 0x74, 0x0b, 0x2a, 0x26, 0x71, 0x27, 0x3a, 0x00, 0x41, 0xc5, 0xd3, + 0x7b, 0xf2, 0x40, 0x1b, 0xa7, 0x32, 0xba, 0x08, 0x76, 0x77, 0xcb, 0x3d, 0x9f, 0x59, 0xc8, 0x32, + 0x89, 0xed, 0x65, 0x4b, 0x98, 0x8a, 0xb8, 0xbb, 0x5b, 0x5c, 0xb0, 0xf5, 0x3d, 0x13, 0x9c, 0x58, + 0xa2, 0x0e, 0x38, 0x7b, 0xaa, 0xe3, 0x4f, 0xb3, 0xcd, 0x0d, 0x0b, 0x7b, 0xfe, 0xb0, 0x40, 0x77, + 0xa0, 0xac, 0x9c, 0x70, 0xf7, 0x1d, 0x28, 0x2b, 0xa5, 0x09, 0x7b, 0x75, 0xb6, 0x2e, 0x14, 0x12, + 0x1b, 0x18, 0xba, 0x0d, 0xc5, 0x76, 
0xc0, 0x0e, 0x4e, 0x3f, 0x3f, 0x6b, 0x37, 0x3b, 0xdf, 0x6e, + 0x68, 0x0b, 0x2a, 0x1f, 0xb2, 0x3e, 0x49, 0x7c, 0x16, 0x8a, 0x6b, 0x0b, 0xc9, 0x90, 0xea, 0x64, + 0xc9, 0xb5, 0xd0, 0x8d, 0x46, 0xbe, 0xa7, 0xef, 0x46, 0xae, 0x65, 0x02, 0x22, 0xd1, 0x2a, 0x2a, + 0x01, 0x91, 0x2c, 0x4a, 0xe3, 0x45, 0x16, 0x65, 0x60, 0x84, 0x13, 0x45, 0xa9, 0xf9, 0x1b, 0x34, + 0xce, 0xa0, 0xcd, 0x7b, 0x50, 0xee, 0xb0, 0x30, 0x89, 0x59, 0x80, 0xde, 0x07, 0xb7, 0x13, 0x53, + 0x92, 0xd0, 0x6e, 0xe8, 0xd1, 0x63, 0x33, 0x32, 0xde, 0x84, 0x6a, 0xc4, 0x58, 0xb0, 0xcf, 0xfd, + 0xe7, 0x8a, 0x64, 0xda, 0xc6, 0xff, 0xc2, 0x15, 0xb1, 0xf3, 0xd8, 0x7f, 0x4e, 0x9b, 0xdf, 0xd9, + 0x50, 0xc3, 0x34, 0x0a, 0x7c, 0xe5, 0x17, 0xdd, 0x10, 0xfd, 0xd8, 0x67, 0x63, 0x1a, 0x4f, 0xdc, + 0xcb, 0xb0, 0xe4, 0xd1, 0x80, 0x26, 0xd4, 0xdb, 0x27, 0x03, 0x1a, 0x26, 0xe6, 0x16, 0x17, 0xb5, + 0xf6, 0xae, 0x54, 0x22, 0x22, 0x7a, 0xec, 0x80, 0x04, 0xb2, 0x80, 0xae, 0xc2, 0xca, 0xa1, 0x3f, + 0x38, 0xdc, 0x1f, 0x71, 0x32, 0xa0, 0xd3, 0x66, 0xff, 0x16, 0x1b, 0xbb, 0x42, 0xaf, 0x0c, 0xdd, + 0x75, 0x58, 0x0e, 0xd8, 0xb3, 0x69, 0xa8, 0x2d, 0xa1, 0x4b, 0x01, 0x7b, 0x96, 0x43, 0xa2, 0x04, + 0x1c, 0x6d, 0xb3, 0x0a, 0xce, 0x94, 0x53, 0x2d, 0x09, 0xae, 0x31, 0x1d, 0xb2, 0x71, 0xc6, 0x55, + 0x79, 0x5a, 0xd4, 0x5a, 0x6d, 0xfe, 0x36, 0xac, 0xc4, 0x26, 0xda, 0x70, 0xa0, 0xa0, 0xfa, 0x6a, + 0x96, 0x73, 0x1b, 0x12, 0xdd, 0x7c, 0x0a, 0xb0, 0xe5, 0x73, 0x99, 0x0c, 0x1a, 0xa3, 0x4f, 0xb2, + 0x6f, 0xd9, 0x7f, 0xf3, 0x57, 0x9f, 0x55, 0x8e, 0xaa, 0x81, 0xff, 0x41, 0x55, 0xfc, 0xf2, 0x88, + 0xe8, 0x9e, 0xa9, 0xe2, 0x4c, 0x21, 0xab, 0x86, 0x79, 0x54, 0x4e, 0x4e, 0x51, 0x35, 0xcc, 0xa3, + 0xcd, 0x9f, 0x4b, 0xe0, 0xb4, 0x49, 0xff, 0x68, 0x14, 0xa1, 0x5d, 0xa8, 0xde, 0xa7, 0x89, 0xaa, + 0x57, 0x74, 0x65, 0xea, 0x44, 0x59, 0x58, 0xb3, 0x27, 0x0a, 0x25, 0x6a, 0x40, 0xe9, 0xd1, 0xb3, + 0x90, 0xaa, 0x7e, 0x8a, 0x4e, 0xd6, 0x73, 0x84, 0x6e, 0xe6, 0x2a, 0xee, 0xcc, 0x6e, 0xbf, 0x00, + 0x07, 0xcb, 0xe4, 0x9d, 0xd9, 0xa2, 0x05, 0x0b, 0x1a, 0x27, 0x3f, 0x83, 0xee, 0x1a, 0x94, 0x84, + 0x5e, 0xdf, 0x55, 0xda, 0xc5, 0x16, 0x56, 0x6a, 0xf4, 0xb5, 0x05, 0x76, 0xb7, 0x87, 0x76, 0x44, + 0xcd, 0x0d, 0x7c, 0x9e, 0xd0, 0x18, 0xb5, 0xcf, 0x76, 0x94, 0x8b, 0x54, 0x57, 0xd9, 0x33, 0x8e, + 0x65, 0x7f, 0x6d, 0xa4, 0xc4, 0x2f, 0x67, 0xde, 0xb4, 0x81, 0x35, 0xcf, 0xe0, 0x23, 0x00, 0x31, + 0x20, 0xf5, 0x54, 0x72, 0xf3, 0xe7, 0xea, 0xe3, 0xb2, 0x81, 0x54, 0x38, 0xe5, 0xf5, 0xa2, 0xdb, + 0xbc, 0x98, 0x6f, 0xf3, 0x5a, 0xe6, 0x95, 0xbb, 0xef, 0xce, 0x8e, 0x29, 0x94, 0xb6, 0xb9, 0xba, + 0xfc, 0x56, 0x86, 0xce, 0x46, 0xd5, 0xf7, 0x16, 0x40, 0x87, 0x0d, 0xa3, 0x98, 0x72, 0x4e, 0x3d, + 0xf4, 0xe0, 0x95, 0x4c, 0x57, 0x73, 0x4c, 0xe5, 0xe8, 0x3a, 0x95, 0xdf, 0xce, 0x34, 0xbf, 0x0f, + 0x66, 0xf9, 0x5d, 0x9e, 0xe5, 0x97, 0xf1, 0x98, 0x47, 0xb5, 0xf9, 0x43, 0x19, 0x8a, 0xdd, 0xf0, + 0x29, 0x43, 0x3f, 0x5a, 0x50, 0x92, 0xa3, 0x08, 0x3d, 0x81, 0x52, 0x87, 0x8d, 0x42, 0xf9, 0x7e, + 0xe1, 0x09, 0x8b, 0xa9, 0xe2, 0xba, 0x88, 0xb5, 0xe4, 0x36, 0xa0, 0x36, 0x0a, 0xfb, 0x6c, 0x38, + 0xf4, 0x93, 0x84, 0xaa, 0xb9, 0xb9, 0x88, 0xf3, 0x2a, 0xf1, 0x05, 0xf2, 0x85, 0x2f, 0x3f, 0x1c, + 0xc8, 0x88, 0x2a, 0x38, 0x95, 0xd1, 0x03, 0x28, 0xee, 0xee, 0x76, 0xb7, 0xd0, 0xff, 0xa1, 0xda, + 0x49, 0x0d, 0xe6, 0x24, 0x05, 0xbd, 0x01, 0xb5, 0xdd, 0x9c, 0xcf, 0x79, 0x90, 0xbf, 0x2c, 0x28, + 0xf4, 0x98, 0x78, 0xf7, 0x54, 0x48, 0x14, 0xed, 0xe7, 0x46, 0x7b, 0x99, 0x44, 0xd1, 0x8e, 0x9e, + 0xee, 0x52, 0x6d, 0xe7, 0x26, 0xfe, 0x54, 0xb7, 0x17, 0x66, 0xbb, 0x5d, 0x35, 0x64, 0x31, 0x57, + 0xc0, 0xc7, 0xb2, 0x21, 0xdd, 0x4b, 0x50, 0xe8, 0x47, 0x23, 
0xf9, 0x18, 0xaa, 0x6d, 0xae, 0xa4, + 0x89, 0x16, 0xe9, 0x6b, 0x75, 0x7a, 0xbb, 0x58, 0xec, 0xba, 0xd7, 0xc0, 0x19, 0xd2, 0x21, 0x8b, + 0x27, 0x75, 0x47, 0xe2, 0xce, 0x4d, 0xe3, 0x1e, 0xca, 0x3d, 0xac, 0x31, 0xee, 0x15, 0x3d, 0x59, + 0xca, 0x12, 0xeb, 0x4e, 0x63, 0x77, 0x98, 0x47, 0xd5, 0xb4, 0x41, 0x7f, 0x58, 0x50, 0x14, 0xe2, + 0xdc, 0x0f, 0xd8, 0x25, 0x58, 0xf4, 0xc3, 0x84, 0xc6, 0x21, 0x09, 0xf6, 0x89, 0xe7, 0xc5, 0x3a, + 0xd6, 0x05, 0xa3, 0xbc, 0xeb, 0x79, 0xb1, 0x00, 0xd1, 0xe3, 0x3c, 0x48, 0xc5, 0xbd, 0x60, 0x94, + 0x1a, 0x24, 0x23, 0x2c, 0x9e, 0x31, 0xc2, 0xd2, 0xd9, 0x22, 0xec, 0x31, 0x8f, 0xeb, 0x6c, 0xcc, + 0x44, 0x28, 0x76, 0xb0, 0xdc, 0x47, 0xdb, 0x50, 0xe8, 0xf4, 0x76, 0xdd, 0x73, 0x50, 0x0a, 0xfc, + 0xa1, 0xaf, 0x9e, 0xcc, 0x16, 0x56, 0x82, 0x78, 0x8a, 0xea, 0x27, 0xa5, 0x8c, 0xcd, 0xc2, 0x46, + 0x14, 0x78, 0xf9, 0x29, 0x92, 0xe1, 0x88, 0x19, 0x25, 0x04, 0xb4, 0x03, 0x8e, 0xa2, 0xf1, 0x0f, + 0xf9, 0xbb, 0xad, 0x82, 0x70, 0x37, 0xa0, 0x18, 0x31, 0xcf, 0xf4, 0xda, 0xca, 0x89, 0x60, 0x72, + 0x33, 0x4a, 0x02, 0xd1, 0x1d, 0x28, 0x89, 0x6b, 0xe3, 0xee, 0x26, 0x94, 0xc4, 0x45, 0x1a, 0xd3, + 0x39, 0x37, 0x9d, 0x9f, 0xb4, 0x12, 0x8a, 0xfe, 0x03, 0x85, 0x6e, 0x8f, 0xcb, 0xe7, 0x5d, 0xa4, + 0xbf, 0x9c, 0xb6, 0x1f, 0x35, 0xcb, 0x50, 0xba, 0x37, 0x8c, 0x92, 0x49, 0xfb, 0xf3, 0x5f, 0x5e, + 0xac, 0x59, 0xbf, 0xbd, 0x58, 0xb3, 0xfe, 0x7c, 0xb1, 0x66, 0xc1, 0x2a, 0x8b, 0x07, 0xad, 0xb1, + 0x47, 0x08, 0x6f, 0x8d, 0x49, 0xe0, 0x99, 0x03, 0xda, 0xb5, 0x3d, 0x12, 0x78, 0x3d, 0x25, 0xf4, + 0xac, 0x4f, 0xdf, 0xca, 0xfd, 0x57, 0x94, 0x68, 0xf1, 0x4f, 0xd0, 0xdb, 0x90, 0x7f, 0x36, 0x07, + 0x71, 0xd4, 0x37, 0xff, 0x31, 0x0f, 0x1c, 0xf9, 0xc7, 0xf0, 0xe6, 0xdf, 0x01, 0x00, 0x00, 0xff, + 0xff, 0xf4, 0xd6, 0xf1, 0x02, 0x88, 0x0e, 0x00, 0x00, } func (m *Search) Marshal() (dAtA []byte, err error) { @@ -3190,7 +3569,7 @@ func (m *Search_Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Search_IDRequest) Marshal() (dAtA []byte, err error) { +func (m *Search_MultiRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3200,12 +3579,12 @@ func (m *Search_IDRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Search_IDRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *Search_MultiRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Search_IDRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Search_MultiRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3214,14 +3593,55 @@ func (m *Search_IDRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.Config != nil { - { - size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPayload(dAtA, i, uint64(size)) + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Search_IDRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
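For orientation while reading the regenerated marshalers in this hunk: the new Multi* request types are plain repeated-message wrappers around the existing per-request payloads. Below is a minimal sketch of round-tripping a Search_MultiIDRequest through the generated gogo/protobuf Marshal/Unmarshal pair; the import path and the surrounding main function are assumptions for illustration and are not taken from this diff.

package main

import (
	"fmt"

	// Assumed import path for the regenerated payload package; adjust to the repository's actual layout.
	"github.com/vdaas/vald/apis/grpc/payload"
)

func main() {
	// Search_MultiIDRequest simply carries repeated Search_IDRequest messages (field 1).
	req := &payload.Search_MultiIDRequest{
		Requests: []*payload.Search_IDRequest{
			{Id: "uuid-1", Config: &payload.Search_Config{RequestId: "req-1", Num: 10}},
			{Id: "uuid-2", Config: &payload.Search_Config{RequestId: "req-2", Num: 10}},
		},
	}

	// The generated code marshals without reflection and unmarshals back losslessly.
	b, err := req.Marshal()
	if err != nil {
		panic(err)
	}
	var decoded payload.Search_MultiIDRequest
	if err := decoded.Unmarshal(b); err != nil {
		panic(err)
	}
	fmt.Println(len(decoded.Requests)) // 2
}

Search_MultiRequest and Search_Responses follow the same wrapper pattern, so batched APIs can reuse the single-request codecs unchanged.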
+func (m *Search_IDRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Search_IDRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 @@ -3236,6 +3656,93 @@ func (m *Search_IDRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Search_MultiIDRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Search_MultiIDRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Search_MultiIDRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Search_ObjectRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Search_ObjectRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Search_ObjectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Object) > 0 { + i -= len(m.Object) + copy(dAtA[i:], m.Object) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Object))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Search_Config) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3263,24 +3770,31 @@ func (m *Search_Config) MarshalToSizedBuffer(dAtA []byte) (int, error) { if m.Timeout != 0 { i = encodeVarintPayload(dAtA, i, uint64(m.Timeout)) i-- - dAtA[i] = 0x20 + dAtA[i] = 0x28 } if m.Epsilon != 0 { i -= 4 encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Epsilon)))) i-- - dAtA[i] = 0x1d + dAtA[i] = 0x25 } if m.Radius != 0 { i -= 4 encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Radius)))) i-- - dAtA[i] = 0x15 + dAtA[i] = 0x1d } if m.Num != 0 { i = encodeVarintPayload(dAtA, i, uint64(m.Num)) i-- - dAtA[i] = 0x8 + dAtA[i] = 0x10 + } + if len(m.RequestId) > 0 { + i -= len(m.RequestId) + copy(dAtA[i:], m.RequestId) + i = encodeVarintPayload(dAtA, i, uint64(len(m.RequestId))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } 
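The tag-byte constants that change in the Search_Config marshaler above follow directly from the protobuf field-header formula (field_number << 3) | wire_type: inserting RequestId as field 1 shifts Num, Radius, Epsilon, and Timeout to fields 2-5, which is why 0x8, 0x15, 0x1d, and 0x20 become 0x10, 0x1d, 0x25, and 0x28. The short sketch below reproduces those constants; tagByte is a hypothetical helper written for illustration, not part of the generated code. Note that renumbering existing fields this way is not wire-compatible with payloads encoded against the previous layout.

package main

import "fmt"

// tagByte computes the single-byte protobuf field header emitted by the
// generated marshaler: (field_number << 3) | wire_type.
func tagByte(fieldNum, wireType int) byte {
	return byte(fieldNum<<3 | wireType)
}

func main() {
	const (
		wireVarint  = 0 // Num (uint32), Timeout (int64)
		wireBytes   = 2 // RequestId (length-delimited string)
		wireFixed32 = 5 // Radius, Epsilon (float32)
	)
	fmt.Printf("RequestId: %#x\n", tagByte(1, wireBytes))   // 0xa
	fmt.Printf("Num:       %#x\n", tagByte(2, wireVarint))  // 0x10
	fmt.Printf("Radius:    %#x\n", tagByte(3, wireFixed32)) // 0x1d
	fmt.Printf("Epsilon:   %#x\n", tagByte(4, wireFixed32)) // 0x25
	fmt.Printf("Timeout:   %#x\n", tagByte(5, wireVarint))  // 0x28
}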
@@ -3320,6 +3834,54 @@ func (m *Search_Response) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintPayload(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x12 + } + } + if len(m.RequestId) > 0 { + i -= len(m.RequestId) + copy(dAtA[i:], m.RequestId) + i = encodeVarintPayload(dAtA, i, uint64(len(m.RequestId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Search_Responses) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Search_Responses) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Search_Responses) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Responses) > 0 { + for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa } } @@ -3738,9 +4300,9 @@ func (m *Object_Vector) MarshalToSizedBuffer(dAtA []byte) (int, error) { } if len(m.Vector) > 0 { for iNdEx := len(m.Vector) - 1; iNdEx >= 0; iNdEx-- { - f4 := math.Float32bits(float32(m.Vector[iNdEx])) + f5 := math.Float32bits(float32(m.Vector[iNdEx])) i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(f4)) + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(f5)) } i = encodeVarintPayload(dAtA, i, uint64(len(m.Vector)*4)) i-- @@ -3797,7 +4359,7 @@ func (m *Object_Vectors) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Control) Marshal() (dAtA []byte, err error) { +func (m *Object_Blob) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3807,12 +4369,12 @@ func (m *Control) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Control) MarshalTo(dAtA []byte) (int, error) { +func (m *Object_Blob) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Control) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Object_Blob) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3821,10 +4383,24 @@ func (m *Control) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Object) > 0 { + i -= len(m.Object) + copy(dAtA[i:], m.Object) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Object))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *Control_CreateIndexRequest) Marshal() (dAtA []byte, err error) { +func (m *Object_Location) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3834,12 +4410,12 @@ func (m *Control_CreateIndexRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Control_CreateIndexRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *Object_Location) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m 
*Control_CreateIndexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Object_Location) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3848,15 +4424,33 @@ func (m *Control_CreateIndexRequest) MarshalToSizedBuffer(dAtA []byte) (int, err i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.PoolSize != 0 { - i = encodeVarintPayload(dAtA, i, uint64(m.PoolSize)) + if len(m.Ips) > 0 { + for iNdEx := len(m.Ips) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Ips[iNdEx]) + copy(dAtA[i:], m.Ips[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Ips[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Uuid))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *Replication) Marshal() (dAtA []byte, err error) { +func (m *Object_Locations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3866,12 +4460,12 @@ func (m *Replication) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Replication) MarshalTo(dAtA []byte) (int, error) { +func (m *Object_Locations) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Replication) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Object_Locations) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3880,10 +4474,24 @@ func (m *Replication) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Locations) > 0 { + for iNdEx := len(m.Locations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Locations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *Replication_Recovery) Marshal() (dAtA []byte, err error) { +func (m *Control) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3893,12 +4501,12 @@ func (m *Replication_Recovery) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Replication_Recovery) MarshalTo(dAtA []byte) (int, error) { +func (m *Control) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Replication_Recovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Control) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3907,19 +4515,10 @@ func (m *Replication_Recovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.DeletedAgents) > 0 { - for iNdEx := len(m.DeletedAgents) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DeletedAgents[iNdEx]) - copy(dAtA[i:], m.DeletedAgents[iNdEx]) - i = encodeVarintPayload(dAtA, i, uint64(len(m.DeletedAgents[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } -func (m *Replication_Rebalance) Marshal() (dAtA []byte, err error) { +func (m *Control_CreateIndexRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) @@ -3929,12 +4528,12 @@ func (m *Replication_Rebalance) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Replication_Rebalance) MarshalTo(dAtA []byte) (int, error) { +func (m *Control_CreateIndexRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Replication_Rebalance) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Control_CreateIndexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3943,16 +4542,111 @@ func (m *Replication_Rebalance) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.LowUsageAgents) > 0 { - for iNdEx := len(m.LowUsageAgents) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.LowUsageAgents[iNdEx]) - copy(dAtA[i:], m.LowUsageAgents[iNdEx]) - i = encodeVarintPayload(dAtA, i, uint64(len(m.LowUsageAgents[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + if m.PoolSize != 0 { + i = encodeVarintPayload(dAtA, i, uint64(m.PoolSize)) + i-- + dAtA[i] = 0x8 } - if len(m.HighUsageAgents) > 0 { + return len(dAtA) - i, nil +} + +func (m *Replication) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Replication) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Replication) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Replication_Recovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Replication_Recovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Replication_Recovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.DeletedAgents) > 0 { + for iNdEx := len(m.DeletedAgents) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DeletedAgents[iNdEx]) + copy(dAtA[i:], m.DeletedAgents[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.DeletedAgents[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Replication_Rebalance) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Replication_Rebalance) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Replication_Rebalance) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.LowUsageAgents) > 0 { + for iNdEx := len(m.LowUsageAgents) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LowUsageAgents[iNdEx]) + copy(dAtA[i:], m.LowUsageAgents[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.LowUsageAgents[iNdEx]))) + i-- + 
dAtA[i] = 0x12 + } + } + if len(m.HighUsageAgents) > 0 { for iNdEx := len(m.HighUsageAgents) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.HighUsageAgents[iNdEx]) copy(dAtA[i:], m.HighUsageAgents[iNdEx]) @@ -4568,21 +5262,14 @@ func (m *Backup_MetaVector) MarshalToSizedBuffer(dAtA []byte) (int, error) { } if len(m.Vector) > 0 { for iNdEx := len(m.Vector) - 1; iNdEx >= 0; iNdEx-- { - f5 := math.Float32bits(float32(m.Vector[iNdEx])) + f6 := math.Float32bits(float32(m.Vector[iNdEx])) i -= 4 - encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(f5)) + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(f6)) } i = encodeVarintPayload(dAtA, i, uint64(len(m.Vector)*4)) i-- dAtA[i] = 0x1a } - if len(m.Meta) > 0 { - i -= len(m.Meta) - copy(dAtA[i:], m.Meta) - i = encodeVarintPayload(dAtA, i, uint64(len(m.Meta))) - i-- - dAtA[i] = 0x12 - } if len(m.Uuid) > 0 { i -= len(m.Uuid) copy(dAtA[i:], m.Uuid) @@ -4701,13 +5388,6 @@ func (m *Backup_Compressed_MetaVector) MarshalToSizedBuffer(dAtA []byte) (int, e i-- dAtA[i] = 0x1a } - if len(m.Meta) > 0 { - i -= len(m.Meta) - copy(dAtA[i:], m.Meta) - i = encodeVarintPayload(dAtA, i, uint64(len(m.Meta))) - i-- - dAtA[i] = 0x12 - } if len(m.Uuid) > 0 { i -= len(m.Uuid) copy(dAtA[i:], m.Uuid) @@ -5407,6 +6087,24 @@ func (m *Search_Request) Size() (n int) { return n } +func (m *Search_MultiRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Search_IDRequest) Size() (n int) { if m == nil { return 0 @@ -5427,12 +6125,54 @@ func (m *Search_IDRequest) Size() (n int) { return n } +func (m *Search_MultiIDRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Search_ObjectRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Object) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Search_Config) Size() (n int) { if m == nil { return 0 } var l int _ = l + l = len(m.RequestId) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } if m.Num != 0 { n += 1 + sovPayload(uint64(m.Num)) } @@ -5457,6 +6197,10 @@ func (m *Search_Response) Size() (n int) { } var l int _ = l + l = len(m.RequestId) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } if len(m.Results) > 0 { for _, e := range m.Results { l = e.Size() @@ -5469,6 +6213,24 @@ func (m *Search_Response) Size() (n int) { return n } +func (m *Search_Responses) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Responses) > 0 { + for _, e := range m.Responses { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Meta) Size() (n int) { if m == nil { return 0 @@ -5689,6 +6451,70 @@ func (m *Object_Vectors) Size() (n int) { return n } +func (m *Object_Blob) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + l = len(m.Object) + if l > 0 
{ + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Object_Location) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if len(m.Ips) > 0 { + for _, s := range m.Ips { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Object_Locations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Locations) > 0 { + for _, e := range m.Locations { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Control) Size() (n int) { if m == nil { return 0 @@ -6052,10 +6878,6 @@ func (m *Backup_MetaVector) Size() (n int) { if l > 0 { n += 1 + l + sovPayload(uint64(l)) } - l = len(m.Meta) - if l > 0 { - n += 1 + l + sovPayload(uint64(l)) - } if len(m.Vector) > 0 { n += 1 + sovPayload(uint64(len(m.Vector)*4)) + len(m.Vector)*4 } @@ -6111,10 +6933,6 @@ func (m *Backup_Compressed_MetaVector) Size() (n int) { if l > 0 { n += 1 + l + sovPayload(uint64(l)) } - l = len(m.Meta) - if l > 0 { - n += 1 + l + sovPayload(uint64(l)) - } l = len(m.Vector) if l > 0 { n += 1 + l + sovPayload(uint64(l)) @@ -6626,7 +7444,7 @@ func (m *Search_Request) Unmarshal(dAtA []byte) error { } return nil } -func (m *Search_IDRequest) Unmarshal(dAtA []byte) error { +func (m *Search_MultiRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6649,17 +7467,17 @@ func (m *Search_IDRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IDRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MultiRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IDRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MultiRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPayload @@ -6669,27 +7487,595 @@ func (m *Search_IDRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthPayload } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthPayload } if postIndex > l { return io.ErrUnexpectedEOF } - m.Id = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Requests = append(m.Requests, &Search_Request{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Search_IDRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IDRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IDRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Search_Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Search_MultiIDRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MultiIDRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MultiIDRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, &Search_IDRequest{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Search_ObjectRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Object = append(m.Object[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Object == nil { + m.Object = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Search_Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Search_Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Num", wireType) + } + m.Num = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Num |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Radius", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Radius = float32(math.Float32frombits(v)) + case 4: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Epsilon", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Epsilon 
= float32(math.Float32frombits(v)) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + m.Timeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Search_Response) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Response: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6716,10 +8102,8 @@ func (m *Search_IDRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Config == nil { - m.Config = &Search_Config{} - } - if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Results = append(m.Results, &Object_Distance{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6748,7 +8132,7 @@ func (m *Search_IDRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *Search_Config) Unmarshal(dAtA []byte) error { +func (m *Search_Responses) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6771,17 +8155,17 @@ func (m *Search_Config) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Config: wiretype end group for non-group") + return fmt.Errorf("proto: Responses: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + 
return fmt.Errorf("proto: Responses: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Num", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType) } - m.Num = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPayload @@ -6791,38 +8175,139 @@ func (m *Search_Config) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Num |= uint32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 5 { - return fmt.Errorf("proto: wrong wireType = %d for field Radius", wireType) + if msglen < 0 { + return ErrInvalidLengthPayload } - var v uint32 - if (iNdEx + 4) > l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { return io.ErrUnexpectedEOF } - v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - m.Radius = float32(math.Float32frombits(v)) - case 3: - if wireType != 5 { - return fmt.Errorf("proto: wrong wireType = %d for field Epsilon", wireType) + m.Responses = append(m.Responses, &Search_Response{}) + if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var v uint32 - if (iNdEx + 4) > l { + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - m.Epsilon = float32(math.Float32frombits(v)) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Meta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload } - m.Timeout = 0 + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Meta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Meta_Key) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Key: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Key: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPayload @@ -6832,11 +8317,24 @@ func (m *Search_Config) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Timeout |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPayload(dAtA[iNdEx:]) @@ -6862,7 +8360,7 @@ func (m *Search_Config) Unmarshal(dAtA []byte) error { } return nil } -func (m *Search_Response) Unmarshal(dAtA []byte) error { +func (m *Meta_Keys) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6885,17 +8383,17 @@ func (m *Search_Response) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Response: wiretype end group for non-group") + return fmt.Errorf("proto: Keys: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Keys: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPayload @@ -6905,25 +8403,23 @@ func (m *Search_Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthPayload } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthPayload } if postIndex > l { return io.ErrUnexpectedEOF } - m.Results = append(m.Results, &Object_Distance{}) - if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keys = append(m.Keys, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -6950,7 +8446,7 @@ func (m *Search_Response) Unmarshal(dAtA []byte) error { } return nil } -func (m *Meta) Unmarshal(dAtA []byte) error { +func (m *Meta_Val) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for 
iNdEx < l { @@ -6973,12 +8469,44 @@ func (m *Meta) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Meta: wiretype end group for non-group") + return fmt.Errorf("proto: Val: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Val: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Val = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPayload(dAtA[iNdEx:]) @@ -7004,7 +8532,7 @@ func (m *Meta) Unmarshal(dAtA []byte) error { } return nil } -func (m *Meta_Key) Unmarshal(dAtA []byte) error { +func (m *Meta_Vals) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7027,15 +8555,15 @@ func (m *Meta_Key) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Key: wiretype end group for non-group") + return fmt.Errorf("proto: Vals: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Key: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Vals: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Vals", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7063,7 +8591,7 @@ func (m *Meta_Key) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.Vals = append(m.Vals, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -7090,7 +8618,7 @@ func (m *Meta_Key) Unmarshal(dAtA []byte) error { } return nil } -func (m *Meta_Keys) Unmarshal(dAtA []byte) error { +func (m *Meta_KeyVal) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7113,15 +8641,15 @@ func (m *Meta_Keys) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Keys: wiretype end group for non-group") + return fmt.Errorf("proto: KeyVal: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Keys: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KeyVal: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7149,7 +8677,39 @@ func (m *Meta_Keys) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - 
m.Keys = append(m.Keys, string(dAtA[iNdEx:postIndex])) + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Val = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -7176,7 +8736,7 @@ func (m *Meta_Keys) Unmarshal(dAtA []byte) error { } return nil } -func (m *Meta_Val) Unmarshal(dAtA []byte) error { +func (m *Meta_KeyVals) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7199,17 +8759,17 @@ func (m *Meta_Val) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Val: wiretype end group for non-group") + return fmt.Errorf("proto: KeyVals: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Val: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KeyVals: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPayload @@ -7219,23 +8779,25 @@ func (m *Meta_Val) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthPayload } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthPayload } if postIndex > l { return io.ErrUnexpectedEOF } - m.Val = string(dAtA[iNdEx:postIndex]) + m.Kvs = append(m.Kvs, &Meta_KeyVal{}) + if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -7262,7 +8824,7 @@ func (m *Meta_Val) Unmarshal(dAtA []byte) error { } return nil } -func (m *Meta_Vals) Unmarshal(dAtA []byte) error { +func (m *Object) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7285,44 +8847,12 @@ func (m *Meta_Vals) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Vals: wiretype end group for non-group") + return fmt.Errorf("proto: Object: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Vals: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Object: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vals", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Vals = append(m.Vals, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPayload(dAtA[iNdEx:]) @@ -7348,7 +8878,7 @@ func (m *Meta_Vals) Unmarshal(dAtA []byte) error { } return nil } -func (m *Meta_KeyVal) Unmarshal(dAtA []byte) error { +func (m *Object_Distance) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7371,15 +8901,15 @@ func (m *Meta_KeyVal) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KeyVal: wiretype end group for non-group") + return fmt.Errorf("proto: Distance: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KeyVal: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Distance: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7407,40 +8937,19 @@ func (m *Meta_KeyVal) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.Id = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPayload + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Distance", wireType) } - if postIndex > l { + var v uint32 + if (iNdEx + 4) > l { return io.ErrUnexpectedEOF } - m.Val = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Distance = float32(math.Float32frombits(v)) default: iNdEx = preIndex skippy, err := skipPayload(dAtA[iNdEx:]) @@ -7466,7 +8975,7 @@ func (m *Meta_KeyVal) Unmarshal(dAtA []byte) error { } return nil } -func (m *Meta_KeyVals) Unmarshal(dAtA []byte) error { +func (m *Object_ID) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7489,17 +8998,17 @@ func (m *Meta_KeyVals) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KeyVals: wiretype end group for non-group") + return fmt.Errorf("proto: ID: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KeyVals: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ID: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) + return fmt.Errorf("proto: 
wrong wireType = %d for field Id", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPayload @@ -7509,25 +9018,23 @@ func (m *Meta_KeyVals) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthPayload } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthPayload } if postIndex > l { return io.ErrUnexpectedEOF } - m.Kvs = append(m.Kvs, &Meta_KeyVal{}) - if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Id = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -7554,7 +9061,7 @@ func (m *Meta_KeyVals) Unmarshal(dAtA []byte) error { } return nil } -func (m *Object) Unmarshal(dAtA []byte) error { +func (m *Object_IDs) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7577,12 +9084,44 @@ func (m *Object) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Object: wiretype end group for non-group") + return fmt.Errorf("proto: IDs: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Object: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IDs: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ids", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ids = append(m.Ids, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPayload(dAtA[iNdEx:]) @@ -7608,7 +9147,7 @@ func (m *Object) Unmarshal(dAtA []byte) error { } return nil } -func (m *Object_Distance) Unmarshal(dAtA []byte) error { +func (m *Object_Vector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7631,10 +9170,10 @@ func (m *Object_Distance) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Distance: wiretype end group for non-group") + return fmt.Errorf("proto: Vector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Distance: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Vector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -7670,16 +9209,59 @@ func (m *Object_Distance) Unmarshal(dAtA []byte) error { m.Id = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 5 { - return fmt.Errorf("proto: wrong wireType = %d for field Distance", wireType) - } - var v uint32 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF + if wireType == 5 { + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = 
uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + v2 := float32(math.Float32frombits(v)) + m.Vector = append(m.Vector, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 4 + if elementCount != 0 && len(m.Vector) == 0 { + m.Vector = make([]float32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + v2 := float32(math.Float32frombits(v)) + m.Vector = append(m.Vector, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Vector", wireType) } - v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - m.Distance = float32(math.Float32frombits(v)) default: iNdEx = preIndex skippy, err := skipPayload(dAtA[iNdEx:]) @@ -7705,7 +9287,7 @@ func (m *Object_Distance) Unmarshal(dAtA []byte) error { } return nil } -func (m *Object_ID) Unmarshal(dAtA []byte) error { +func (m *Object_Vectors) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7728,17 +9310,17 @@ func (m *Object_ID) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ID: wiretype end group for non-group") + return fmt.Errorf("proto: Vectors: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ID: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Vectors: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Vectors", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPayload @@ -7748,23 +9330,25 @@ func (m *Object_ID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthPayload } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthPayload } if postIndex > l { return io.ErrUnexpectedEOF } - m.Id = string(dAtA[iNdEx:postIndex]) + m.Vectors = append(m.Vectors, &Object_Vector{}) + if err := m.Vectors[len(m.Vectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -7791,7 +9375,7 @@ func (m *Object_ID) Unmarshal(dAtA []byte) error { } return nil } -func (m *Object_IDs) Unmarshal(dAtA []byte) error { +func (m *Object_Blob) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7814,15 +9398,15 @@ func (m *Object_IDs) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IDs: wiretype end group for non-group") + return 
fmt.Errorf("proto: Blob: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IDs: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Blob: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ids", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7850,7 +9434,41 @@ func (m *Object_IDs) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ids = append(m.Ids, string(dAtA[iNdEx:postIndex])) + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Object = append(m.Object[:0], dAtA[iNdEx:postIndex]...) + if m.Object == nil { + m.Object = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex @@ -7877,7 +9495,7 @@ func (m *Object_IDs) Unmarshal(dAtA []byte) error { } return nil } -func (m *Object_Vector) Unmarshal(dAtA []byte) error { +func (m *Object_Location) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7900,15 +9518,15 @@ func (m *Object_Vector) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Vector: wiretype end group for non-group") + return fmt.Errorf("proto: Location: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Vector: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Location: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7936,62 +9554,72 @@ func (m *Object_Vector) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Id = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType == 5 { - var v uint32 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload } - v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - v2 := float32(math.Float32frombits(v)) - m.Vector = append(m.Vector, v2) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - if packedLen < 0 { - return ErrInvalidLengthPayload + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthPayload + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ips", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - var elementCount int - elementCount = packedLen / 4 - if elementCount != 0 && len(m.Vector) == 0 { - m.Vector = make([]float32, 0, elementCount) - } - for iNdEx < postIndex { - var v uint32 - if (iNdEx + 4) > l { - return io.ErrUnexpectedEOF - } - v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) - iNdEx += 4 - v2 := float32(math.Float32frombits(v)) - m.Vector = append(m.Vector, v2) + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Vector", wireType) } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ips = append(m.Ips, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPayload(dAtA[iNdEx:]) @@ -8017,7 +9645,7 @@ func (m *Object_Vector) Unmarshal(dAtA []byte) error { } return nil } -func (m *Object_Vectors) Unmarshal(dAtA []byte) error { +func (m *Object_Locations) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8040,15 +9668,15 @@ func (m *Object_Vectors) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Vectors: wiretype end group for non-group") + return fmt.Errorf("proto: Locations: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Vectors: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Locations: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vectors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Locations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8075,8 +9703,8 @@ func (m *Object_Vectors) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Vectors = append(m.Vectors, &Object_Vector{}) - if err := m.Vectors[len(m.Vectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Locations = append(m.Locations, &Object_Location{}) + if err := m.Locations[len(m.Locations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9917,38 +11545,6 @@ func (m *Backup_MetaVector) Unmarshal(dAtA []byte) error { } m.Uuid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPayload - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Meta = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: if wireType == 5 { var v uint32 @@ -10263,38 +11859,6 @@ func (m *Backup_Compressed_MetaVector) Unmarshal(dAtA []byte) error { } m.Uuid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Meta = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Vector", wireType) diff --git a/apis/grpc/v1/agent/core/agent.pb.go b/apis/grpc/v1/agent/core/agent.pb.go new file mode 100644 index 0000000000..46edc1b26c --- /dev/null +++ b/apis/grpc/v1/agent/core/agent.pb.go @@ -0,0 +1,259 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package core + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("apis/proto/v1/agent/core/agent.proto", fileDescriptor_dc5722b42aaec2d2) +} + +var fileDescriptor_dc5722b42aaec2d2 = []byte{ + // 339 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0xcd, 0x4a, 0x3b, 0x31, + 0x14, 0xc5, 0x99, 0xc2, 0xff, 0x2f, 0x4d, 0x29, 0x62, 0xfc, 0x58, 0x94, 0x52, 0x44, 0xc5, 0x9d, + 0x09, 0xd5, 0x27, 0x68, 0x8b, 0x42, 0x77, 0x7e, 0x80, 0x0b, 0x37, 0x72, 0x3b, 0x49, 0xc7, 0xc8, + 0x34, 0x37, 0x66, 0xd2, 0x60, 0xb7, 0xbe, 0x82, 0x2f, 0xe5, 0x52, 0xf0, 0x05, 0xa4, 0xf8, 0x04, + 0x3e, 0x81, 0x24, 0xd3, 0x62, 0x4b, 0xbb, 0x73, 0x35, 0x43, 0x72, 0xce, 0xef, 0x9e, 0x70, 0x2e, + 0x39, 0x02, 0xa3, 0x0a, 0x6e, 0x2c, 0x3a, 0xe4, 0xbe, 0xcd, 0x21, 0x93, 0xda, 0xf1, 0x14, 0xad, + 0x2c, 0x7f, 0x59, 0xbc, 0xa1, 0x1b, 0xe1, 0x84, 0xf9, 0x76, 0xe3, 0x70, 0x59, 0x6e, 0x60, 0x92, + 0x23, 0x88, 0xf9, 0xb7, 0x54, 0x37, 0x9a, 0x19, 0x62, 0x96, 0x4b, 0x0e, 0x46, 0x71, 0xd0, 0x1a, + 0x1d, 0x38, 0x85, 0xba, 0x28, 0x6f, 0x4f, 0xbf, 0x2b, 0xe4, 0x5f, 0x27, 0xb0, 0xe9, 0x3d, 0xa9, + 0xf5, 0xac, 0x04, 0x27, 0xfb, 0x5a, 0xc8, 0x67, 0x7a, 0xcc, 0xe6, 0x18, 0xdf, 0x66, 0x3d, 0xd4, + 0xce, 0x62, 0xce, 0x16, 0x04, 0xd7, 0xf2, 0x69, 0x2c, 0x0b, 0xd7, 0xd8, 0x5a, 0xd4, 0x9d, 0x8f, + 0x8c, 0x9b, 0x1c, 0xec, 0xbe, 0x7c, 0x7c, 0xbd, 0x56, 0x36, 0x69, 0x9d, 0xab, 0xa0, 0xe4, 0x69, + 0x74, 0xd1, 0x0b, 0x52, 0xbd, 0x01, 0x3f, 0xc3, 0xaf, 0xda, 0xd6, 0x91, 0xb6, 0x23, 0xa9, 0x4e, + 0x6b, 0x33, 0x52, 0x01, 0x5e, 0xd2, 0x11, 0xa1, 0x65, 0x8e, 0x8e, 0x16, 0xbf, 0xc0, 0x3f, 0xe4, + 0x6d, 0xc6, 0x29, 0x7b, 0x74, 0x67, 0x29, 0x2f, 0x68, 0x11, 0xc7, 0x5d, 0x91, 0x6a, 0x04, 0xf4, + 0xf5, 0x10, 0xd7, 0xc5, 0x6e, 0x2e, 0x1e, 0x05, 0x11, 0x8b, 0x72, 0xd6, 0xc3, 0xb1, 0x76, 0x2b, + 0x2f, 0x50, 0x7a, 0x88, 0xdd, 0xc7, 0xb7, 0x69, 0x2b, 0x79, 0x9f, 0xb6, 0x92, 0xcf, 0x69, 0x2b, + 0x21, 0xfb, 0x68, 0x33, 0xe6, 0x05, 0x40, 0xc1, 0x3c, 0xe4, 0x82, 0x81, 0x51, 0x01, 0x55, 0x36, + 0x1e, 0xaa, 0xee, 0x56, 0x6f, 0x21, 0x17, 0xb1, 0xa5, 0xcb, 0xe4, 0xee, 0x24, 0x53, 0xee, 0x61, + 0x3c, 0x60, 0x29, 0x8e, 0x78, 0x74, 0xf1, 0xe0, 0xe2, 0x71, 0x15, 0x32, 0x6b, 0xd2, 0xe5, 0xc5, + 0x19, 0xfc, 0x8f, 0x3d, 0x9f, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0x41, 0xe0, 0x80, 0x4b, 0x5b, + 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// AgentClient is the client API for Agent service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type AgentClient interface { + CreateIndex(ctx context.Context, in *payload.Control_CreateIndexRequest, opts ...grpc.CallOption) (*payload.Empty, error) + SaveIndex(ctx context.Context, in *payload.Empty, opts ...grpc.CallOption) (*payload.Empty, error) + CreateAndSaveIndex(ctx context.Context, in *payload.Control_CreateIndexRequest, opts ...grpc.CallOption) (*payload.Empty, error) + IndexInfo(ctx context.Context, in *payload.Empty, opts ...grpc.CallOption) (*payload.Info_Index_Count, error) +} + +type agentClient struct { + cc *grpc.ClientConn +} + +func NewAgentClient(cc *grpc.ClientConn) AgentClient { + return &agentClient{cc} +} + +func (c *agentClient) CreateIndex(ctx context.Context, in *payload.Control_CreateIndexRequest, opts ...grpc.CallOption) (*payload.Empty, error) { + out := new(payload.Empty) + err := c.cc.Invoke(ctx, "/core.v1.Agent/CreateIndex", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *agentClient) SaveIndex(ctx context.Context, in *payload.Empty, opts ...grpc.CallOption) (*payload.Empty, error) { + out := new(payload.Empty) + err := c.cc.Invoke(ctx, "/core.v1.Agent/SaveIndex", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *agentClient) CreateAndSaveIndex(ctx context.Context, in *payload.Control_CreateIndexRequest, opts ...grpc.CallOption) (*payload.Empty, error) { + out := new(payload.Empty) + err := c.cc.Invoke(ctx, "/core.v1.Agent/CreateAndSaveIndex", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *agentClient) IndexInfo(ctx context.Context, in *payload.Empty, opts ...grpc.CallOption) (*payload.Info_Index_Count, error) { + out := new(payload.Info_Index_Count) + err := c.cc.Invoke(ctx, "/core.v1.Agent/IndexInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AgentServer is the server API for Agent service. +type AgentServer interface { + CreateIndex(context.Context, *payload.Control_CreateIndexRequest) (*payload.Empty, error) + SaveIndex(context.Context, *payload.Empty) (*payload.Empty, error) + CreateAndSaveIndex(context.Context, *payload.Control_CreateIndexRequest) (*payload.Empty, error) + IndexInfo(context.Context, *payload.Empty) (*payload.Info_Index_Count, error) +} + +// UnimplementedAgentServer can be embedded to have forward compatible implementations. 
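Before the server-side stubs that follow, a minimal sketch of how a caller might use the generated AgentClient above. The dial address, dial options, and error handling are illustrative; the import paths follow the v1 layout introduced in this patch.

package main

import (
	"context"
	"fmt"
	"log"

	core "github.com/vdaas/vald/apis/grpc/v1/agent/core"
	payload "github.com/vdaas/vald/apis/grpc/v1/payload"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("vald-agent.default.svc.cluster.local:8081", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := core.NewAgentClient(conn)
	ctx := context.Background()

	// Build the index and persist it in one call.
	if _, err := client.CreateAndSaveIndex(ctx, &payload.Control_CreateIndexRequest{}); err != nil {
		log.Fatal(err)
	}

	// Inspect index statistics.
	info, err := client.IndexInfo(ctx, &payload.Empty{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("index info: %+v\n", info)
}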
+type UnimplementedAgentServer struct { +} + +func (*UnimplementedAgentServer) CreateIndex(ctx context.Context, req *payload.Control_CreateIndexRequest) (*payload.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateIndex not implemented") +} +func (*UnimplementedAgentServer) SaveIndex(ctx context.Context, req *payload.Empty) (*payload.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SaveIndex not implemented") +} +func (*UnimplementedAgentServer) CreateAndSaveIndex(ctx context.Context, req *payload.Control_CreateIndexRequest) (*payload.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateAndSaveIndex not implemented") +} +func (*UnimplementedAgentServer) IndexInfo(ctx context.Context, req *payload.Empty) (*payload.Info_Index_Count, error) { + return nil, status.Errorf(codes.Unimplemented, "method IndexInfo not implemented") +} + +func RegisterAgentServer(s *grpc.Server, srv AgentServer) { + s.RegisterService(&_Agent_serviceDesc, srv) +} + +func _Agent_CreateIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Control_CreateIndexRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AgentServer).CreateIndex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/core.v1.Agent/CreateIndex", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AgentServer).CreateIndex(ctx, req.(*payload.Control_CreateIndexRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Agent_SaveIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AgentServer).SaveIndex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/core.v1.Agent/SaveIndex", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AgentServer).SaveIndex(ctx, req.(*payload.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Agent_CreateAndSaveIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Control_CreateIndexRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AgentServer).CreateAndSaveIndex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/core.v1.Agent/CreateAndSaveIndex", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AgentServer).CreateAndSaveIndex(ctx, req.(*payload.Control_CreateIndexRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Agent_IndexInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AgentServer).IndexInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/core.v1.Agent/IndexInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AgentServer).IndexInfo(ctx, req.(*payload.Empty)) + 
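RegisterAgentServer above wires an AgentServer implementation into these handlers and the service descriptor that follows, and embedding UnimplementedAgentServer keeps an implementation forward compatible as new RPCs are added. A sketch of a server assembled from those pieces; the listen address and the trivial IndexInfo body are illustrative.

package main

import (
	"context"
	"log"
	"net"

	core "github.com/vdaas/vald/apis/grpc/v1/agent/core"
	payload "github.com/vdaas/vald/apis/grpc/v1/payload"
	"google.golang.org/grpc"
)

// agent embeds UnimplementedAgentServer so any RPC it does not override
// returns codes.Unimplemented instead of failing to compile.
type agent struct {
	core.UnimplementedAgentServer
}

func (a *agent) IndexInfo(ctx context.Context, _ *payload.Empty) (*payload.Info_Index_Count, error) {
	// Placeholder response; a real agent would report its index counts here.
	return &payload.Info_Index_Count{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":8081")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	core.RegisterAgentServer(srv, new(agent))
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}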
} + return interceptor(ctx, in, info, handler) +} + +var _Agent_serviceDesc = grpc.ServiceDesc{ + ServiceName: "core.v1.Agent", + HandlerType: (*AgentServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateIndex", + Handler: _Agent_CreateIndex_Handler, + }, + { + MethodName: "SaveIndex", + Handler: _Agent_SaveIndex_Handler, + }, + { + MethodName: "CreateAndSaveIndex", + Handler: _Agent_CreateAndSaveIndex_Handler, + }, + { + MethodName: "IndexInfo", + Handler: _Agent_IndexInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "apis/proto/v1/agent/core/agent.proto", +} diff --git a/apis/grpc/agent/sidecar/sidecar.pb.go b/apis/grpc/v1/agent/sidecar/sidecar.pb.go similarity index 62% rename from apis/grpc/agent/sidecar/sidecar.pb.go rename to apis/grpc/v1/agent/sidecar/sidecar.pb.go index 08e363b775..59771f236d 100644 --- a/apis/grpc/agent/sidecar/sidecar.pb.go +++ b/apis/grpc/v1/agent/sidecar/sidecar.pb.go @@ -21,10 +21,7 @@ import ( fmt "fmt" math "math" - _ "github.com/danielvladco/go-proto-gql/pb" proto "github.com/gogo/protobuf/proto" - _ "github.com/vdaas/vald/apis/grpc/payload" - _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" ) @@ -39,22 +36,22 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func init() { proto.RegisterFile("sidecar/sidecar.proto", fileDescriptor_a79f12d5eccb8a6a) } - -var fileDescriptor_a79f12d5eccb8a6a = []byte{ - // 188 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2d, 0xce, 0x4c, 0x49, - 0x4d, 0x4e, 0x2c, 0xd2, 0x87, 0xd2, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xec, 0x50, 0xae, - 0x14, 0x6f, 0x41, 0x62, 0x65, 0x4e, 0x7e, 0x62, 0x0a, 0x44, 0x5c, 0x4a, 0x26, 0x3d, 0x3f, 0x3f, - 0x3d, 0x27, 0x55, 0x3f, 0xb1, 0x20, 0x53, 0x3f, 0x31, 0x2f, 0x2f, 0xbf, 0x24, 0xb1, 0x24, 0x33, - 0x3f, 0xaf, 0x18, 0x2a, 0xcb, 0x53, 0x90, 0xa4, 0x9f, 0x5e, 0x98, 0x03, 0xe1, 0x19, 0x71, 0x72, - 0xb1, 0x07, 0x43, 0x4c, 0x71, 0xca, 0x3d, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, - 0x8f, 0xe4, 0x18, 0xb9, 0x64, 0xf2, 0x8b, 0xd2, 0xf5, 0xca, 0x52, 0x12, 0x13, 0x8b, 0xf5, 0xca, - 0x12, 0x73, 0x52, 0xf4, 0x12, 0xd3, 0x53, 0xf3, 0x4a, 0xf4, 0xa0, 0x36, 0x3a, 0x09, 0x84, 0x25, - 0xe6, 0xa4, 0x38, 0x82, 0x84, 0xa0, 0xba, 0x03, 0x18, 0xa3, 0x74, 0xd3, 0x33, 0x4b, 0x32, 0x4a, - 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xc1, 0x1a, 0xf5, 0x41, 0x1a, 0x41, 0xae, 0x28, 0xd6, 0x4f, - 0x2f, 0x2a, 0x48, 0xd6, 0x07, 0x1b, 0x01, 0xf3, 0x43, 0x12, 0x1b, 0xd8, 0x01, 0xc6, 0x80, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x1c, 0x2f, 0x76, 0x1a, 0xdd, 0x00, 0x00, 0x00, +func init() { + proto.RegisterFile("apis/proto/v1/agent/sidecar/sidecar.proto", fileDescriptor_c78d66f1184a1433) +} + +var fileDescriptor_c78d66f1184a1433 = []byte{ + // 160 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0x2c, 0xc8, 0x2c, + 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x33, 0xd4, 0x4f, 0x4c, 0x4f, 0xcd, 0x2b, 0xd1, + 0x2f, 0xce, 0x4c, 0x49, 0x4d, 0x4e, 0x2c, 0x82, 0xd1, 0x7a, 0x60, 0x69, 0x21, 0x2e, 0x18, 0xb7, + 0xcc, 0xd0, 0x88, 0x93, 0x8b, 0x3d, 0x18, 0xc2, 0x73, 0x2a, 0x3f, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, + 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0xb9, 0x94, 0xf3, 0x8b, 0xd2, 0xf5, 0xca, 0x52, 0x12, + 0x13, 0x8b, 0xf5, 0xca, 0x12, 0x73, 0x52, 0xf4, 0x12, 0x0b, 0x32, 0xf5, 0xca, 0x0c, 0xf5, 0xc0, + 0xe6, 0xea, 0x41, 0x0d, 0x70, 0x12, 
0x08, 0x4b, 0xcc, 0x49, 0x71, 0x04, 0x09, 0x41, 0x0d, 0x09, + 0x60, 0x8c, 0x32, 0x48, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x07, 0xeb, + 0xd7, 0x07, 0xe9, 0xd7, 0x07, 0x3b, 0x2f, 0xbd, 0xa8, 0x20, 0x19, 0xc3, 0x75, 0x49, 0x6c, 0x60, + 0x67, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x05, 0x1b, 0xf4, 0xc3, 0x00, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -92,9 +89,9 @@ func RegisterSidecarServer(s *grpc.Server, srv SidecarServer) { } var _Sidecar_serviceDesc = grpc.ServiceDesc{ - ServiceName: "sidecar.Sidecar", + ServiceName: "sidecar.v1.Sidecar", HandlerType: (*SidecarServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{}, - Metadata: "sidecar/sidecar.proto", + Metadata: "apis/proto/v1/agent/sidecar/sidecar.proto", } diff --git a/apis/grpc/discoverer/discoverer.pb.go b/apis/grpc/v1/discoverer/discoverer.pb.go similarity index 69% rename from apis/grpc/discoverer/discoverer.pb.go rename to apis/grpc/v1/discoverer/discoverer.pb.go index 0f373a8f1b..ae1a2d1bb8 100644 --- a/apis/grpc/discoverer/discoverer.pb.go +++ b/apis/grpc/v1/discoverer/discoverer.pb.go @@ -21,9 +21,8 @@ import ( fmt "fmt" math "math" - _ "github.com/danielvladco/go-proto-gql/pb" proto "github.com/gogo/protobuf/proto" - payload "github.com/vdaas/vald/apis/grpc/payload" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -41,27 +40,30 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func init() { proto.RegisterFile("discoverer.proto", fileDescriptor_9fa655cb815aa581) } - -var fileDescriptor_9fa655cb815aa581 = []byte{ - // 266 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0xc9, 0x2c, 0x4e, - 0xce, 0x2f, 0x4b, 0x2d, 0x4a, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88, - 0x48, 0xf1, 0x16, 0x24, 0x56, 0xe6, 0xe4, 0x27, 0xa6, 0x40, 0xa4, 0xa4, 0x64, 0xd2, 0xf3, 0xf3, - 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13, 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, - 0xf3, 0xf3, 0x8a, 0xa1, 0xb2, 0x3c, 0x05, 0x49, 0xfa, 0xe9, 0x85, 0x39, 0x10, 0x9e, 0xd1, 0x5e, - 0x46, 0x2e, 0x2e, 0x17, 0xb8, 0x49, 0x42, 0x41, 0x5c, 0x2c, 0x01, 0xf9, 0x29, 0xc5, 0x42, 0xd2, - 0x7a, 0x30, 0x23, 0x11, 0x92, 0x7a, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x52, 0x42, 0x70, - 0x49, 0xcf, 0xbc, 0xb4, 0x7c, 0x3d, 0x90, 0x06, 0x25, 0xc9, 0xa6, 0xcb, 0x4f, 0x26, 0x33, 0x09, - 0x2b, 0xf1, 0xe9, 0xc3, 0xdc, 0xa5, 0x5f, 0x90, 0x9f, 0x52, 0x6c, 0xc5, 0xa8, 0x25, 0x14, 0xca, - 0xc5, 0xea, 0x97, 0x9f, 0x92, 0x4a, 0xc0, 0x50, 0x61, 0x54, 0x43, 0xc1, 0x3a, 0x94, 0xa4, 0xc0, - 0xa6, 0x8a, 0x28, 0xf1, 0x23, 0x4c, 0xcd, 0x03, 0x49, 0x58, 0x31, 0x6a, 0x49, 0xb1, 0x6c, 0x78, - 0x20, 0xcf, 0xe4, 0x94, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, - 0x31, 0x72, 0x49, 0xe6, 0x17, 0xa5, 0xeb, 0x95, 0xa5, 0x24, 0x26, 0x16, 0xeb, 0x95, 0x25, 0xe6, - 0xa4, 0xe8, 0x21, 0xc2, 0xc8, 0x09, 0xc9, 0x97, 0x01, 0x8c, 0x51, 0x5a, 0xe9, 0x99, 0x25, 0x19, - 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x60, 0xf5, 0xfa, 0x20, 0xf5, 0xa0, 0x10, 0x2b, 0xd6, - 0x4f, 0x2f, 0x2a, 0x48, 0xd6, 0x47, 0xe8, 0x4c, 0x62, 0x03, 0x87, 0x94, 0x31, 0x20, 0x00, 0x00, - 0xff, 0xff, 0x76, 0x50, 0xc3, 0x83, 0x84, 0x01, 0x00, 0x00, +func init() { + 
proto.RegisterFile("apis/proto/v1/discoverer/discoverer.proto", fileDescriptor_374200cbacdb4f39) +} + +var fileDescriptor_374200cbacdb4f39 = []byte{ + // 280 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0xbf, 0x4a, 0xc4, 0x30, + 0x18, 0xc0, 0x89, 0xa8, 0x43, 0xc0, 0x13, 0xea, 0x1f, 0xb0, 0x48, 0x91, 0x3a, 0x79, 0x60, 0x42, + 0x75, 0xbb, 0xf1, 0x70, 0x71, 0x91, 0xc3, 0x41, 0x44, 0xa7, 0xef, 0x9a, 0x58, 0x03, 0x35, 0x5f, + 0x4c, 0x72, 0x01, 0x57, 0x5f, 0xc1, 0x47, 0x72, 0x71, 0x14, 0x7c, 0x01, 0x29, 0x3e, 0x88, 0x34, + 0xc7, 0x5d, 0x7b, 0x38, 0x38, 0x25, 0xe4, 0xc7, 0xef, 0x97, 0xf0, 0x85, 0x9e, 0x80, 0x51, 0x8e, + 0x1b, 0x8b, 0x1e, 0x79, 0x28, 0xb8, 0x50, 0xae, 0xc4, 0x20, 0xad, 0xb4, 0xbd, 0x2d, 0x8b, 0x38, + 0xd9, 0xea, 0x9d, 0x84, 0x22, 0x3d, 0x5e, 0x35, 0x0d, 0xbc, 0xd4, 0x08, 0x62, 0xb1, 0xce, 0x9d, + 0xf4, 0xb0, 0x42, 0xac, 0x6a, 0xc9, 0xc1, 0x28, 0x0e, 0x5a, 0xa3, 0x07, 0xaf, 0x50, 0xbb, 0x39, + 0x3d, 0x7b, 0x27, 0x94, 0x5e, 0x2c, 0xa3, 0xc9, 0x2d, 0x5d, 0x9f, 0xa0, 0x70, 0x49, 0xc6, 0x16, + 0x91, 0x50, 0xb0, 0x8e, 0xb3, 0x6b, 0xf9, 0x3c, 0x93, 0xce, 0xa7, 0x7b, 0x7d, 0x7e, 0xa9, 0x1f, + 0x90, 0xb5, 0x5a, 0x7e, 0xf0, 0xfa, 0xf5, 0xf3, 0xb6, 0xb6, 0x93, 0x0f, 0x96, 0x4f, 0xe7, 0x06, + 0x85, 0x1b, 0x91, 0x61, 0x72, 0x4f, 0x37, 0xae, 0x50, 0xc8, 0xff, 0xd3, 0xfb, 0x7f, 0xd2, 0xd1, + 0xcb, 0xd3, 0xd8, 0xde, 0xcd, 0xb7, 0xbb, 0xb6, 0x6e, 0xc1, 0x88, 0x0c, 0xc7, 0xf8, 0xd1, 0x64, + 0xe4, 0xb3, 0xc9, 0xc8, 0x77, 0x93, 0x11, 0x7a, 0x84, 0xb6, 0x62, 0x41, 0x00, 0x38, 0x16, 0xa0, + 0x16, 0x0c, 0x8c, 0x6a, 0x5b, 0xdd, 0xec, 0xc6, 0x83, 0x1b, 0xa8, 0x45, 0x77, 0xf7, 0x84, 0xdc, + 0x9d, 0x56, 0xca, 0x3f, 0xce, 0xa6, 0xac, 0xc4, 0x27, 0x1e, 0x55, 0xde, 0xaa, 0x3c, 0x0e, 0xb8, + 0xb2, 0xa6, 0x5c, 0xfd, 0x99, 0xe9, 0x66, 0x9c, 0xde, 0xf9, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x2f, 0x88, 0x6b, 0xe9, 0xbc, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -90,7 +92,7 @@ func NewDiscovererClient(cc *grpc.ClientConn) DiscovererClient { func (c *discovererClient) Pods(ctx context.Context, in *payload.Discoverer_Request, opts ...grpc.CallOption) (*payload.Info_Pods, error) { out := new(payload.Info_Pods) - err := c.cc.Invoke(ctx, "/discoverer.Discoverer/Pods", in, out, opts...) + err := c.cc.Invoke(ctx, "/discoverer.v1.Discoverer/Pods", in, out, opts...) if err != nil { return nil, err } @@ -99,7 +101,7 @@ func (c *discovererClient) Pods(ctx context.Context, in *payload.Discoverer_Requ func (c *discovererClient) Nodes(ctx context.Context, in *payload.Discoverer_Request, opts ...grpc.CallOption) (*payload.Info_Nodes, error) { out := new(payload.Info_Nodes) - err := c.cc.Invoke(ctx, "/discoverer.Discoverer/Nodes", in, out, opts...) + err := c.cc.Invoke(ctx, "/discoverer.v1.Discoverer/Nodes", in, out, opts...) 
if err != nil { return nil, err } @@ -137,7 +139,7 @@ func _Discoverer_Pods_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/discoverer.Discoverer/Pods", + FullMethod: "/discoverer.v1.Discoverer/Pods", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DiscovererServer).Pods(ctx, req.(*payload.Discoverer_Request)) @@ -155,7 +157,7 @@ func _Discoverer_Nodes_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/discoverer.Discoverer/Nodes", + FullMethod: "/discoverer.v1.Discoverer/Nodes", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DiscovererServer).Nodes(ctx, req.(*payload.Discoverer_Request)) @@ -164,7 +166,7 @@ func _Discoverer_Nodes_Handler(srv interface{}, ctx context.Context, dec func(in } var _Discoverer_serviceDesc = grpc.ServiceDesc{ - ServiceName: "discoverer.Discoverer", + ServiceName: "discoverer.v1.Discoverer", HandlerType: (*DiscovererServer)(nil), Methods: []grpc.MethodDesc{ { @@ -177,5 +179,5 @@ var _Discoverer_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "discoverer.proto", + Metadata: "apis/proto/v1/discoverer/discoverer.proto", } diff --git a/apis/grpc/errors/errors.pb.go b/apis/grpc/v1/errors/errors.pb.go similarity index 89% rename from apis/grpc/errors/errors.pb.go rename to apis/grpc/v1/errors/errors.pb.go index 9c780a00bb..cde2169922 100644 --- a/apis/grpc/errors/errors.pb.go +++ b/apis/grpc/v1/errors/errors.pb.go @@ -22,7 +22,6 @@ import ( math "math" math_bits "math/bits" - _ "github.com/envoyproxy/protoc-gen-validate/validate" proto "github.com/gogo/protobuf/proto" ) @@ -47,7 +46,7 @@ func (m *Errors) Reset() { *m = Errors{} } func (m *Errors) String() string { return proto.CompactTextString(m) } func (*Errors) ProtoMessage() {} func (*Errors) Descriptor() ([]byte, []int) { - return fileDescriptor_24fe73c7f0ddb19c, []int{0} + return fileDescriptor_3da6f8f1f37746ee, []int{0} } func (m *Errors) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -93,7 +92,7 @@ func (m *Errors_RPC) Reset() { *m = Errors_RPC{} } func (m *Errors_RPC) String() string { return proto.CompactTextString(m) } func (*Errors_RPC) ProtoMessage() {} func (*Errors_RPC) Descriptor() ([]byte, []int) { - return fileDescriptor_24fe73c7f0ddb19c, []int{0, 0} + return fileDescriptor_3da6f8f1f37746ee, []int{0, 0} } func (m *Errors_RPC) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -172,31 +171,31 @@ func (m *Errors_RPC) GetRoots() []*Errors_RPC { } func init() { - proto.RegisterType((*Errors)(nil), "errors.Errors") - proto.RegisterType((*Errors_RPC)(nil), "errors.Errors.RPC") -} - -func init() { proto.RegisterFile("errors.proto", fileDescriptor_24fe73c7f0ddb19c) } - -var fileDescriptor_24fe73c7f0ddb19c = []byte{ - // 266 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x90, 0xbd, 0x6a, 0xc3, 0x30, - 0x14, 0x85, 0x51, 0x95, 0x38, 0xcd, 0x6d, 0x87, 0x72, 0xe9, 0x8f, 0xc8, 0x60, 0x4c, 0x87, 0xe2, - 0x49, 0x86, 0xf6, 0x0d, 0x12, 0xba, 0x1b, 0x0d, 0x85, 0x76, 0xbb, 0xb1, 0x8c, 0x6b, 0x70, 0x22, - 0x23, 0x29, 0x86, 0x3e, 0x5a, 0xf7, 0x0e, 0x1d, 0xfb, 0x08, 0xc5, 0x4f, 0x52, 0x22, 0x39, 0xdd, - 0xce, 0x27, 0xce, 0x07, 0x57, 0x07, 0x2e, 0x6b, 0x6b, 0x8d, 0x75, 0xb2, 0xb7, 0xc6, 0x1b, 0x4c, - 0x22, 0xad, 0xee, 0x06, 0xea, 0x5a, 0x4d, 0xbe, 0x2e, 0x4e, 0x21, 0x16, 0xee, 0xbf, 0x18, 
0x24, - 0xcf, 0xb1, 0xf3, 0xc9, 0x80, 0xab, 0x72, 0x83, 0x08, 0x33, 0xff, 0xd1, 0xd7, 0x82, 0x65, 0x2c, - 0x5f, 0xaa, 0x90, 0xf1, 0x0a, 0xf8, 0xce, 0x35, 0xe2, 0x2c, 0x3c, 0x1d, 0x23, 0x0a, 0x58, 0xe8, - 0xda, 0x53, 0xdb, 0x39, 0xc1, 0x33, 0x9e, 0x2f, 0xd5, 0x09, 0x71, 0x05, 0xe7, 0xed, 0xde, 0x79, - 0xda, 0x57, 0xb5, 0x98, 0x05, 0xe1, 0x9f, 0xf1, 0x16, 0x12, 0xe7, 0xc9, 0x1f, 0x9c, 0x98, 0x67, - 0x2c, 0xe7, 0x6a, 0x22, 0xbc, 0x86, 0x79, 0xb8, 0x54, 0x24, 0x41, 0x88, 0x80, 0x39, 0xcc, 0xad, - 0x31, 0xde, 0x89, 0x45, 0xc6, 0xf3, 0x8b, 0x47, 0x94, 0xd3, 0xdf, 0xe2, 0xc1, 0x52, 0x95, 0x1b, - 0x15, 0x0b, 0xeb, 0xd7, 0xef, 0x31, 0x65, 0x3f, 0x63, 0xca, 0x7e, 0xc7, 0x94, 0xc1, 0x8d, 0xb1, - 0x8d, 0x1c, 0x34, 0x91, 0x93, 0x03, 0x75, 0x7a, 0xd2, 0xd6, 0xf0, 0x42, 0x9d, 0x8e, 0x6e, 0xc9, - 0xde, 0x1e, 0x9a, 0xd6, 0xbf, 0x1f, 0xb6, 0xb2, 0x32, 0xbb, 0x22, 0x74, 0x8f, 0xd3, 0xe8, 0x82, - 0xfa, 0xd6, 0x15, 0x8d, 0xed, 0xab, 0x22, 0x5a, 0xdb, 0x24, 0x0c, 0xf5, 0xf4, 0x17, 0x00, 0x00, - 0xff, 0xff, 0x27, 0xce, 0xc6, 0x25, 0x59, 0x01, 0x00, 0x00, + proto.RegisterType((*Errors)(nil), "errors.v1.Errors") + proto.RegisterType((*Errors_RPC)(nil), "errors.v1.Errors.RPC") +} + +func init() { proto.RegisterFile("apis/proto/v1/errors/errors.proto", fileDescriptor_3da6f8f1f37746ee) } + +var fileDescriptor_3da6f8f1f37746ee = []byte{ + // 271 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x90, 0xc1, 0x4a, 0xec, 0x30, + 0x14, 0x86, 0xc9, 0xcd, 0xb4, 0x73, 0x7b, 0xdc, 0x48, 0x50, 0x09, 0x83, 0x94, 0xea, 0xaa, 0x22, + 0x24, 0x8c, 0xbe, 0xc1, 0x0c, 0xee, 0x4b, 0x16, 0x2e, 0xdc, 0x65, 0xda, 0x52, 0x0b, 0x9d, 0x49, + 0x49, 0x32, 0x05, 0xdf, 0xcd, 0xbd, 0x2e, 0x7d, 0x04, 0xe9, 0x93, 0x48, 0x4f, 0xea, 0xb8, 0xca, + 0xf9, 0x0e, 0xf9, 0x0e, 0x3f, 0x3f, 0xdc, 0xe8, 0xbe, 0x75, 0xb2, 0xb7, 0xc6, 0x1b, 0x39, 0xac, + 0x65, 0x6d, 0xad, 0xb1, 0x6e, 0x7e, 0x04, 0xae, 0x59, 0x32, 0xd3, 0xb0, 0xbe, 0xfd, 0x20, 0x10, + 0x3f, 0x21, 0xad, 0xde, 0x09, 0x50, 0x55, 0x6c, 0x19, 0x83, 0x85, 0x7f, 0xeb, 0x6b, 0x4e, 0x32, + 0x92, 0x27, 0x0a, 0x67, 0x76, 0x0e, 0x74, 0xef, 0x1a, 0xfe, 0x0f, 0x57, 0xd3, 0xc8, 0x38, 0x2c, + 0xab, 0xda, 0xeb, 0xb6, 0x73, 0x9c, 0x66, 0x34, 0x4f, 0xd4, 0x2f, 0xb2, 0x15, 0xfc, 0x6f, 0x0f, + 0xce, 0xeb, 0x43, 0x59, 0xf3, 0x05, 0x0a, 0x27, 0x66, 0x57, 0x10, 0x3b, 0xaf, 0xfd, 0xd1, 0xf1, + 0x28, 0x23, 0x39, 0x55, 0x33, 0xb1, 0x0b, 0x88, 0x30, 0x13, 0x8f, 0x51, 0x08, 0xc0, 0xee, 0x21, + 0xb2, 0xc6, 0x78, 0xc7, 0x97, 0x19, 0xcd, 0xcf, 0x1e, 0x2e, 0xc5, 0x29, 0xb7, 0x08, 0x99, 0x85, + 0x2a, 0xb6, 0x2a, 0xfc, 0xd9, 0x94, 0x9f, 0x63, 0x4a, 0xbe, 0xc6, 0x94, 0x7c, 0x8f, 0x29, 0x81, + 0x6b, 0x63, 0x1b, 0x31, 0x54, 0x5a, 0x3b, 0x31, 0xe8, 0xae, 0x12, 0xba, 0x6f, 0x27, 0x2d, 0x1c, + 0xd8, 0xc0, 0xb3, 0xee, 0xaa, 0x70, 0xa2, 0x20, 0x2f, 0x77, 0x4d, 0xeb, 0x5f, 0x8f, 0x3b, 0x51, + 0x9a, 0xbd, 0x44, 0x45, 0x4e, 0x8a, 0xc4, 0x1e, 0x1b, 0xdb, 0x97, 0x7f, 0x35, 0xee, 0x62, 0x2c, + 0xf0, 0xf1, 0x27, 0x00, 0x00, 0xff, 0xff, 0x6c, 0xfd, 0x0b, 0x3c, 0x65, 0x01, 0x00, 0x00, } func (m *Errors) Marshal() (dAtA []byte, err error) { diff --git a/apis/grpc/v1/filter/egress/egress_filter.pb.go b/apis/grpc/v1/filter/egress/egress_filter.pb.go new file mode 100644 index 0000000000..287c93b9a2 --- /dev/null +++ b/apis/grpc/v1/filter/egress/egress_filter.pb.go @@ -0,0 +1,214 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package egress + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("apis/proto/v1/filter/egress/egress_filter.proto", fileDescriptor_7f3e67472eb32d70) +} + +var fileDescriptor_7f3e67472eb32d70 = []byte{ + // 251 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0x2c, 0xc8, 0x2c, + 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x33, 0xd4, 0x4f, 0xcb, 0xcc, 0x29, 0x49, 0x2d, + 0xd2, 0x4f, 0x4d, 0x2f, 0x4a, 0x2d, 0x2e, 0x86, 0x52, 0xf1, 0x10, 0x41, 0x3d, 0xb0, 0x22, 0x21, + 0x01, 0x28, 0x0f, 0x22, 0xa7, 0x57, 0x66, 0x28, 0xa5, 0x8c, 0x6a, 0x44, 0x41, 0x62, 0x65, 0x4e, + 0x7e, 0x62, 0x0a, 0x8c, 0x86, 0x68, 0x93, 0x92, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x05, 0x59, + 0xa7, 0x9f, 0x98, 0x97, 0x97, 0x5f, 0x92, 0x58, 0x92, 0x99, 0x9f, 0x57, 0x0c, 0x91, 0x35, 0x5a, + 0xc2, 0xc8, 0xc5, 0xe3, 0x0a, 0x36, 0xd0, 0x0d, 0x6c, 0xba, 0x90, 0x0b, 0x17, 0x1b, 0x94, 0x25, + 0xad, 0x07, 0x33, 0xa8, 0xcc, 0x50, 0xcf, 0x3f, 0x29, 0x2b, 0x35, 0xb9, 0x44, 0xcf, 0x25, 0xb3, + 0xb8, 0x24, 0x31, 0x2f, 0x39, 0x55, 0x0a, 0x9f, 0xa4, 0x12, 0x83, 0x90, 0x1f, 0x17, 0x4f, 0x70, + 0x49, 0x51, 0x6a, 0x62, 0x2e, 0xe5, 0x66, 0x69, 0x30, 0x1a, 0x30, 0x3a, 0x95, 0x9f, 0x78, 0x24, + 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x5c, 0xca, 0xf9, 0x45, 0xe9, 0x7a, + 0x65, 0x29, 0x89, 0x89, 0xc5, 0x7a, 0x65, 0x89, 0x39, 0x29, 0x7a, 0x89, 0x05, 0x99, 0x20, 0xad, + 0x28, 0xe1, 0xe3, 0x24, 0x10, 0x96, 0x98, 0x93, 0x82, 0xec, 0xb5, 0x00, 0xc6, 0x28, 0x83, 0xf4, + 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0x7e, 0x7d, 0x90, 0x7e, 0x48, + 0x4c, 0xa4, 0x17, 0x15, 0x24, 0x63, 0x44, 0x44, 0x12, 0x1b, 0x38, 0x98, 0x8c, 0x01, 0x01, 0x00, + 0x00, 0xff, 0xff, 0x92, 0x05, 0x02, 0x97, 0xae, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// EgressFilterClient is the client API for EgressFilter service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
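+//
+// A minimal usage sketch (editorial note, not emitted by the code generator): a
+// caller is expected to dial the egress filter endpoint and invoke Filter with a
+// payload.Object_Distance. The address, field names, and values below are
+// illustrative assumptions only.
+//
+//	conn, err := grpc.Dial("localhost:8081", grpc.WithInsecure())
+//	if err != nil {
+//		// handle dial error
+//	}
+//	defer conn.Close()
+//	client := NewEgressFilterClient(conn)
+//	res, err := client.Filter(context.Background(), &payload.Object_Distance{
+//		Id:       "vector-id-1",
+//		Distance: 0.12,
+//	})
+//	_ = res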
+type EgressFilterClient interface { + Filter(ctx context.Context, in *payload.Object_Distance, opts ...grpc.CallOption) (*payload.Object_Distance, error) + StreamFilter(ctx context.Context, opts ...grpc.CallOption) (EgressFilter_StreamFilterClient, error) +} + +type egressFilterClient struct { + cc *grpc.ClientConn +} + +func NewEgressFilterClient(cc *grpc.ClientConn) EgressFilterClient { + return &egressFilterClient{cc} +} + +func (c *egressFilterClient) Filter(ctx context.Context, in *payload.Object_Distance, opts ...grpc.CallOption) (*payload.Object_Distance, error) { + out := new(payload.Object_Distance) + err := c.cc.Invoke(ctx, "/filter.egress.v1.EgressFilter/Filter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *egressFilterClient) StreamFilter(ctx context.Context, opts ...grpc.CallOption) (EgressFilter_StreamFilterClient, error) { + stream, err := c.cc.NewStream(ctx, &_EgressFilter_serviceDesc.Streams[0], "/filter.egress.v1.EgressFilter/StreamFilter", opts...) + if err != nil { + return nil, err + } + x := &egressFilterStreamFilterClient{stream} + return x, nil +} + +type EgressFilter_StreamFilterClient interface { + Send(*payload.Object_Distance) error + Recv() (*payload.Object_Distance, error) + grpc.ClientStream +} + +type egressFilterStreamFilterClient struct { + grpc.ClientStream +} + +func (x *egressFilterStreamFilterClient) Send(m *payload.Object_Distance) error { + return x.ClientStream.SendMsg(m) +} + +func (x *egressFilterStreamFilterClient) Recv() (*payload.Object_Distance, error) { + m := new(payload.Object_Distance) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// EgressFilterServer is the server API for EgressFilter service. +type EgressFilterServer interface { + Filter(context.Context, *payload.Object_Distance) (*payload.Object_Distance, error) + StreamFilter(EgressFilter_StreamFilterServer) error +} + +// UnimplementedEgressFilterServer can be embedded to have forward compatible implementations. 
+type UnimplementedEgressFilterServer struct { +} + +func (*UnimplementedEgressFilterServer) Filter(ctx context.Context, req *payload.Object_Distance) (*payload.Object_Distance, error) { + return nil, status.Errorf(codes.Unimplemented, "method Filter not implemented") +} +func (*UnimplementedEgressFilterServer) StreamFilter(srv EgressFilter_StreamFilterServer) error { + return status.Errorf(codes.Unimplemented, "method StreamFilter not implemented") +} + +func RegisterEgressFilterServer(s *grpc.Server, srv EgressFilterServer) { + s.RegisterService(&_EgressFilter_serviceDesc, srv) +} + +func _EgressFilter_Filter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Distance) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EgressFilterServer).Filter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filter.egress.v1.EgressFilter/Filter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EgressFilterServer).Filter(ctx, req.(*payload.Object_Distance)) + } + return interceptor(ctx, in, info, handler) +} + +func _EgressFilter_StreamFilter_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(EgressFilterServer).StreamFilter(&egressFilterStreamFilterServer{stream}) +} + +type EgressFilter_StreamFilterServer interface { + Send(*payload.Object_Distance) error + Recv() (*payload.Object_Distance, error) + grpc.ServerStream +} + +type egressFilterStreamFilterServer struct { + grpc.ServerStream +} + +func (x *egressFilterStreamFilterServer) Send(m *payload.Object_Distance) error { + return x.ServerStream.SendMsg(m) +} + +func (x *egressFilterStreamFilterServer) Recv() (*payload.Object_Distance, error) { + m := new(payload.Object_Distance) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _EgressFilter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "filter.egress.v1.EgressFilter", + HandlerType: (*EgressFilterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Filter", + Handler: _EgressFilter_Filter_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamFilter", + Handler: _EgressFilter_StreamFilter_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "apis/proto/v1/filter/egress/egress_filter.proto", +} diff --git a/apis/grpc/v1/filter/ingress/ingress_filter.pb.go b/apis/grpc/v1/filter/ingress/ingress_filter.pb.go new file mode 100644 index 0000000000..cf7e522001 --- /dev/null +++ b/apis/grpc/v1/filter/ingress/ingress_filter.pb.go @@ -0,0 +1,322 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package ingress + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("apis/proto/v1/filter/ingress/ingress_filter.proto", fileDescriptor_8b82e91ce4fe335b) +} + +var fileDescriptor_8b82e91ce4fe335b = []byte{ + // 305 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0x31, 0x4b, 0x43, 0x31, + 0x14, 0x85, 0x4d, 0x07, 0xc5, 0x50, 0x91, 0x66, 0x11, 0x8b, 0x74, 0xa8, 0x0e, 0xe2, 0x90, 0xf8, + 0x74, 0x73, 0xec, 0xa0, 0x88, 0x83, 0x85, 0x42, 0x11, 0x17, 0xb9, 0xef, 0xbd, 0x18, 0x23, 0xe9, + 0xbb, 0x21, 0x2f, 0x06, 0x74, 0xf4, 0x2f, 0xf8, 0xa7, 0x1c, 0x05, 0x67, 0x41, 0x8a, 0x3f, 0x44, + 0x9a, 0xb4, 0xc3, 0x43, 0x05, 0x41, 0xa7, 0x4b, 0xee, 0xc9, 0xf9, 0x38, 0x70, 0x2e, 0xcd, 0xc0, + 0xea, 0x5a, 0x58, 0x87, 0x1e, 0x45, 0xc8, 0xc4, 0xb5, 0x36, 0x5e, 0x3a, 0xa1, 0x2b, 0xe5, 0x64, + 0x5d, 0x2f, 0xe6, 0x55, 0x5a, 0xf3, 0xf8, 0x8d, 0x75, 0xe6, 0xaf, 0xb9, 0xc8, 0x43, 0xd6, 0xdd, + 0x6e, 0x52, 0x2c, 0xdc, 0x1b, 0x84, 0x72, 0x31, 0x93, 0xaf, 0xbb, 0xa5, 0x10, 0x95, 0x91, 0x02, + 0xac, 0x16, 0x50, 0x55, 0xe8, 0xc1, 0x6b, 0xac, 0xea, 0xa4, 0x1e, 0xbc, 0xb5, 0xe8, 0xda, 0x69, + 0x22, 0x1e, 0x47, 0x3e, 0x1b, 0xd1, 0xd5, 0x13, 0x59, 0x8d, 0x65, 0xe1, 0xd1, 0xb1, 0x0d, 0xbe, + 0x80, 0x85, 0x8c, 0x9f, 0xe7, 0xb7, 0xb2, 0xf0, 0x7c, 0x60, 0x30, 0xef, 0x6e, 0x7e, 0x23, 0x24, + 0x4f, 0x9f, 0x3d, 0xbe, 0x7e, 0x3c, 0xb5, 0xda, 0xfd, 0x15, 0x81, 0x71, 0x7f, 0x44, 0xf6, 0xd8, + 0x19, 0x5d, 0x1f, 0x79, 0x27, 0x61, 0xf2, 0x37, 0xf4, 0xd2, 0x2e, 0xd9, 0x27, 0xec, 0x82, 0xb6, + 0x53, 0xd6, 0x39, 0xe9, 0x67, 0xc3, 0xef, 0x62, 0x86, 0xb8, 0x98, 0xc5, 0x1c, 0x52, 0x96, 0x62, + 0xfe, 0x03, 0x3f, 0x66, 0x1d, 0x3c, 0x3c, 0x4f, 0x7b, 0xe4, 0x65, 0xda, 0x23, 0xef, 0xd3, 0x1e, + 0xa1, 0x3b, 0xe8, 0x14, 0x0f, 0x25, 0x40, 0xcd, 0x03, 0x98, 0x92, 0x83, 0xd5, 0x33, 0x63, 0xb3, + 0xd9, 0x41, 0x67, 0x0c, 0xa6, 0x6c, 0x94, 0x32, 0x24, 0x97, 0x99, 0xd2, 0xfe, 0xe6, 0x2e, 0xe7, + 0x05, 0x4e, 0x44, 0x24, 0x88, 0x19, 0x41, 0xc4, 0x0b, 0x50, 0xce, 0x16, 0x5f, 0xcf, 0x28, 0x5f, + 0x8e, 0x15, 0x1f, 0x7e, 0x06, 0x00, 0x00, 0xff, 0xff, 0x06, 0x3e, 0x7c, 0x2a, 0x6d, 0x02, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// IngressFilterClient is the client API for IngressFilter service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type IngressFilterClient interface { + GenVector(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Vector, error) + StreamGenVector(ctx context.Context, opts ...grpc.CallOption) (IngressFilter_StreamGenVectorClient, error) + FilterVector(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Vector, error) + StreamFilterVector(ctx context.Context, opts ...grpc.CallOption) (IngressFilter_StreamFilterVectorClient, error) +} + +type ingressFilterClient struct { + cc *grpc.ClientConn +} + +func NewIngressFilterClient(cc *grpc.ClientConn) IngressFilterClient { + return &ingressFilterClient{cc} +} + +func (c *ingressFilterClient) GenVector(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Vector, error) { + out := new(payload.Object_Vector) + err := c.cc.Invoke(ctx, "/filter.ingress.v1.IngressFilter/GenVector", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingressFilterClient) StreamGenVector(ctx context.Context, opts ...grpc.CallOption) (IngressFilter_StreamGenVectorClient, error) { + stream, err := c.cc.NewStream(ctx, &_IngressFilter_serviceDesc.Streams[0], "/filter.ingress.v1.IngressFilter/StreamGenVector", opts...) + if err != nil { + return nil, err + } + x := &ingressFilterStreamGenVectorClient{stream} + return x, nil +} + +type IngressFilter_StreamGenVectorClient interface { + Send(*payload.Object_Blob) error + Recv() (*payload.Object_Vector, error) + grpc.ClientStream +} + +type ingressFilterStreamGenVectorClient struct { + grpc.ClientStream +} + +func (x *ingressFilterStreamGenVectorClient) Send(m *payload.Object_Blob) error { + return x.ClientStream.SendMsg(m) +} + +func (x *ingressFilterStreamGenVectorClient) Recv() (*payload.Object_Vector, error) { + m := new(payload.Object_Vector) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *ingressFilterClient) FilterVector(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Vector, error) { + out := new(payload.Object_Vector) + err := c.cc.Invoke(ctx, "/filter.ingress.v1.IngressFilter/FilterVector", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ingressFilterClient) StreamFilterVector(ctx context.Context, opts ...grpc.CallOption) (IngressFilter_StreamFilterVectorClient, error) { + stream, err := c.cc.NewStream(ctx, &_IngressFilter_serviceDesc.Streams[1], "/filter.ingress.v1.IngressFilter/StreamFilterVector", opts...) + if err != nil { + return nil, err + } + x := &ingressFilterStreamFilterVectorClient{stream} + return x, nil +} + +type IngressFilter_StreamFilterVectorClient interface { + Send(*payload.Object_Vector) error + Recv() (*payload.Object_Vector, error) + grpc.ClientStream +} + +type ingressFilterStreamFilterVectorClient struct { + grpc.ClientStream +} + +func (x *ingressFilterStreamFilterVectorClient) Send(m *payload.Object_Vector) error { + return x.ClientStream.SendMsg(m) +} + +func (x *ingressFilterStreamFilterVectorClient) Recv() (*payload.Object_Vector, error) { + m := new(payload.Object_Vector) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// IngressFilterServer is the server API for IngressFilter service. 
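+//
+// A minimal server-side sketch (editorial note, not emitted by the code generator):
+// an implementation can embed UnimplementedIngressFilterServer for forward
+// compatibility, override only the RPCs it supports, and register itself on a
+// grpc.Server. The names and pass-through logic below are illustrative assumptions.
+//
+//	type ingressFilter struct {
+//		UnimplementedIngressFilterServer
+//	}
+//
+//	func (f *ingressFilter) FilterVector(ctx context.Context, vec *payload.Object_Vector) (*payload.Object_Vector, error) {
+//		return vec, nil // pass-through filter
+//	}
+//
+//	srv := grpc.NewServer()
+//	RegisterIngressFilterServer(srv, &ingressFilter{})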
+type IngressFilterServer interface { + GenVector(context.Context, *payload.Object_Blob) (*payload.Object_Vector, error) + StreamGenVector(IngressFilter_StreamGenVectorServer) error + FilterVector(context.Context, *payload.Object_Vector) (*payload.Object_Vector, error) + StreamFilterVector(IngressFilter_StreamFilterVectorServer) error +} + +// UnimplementedIngressFilterServer can be embedded to have forward compatible implementations. +type UnimplementedIngressFilterServer struct { +} + +func (*UnimplementedIngressFilterServer) GenVector(ctx context.Context, req *payload.Object_Blob) (*payload.Object_Vector, error) { + return nil, status.Errorf(codes.Unimplemented, "method GenVector not implemented") +} +func (*UnimplementedIngressFilterServer) StreamGenVector(srv IngressFilter_StreamGenVectorServer) error { + return status.Errorf(codes.Unimplemented, "method StreamGenVector not implemented") +} +func (*UnimplementedIngressFilterServer) FilterVector(ctx context.Context, req *payload.Object_Vector) (*payload.Object_Vector, error) { + return nil, status.Errorf(codes.Unimplemented, "method FilterVector not implemented") +} +func (*UnimplementedIngressFilterServer) StreamFilterVector(srv IngressFilter_StreamFilterVectorServer) error { + return status.Errorf(codes.Unimplemented, "method StreamFilterVector not implemented") +} + +func RegisterIngressFilterServer(s *grpc.Server, srv IngressFilterServer) { + s.RegisterService(&_IngressFilter_serviceDesc, srv) +} + +func _IngressFilter_GenVector_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Blob) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngressFilterServer).GenVector(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filter.ingress.v1.IngressFilter/GenVector", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngressFilterServer).GenVector(ctx, req.(*payload.Object_Blob)) + } + return interceptor(ctx, in, info, handler) +} + +func _IngressFilter_StreamGenVector_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IngressFilterServer).StreamGenVector(&ingressFilterStreamGenVectorServer{stream}) +} + +type IngressFilter_StreamGenVectorServer interface { + Send(*payload.Object_Vector) error + Recv() (*payload.Object_Blob, error) + grpc.ServerStream +} + +type ingressFilterStreamGenVectorServer struct { + grpc.ServerStream +} + +func (x *ingressFilterStreamGenVectorServer) Send(m *payload.Object_Vector) error { + return x.ServerStream.SendMsg(m) +} + +func (x *ingressFilterStreamGenVectorServer) Recv() (*payload.Object_Blob, error) { + m := new(payload.Object_Blob) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _IngressFilter_FilterVector_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Vector) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IngressFilterServer).FilterVector(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filter.ingress.v1.IngressFilter/FilterVector", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IngressFilterServer).FilterVector(ctx, req.(*payload.Object_Vector)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _IngressFilter_StreamFilterVector_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IngressFilterServer).StreamFilterVector(&ingressFilterStreamFilterVectorServer{stream}) +} + +type IngressFilter_StreamFilterVectorServer interface { + Send(*payload.Object_Vector) error + Recv() (*payload.Object_Vector, error) + grpc.ServerStream +} + +type ingressFilterStreamFilterVectorServer struct { + grpc.ServerStream +} + +func (x *ingressFilterStreamFilterVectorServer) Send(m *payload.Object_Vector) error { + return x.ServerStream.SendMsg(m) +} + +func (x *ingressFilterStreamFilterVectorServer) Recv() (*payload.Object_Vector, error) { + m := new(payload.Object_Vector) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _IngressFilter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "filter.ingress.v1.IngressFilter", + HandlerType: (*IngressFilterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GenVector", + Handler: _IngressFilter_GenVector_Handler, + }, + { + MethodName: "FilterVector", + Handler: _IngressFilter_FilterVector_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamGenVector", + Handler: _IngressFilter_StreamGenVector_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamFilterVector", + Handler: _IngressFilter_StreamFilterVector_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "apis/proto/v1/filter/ingress/ingress_filter.proto", +} diff --git a/apis/grpc/v1/gateway/vald/vald.pb.go b/apis/grpc/v1/gateway/vald/vald.pb.go new file mode 100644 index 0000000000..3373d14510 --- /dev/null +++ b/apis/grpc/v1/gateway/vald/vald.pb.go @@ -0,0 +1,1034 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package vald + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("apis/proto/v1/gateway/vald/vald.proto", fileDescriptor_afca6feed49a0850) +} + +var fileDescriptor_afca6feed49a0850 = []byte{ + // 506 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x95, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0x80, 0x37, 0x22, 0x29, 0x8e, 0xc1, 0x95, 0xc1, 0x15, 0x8c, 0xa5, 0x87, 0x88, 0x20, 0x7b, + 0xc8, 0x58, 0xbd, 0x79, 0xb3, 0x74, 0xd1, 0x40, 0x17, 0xa5, 0x75, 0xf7, 0xe0, 0x41, 0x98, 0x26, + 0x43, 0x76, 0x24, 0xcd, 0x8c, 0x99, 0x49, 0xb4, 0x88, 0x07, 0xfd, 0x0b, 0xfe, 0x29, 0x8f, 0x82, + 0x7f, 0x40, 0x8a, 0x3f, 0x44, 0xf2, 0x66, 0x0a, 0xed, 0x9a, 0xae, 0x0b, 0xb3, 0x97, 0xb6, 0xbc, + 0x37, 0xef, 0xe3, 0x7d, 0x7d, 0xf0, 0x1e, 0x7a, 0x48, 0x25, 0x57, 0x44, 0x56, 0x42, 0x0b, 0xd2, + 0x0c, 0x49, 0x4e, 0x35, 0xfb, 0x48, 0x97, 0xa4, 0xa1, 0x45, 0x06, 0x1f, 0x31, 0xa4, 0x70, 0x0f, + 0x7e, 0x37, 0xc3, 0xf0, 0xc1, 0xf6, 0x7b, 0x49, 0x97, 0x85, 0xa0, 0xd9, 0xfa, 0xdb, 0xbc, 0x0e, + 0xfb, 0xb9, 0x10, 0x79, 0xc1, 0x08, 0x95, 0x9c, 0xd0, 0xb2, 0x14, 0x9a, 0x6a, 0x2e, 0x4a, 0x65, + 0xb2, 0x4f, 0xbe, 0x06, 0xe8, 0xfa, 0x29, 0x2d, 0x32, 0x3c, 0x41, 0xfe, 0xd1, 0x27, 0xae, 0xb4, + 0xc2, 0x07, 0xf1, 0x1a, 0xd0, 0x0c, 0xe3, 0x57, 0xf3, 0xf7, 0x2c, 0xd5, 0x71, 0x32, 0x0e, 0xbb, + 0xc3, 0xd1, 0x9d, 0x6f, 0xbf, 0xfe, 0x7c, 0xbf, 0x76, 0x0b, 0x07, 0x84, 0x41, 0x39, 0xf9, 0xcc, + 0xb3, 0x2f, 0xf8, 0x04, 0xf9, 0x33, 0x46, 0xab, 0xf4, 0x0c, 0x87, 0x9b, 0x65, 0x26, 0x16, 0x4f, + 0xd9, 0x87, 0x9a, 0x29, 0x1d, 0xde, 0xef, 0xcc, 0x29, 0x29, 0x4a, 0xc5, 0x22, 0x0c, 0xe0, 0x20, + 0xea, 0x11, 0x05, 0x99, 0x67, 0xde, 0x21, 0x7e, 0x87, 0x90, 0x79, 0x36, 0x5a, 0x26, 0x63, 0xdc, + 0xef, 0x28, 0x4f, 0xc6, 0x97, 0x82, 0x1f, 0x00, 0x7c, 0x3f, 0x42, 0x16, 0x4e, 0x78, 0xd6, 0xf2, + 0x8f, 0x51, 0x30, 0xd3, 0x15, 0xa3, 0x0b, 0xd7, 0xe6, 0xf7, 0x1e, 0x79, 0x8f, 0x3d, 0x3c, 0x43, + 0xb7, 0x37, 0x71, 0xae, 0x4d, 0x1b, 0xe8, 0x1b, 0xe4, 0x27, 0xa5, 0x62, 0x95, 0xc6, 0xf7, 0x3a, + 0x26, 0x72, 0xca, 0x52, 0x2d, 0xaa, 0x6d, 0x8e, 0x4d, 0x4d, 0x44, 0x0a, 0xa3, 0xdf, 0xf8, 0x67, + 0x39, 0x80, 0x5a, 0xf3, 0xc9, 0xda, 0xdc, 0x91, 0x6d, 0x7a, 0x4c, 0xd0, 0xcd, 0xe3, 0xba, 0xd0, + 0xdc, 0xc2, 0xc2, 0x9d, 0x30, 0x15, 0xf6, 0x2f, 0xa0, 0xa9, 0x68, 0xaf, 0xd5, 0x3d, 0x91, 0x19, + 0xd5, 0xec, 0x0a, 0x74, 0x6b, 0x00, 0x6d, 0xe9, 0x3a, 0xb2, 0xb7, 0x75, 0x2d, 0xcc, 0x59, 0xf7, + 0x8a, 0xa6, 0x5b, 0xcb, 0x7f, 0xa6, 0xeb, 0xc8, 0x3e, 0xaf, 0xeb, 0x3c, 0xdd, 0x29, 0xf2, 0xa7, + 0x6c, 0x21, 0x1a, 0xb6, 0x6b, 0xeb, 0x5c, 0xd8, 0x8e, 0xdd, 0x3d, 0x87, 0x01, 0xa9, 0x00, 0x62, + 0x76, 0xcf, 0xcb, 0xb5, 0xac, 0x03, 0xd9, 0x88, 0x1e, 0x59, 0x51, 0x0b, 0xba, 0xdb, 0x09, 0xfa, + 0xbf, 0xe4, 0x0c, 0xdd, 0x78, 0xc1, 0xb4, 0x49, 0xec, 0xea, 0x66, 0xf7, 0x44, 0x36, 0x36, 0xac, + 0x80, 0xb8, 0xb1, 0x4c, 0xd0, 0xbe, 0xb1, 0x74, 0x41, 0x83, 0xe6, 0xa8, 0xf8, 0xb1, 0x1a, 0x78, + 0x3f, 0x57, 0x03, 0xef, 0xf7, 0x6a, 0xe0, 0xa1, 0x48, 0x54, 0x79, 0xdc, 0x64, 0x94, 0xaa, 0x18, + 0xee, 0x0c, 0x95, 0xbc, 0x2d, 0xb4, 0xc7, 0x08, 0x62, 0xa3, 0x5e, 0x7b, 0x32, 0x9e, 0x4b, 0xfe, + 0xda, 0x7b, 0x4b, 0x72, 0xae, 0xcf, 0xea, 0x79, 0x9c, 0x8a, 0x05, 0x81, 0x2a, 0x73, 0xae, 0xe0, + 0x32, 0xe5, 0x95, 0x4c, 0xcf, 0x1f, 0xb2, 0xb9, 0x0f, 0x87, 0xe7, 0xe9, 0xdf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x07, 0x91, 0xe9, 0x1f, 0xed, 0x06, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ValdClient is the client API for Vald service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ValdClient interface { + Exists(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_ID, error) + Search(ctx context.Context, in *payload.Search_Request, opts ...grpc.CallOption) (*payload.Search_Response, error) + SearchByID(ctx context.Context, in *payload.Search_IDRequest, opts ...grpc.CallOption) (*payload.Search_Response, error) + StreamSearch(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamSearchClient, error) + StreamSearchByID(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamSearchByIDClient, error) + Insert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamInsert(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamInsertClient, error) + MultiInsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Object_Locations, error) + Update(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamUpdate(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamUpdateClient, error) + MultiUpdate(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Object_Locations, error) + Upsert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamUpsert(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamUpsertClient, error) + MultiUpsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Object_Locations, error) + Remove(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamRemove(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamRemoveClient, error) + MultiRemove(ctx context.Context, in *payload.Object_IDs, opts ...grpc.CallOption) (*payload.Object_Locations, error) + GetObject(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Vector, error) + StreamGetObject(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamGetObjectClient, error) +} + +type valdClient struct { + cc *grpc.ClientConn +} + +func NewValdClient(cc *grpc.ClientConn) ValdClient { + return &valdClient{cc} +} + +func (c *valdClient) Exists(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_ID, error) { + out := new(payload.Object_ID) + err := c.cc.Invoke(ctx, "/vald.v1.Vald/Exists", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *valdClient) Search(ctx context.Context, in *payload.Search_Request, opts ...grpc.CallOption) (*payload.Search_Response, error) { + out := new(payload.Search_Response) + err := c.cc.Invoke(ctx, "/vald.v1.Vald/Search", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *valdClient) SearchByID(ctx context.Context, in *payload.Search_IDRequest, opts ...grpc.CallOption) (*payload.Search_Response, error) { + out := new(payload.Search_Response) + err := c.cc.Invoke(ctx, "/vald.v1.Vald/SearchByID", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *valdClient) StreamSearch(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamSearchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Vald_serviceDesc.Streams[0], "/vald.v1.Vald/StreamSearch", opts...) + if err != nil { + return nil, err + } + x := &valdStreamSearchClient{stream} + return x, nil +} + +type Vald_StreamSearchClient interface { + Send(*payload.Search_Request) error + Recv() (*payload.Search_Response, error) + grpc.ClientStream +} + +type valdStreamSearchClient struct { + grpc.ClientStream +} + +func (x *valdStreamSearchClient) Send(m *payload.Search_Request) error { + return x.ClientStream.SendMsg(m) +} + +func (x *valdStreamSearchClient) Recv() (*payload.Search_Response, error) { + m := new(payload.Search_Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *valdClient) StreamSearchByID(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamSearchByIDClient, error) { + stream, err := c.cc.NewStream(ctx, &_Vald_serviceDesc.Streams[1], "/vald.v1.Vald/StreamSearchByID", opts...) + if err != nil { + return nil, err + } + x := &valdStreamSearchByIDClient{stream} + return x, nil +} + +type Vald_StreamSearchByIDClient interface { + Send(*payload.Search_IDRequest) error + Recv() (*payload.Search_Response, error) + grpc.ClientStream +} + +type valdStreamSearchByIDClient struct { + grpc.ClientStream +} + +func (x *valdStreamSearchByIDClient) Send(m *payload.Search_IDRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *valdStreamSearchByIDClient) Recv() (*payload.Search_Response, error) { + m := new(payload.Search_Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *valdClient) Insert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Vald/Insert", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *valdClient) StreamInsert(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamInsertClient, error) { + stream, err := c.cc.NewStream(ctx, &_Vald_serviceDesc.Streams[2], "/vald.v1.Vald/StreamInsert", opts...) + if err != nil { + return nil, err + } + x := &valdStreamInsertClient{stream} + return x, nil +} + +type Vald_StreamInsertClient interface { + Send(*payload.Object_Vector) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type valdStreamInsertClient struct { + grpc.ClientStream +} + +func (x *valdStreamInsertClient) Send(m *payload.Object_Vector) error { + return x.ClientStream.SendMsg(m) +} + +func (x *valdStreamInsertClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *valdClient) MultiInsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/vald.v1.Vald/MultiInsert", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *valdClient) Update(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Vald/Update", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *valdClient) StreamUpdate(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamUpdateClient, error) { + stream, err := c.cc.NewStream(ctx, &_Vald_serviceDesc.Streams[3], "/vald.v1.Vald/StreamUpdate", opts...) + if err != nil { + return nil, err + } + x := &valdStreamUpdateClient{stream} + return x, nil +} + +type Vald_StreamUpdateClient interface { + Send(*payload.Object_Vector) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type valdStreamUpdateClient struct { + grpc.ClientStream +} + +func (x *valdStreamUpdateClient) Send(m *payload.Object_Vector) error { + return x.ClientStream.SendMsg(m) +} + +func (x *valdStreamUpdateClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *valdClient) MultiUpdate(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/vald.v1.Vald/MultiUpdate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *valdClient) Upsert(ctx context.Context, in *payload.Object_Vector, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Vald/Upsert", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *valdClient) StreamUpsert(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamUpsertClient, error) { + stream, err := c.cc.NewStream(ctx, &_Vald_serviceDesc.Streams[4], "/vald.v1.Vald/StreamUpsert", opts...) + if err != nil { + return nil, err + } + x := &valdStreamUpsertClient{stream} + return x, nil +} + +type Vald_StreamUpsertClient interface { + Send(*payload.Object_Vector) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type valdStreamUpsertClient struct { + grpc.ClientStream +} + +func (x *valdStreamUpsertClient) Send(m *payload.Object_Vector) error { + return x.ClientStream.SendMsg(m) +} + +func (x *valdStreamUpsertClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *valdClient) MultiUpsert(ctx context.Context, in *payload.Object_Vectors, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/vald.v1.Vald/MultiUpsert", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *valdClient) Remove(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Vald/Remove", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *valdClient) StreamRemove(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamRemoveClient, error) { + stream, err := c.cc.NewStream(ctx, &_Vald_serviceDesc.Streams[5], "/vald.v1.Vald/StreamRemove", opts...) 
+ if err != nil { + return nil, err + } + x := &valdStreamRemoveClient{stream} + return x, nil +} + +type Vald_StreamRemoveClient interface { + Send(*payload.Object_ID) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type valdStreamRemoveClient struct { + grpc.ClientStream +} + +func (x *valdStreamRemoveClient) Send(m *payload.Object_ID) error { + return x.ClientStream.SendMsg(m) +} + +func (x *valdStreamRemoveClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *valdClient) MultiRemove(ctx context.Context, in *payload.Object_IDs, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/vald.v1.Vald/MultiRemove", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *valdClient) GetObject(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Vector, error) { + out := new(payload.Object_Vector) + err := c.cc.Invoke(ctx, "/vald.v1.Vald/GetObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *valdClient) StreamGetObject(ctx context.Context, opts ...grpc.CallOption) (Vald_StreamGetObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Vald_serviceDesc.Streams[6], "/vald.v1.Vald/StreamGetObject", opts...) + if err != nil { + return nil, err + } + x := &valdStreamGetObjectClient{stream} + return x, nil +} + +type Vald_StreamGetObjectClient interface { + Send(*payload.Object_ID) error + Recv() (*payload.Object_Vector, error) + grpc.ClientStream +} + +type valdStreamGetObjectClient struct { + grpc.ClientStream +} + +func (x *valdStreamGetObjectClient) Send(m *payload.Object_ID) error { + return x.ClientStream.SendMsg(m) +} + +func (x *valdStreamGetObjectClient) Recv() (*payload.Object_Vector, error) { + m := new(payload.Object_Vector) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ValdServer is the server API for Vald service. 
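+//
+// A minimal sketch (editorial note, not emitted by the code generator): embedding
+// UnimplementedValdServer keeps an implementation forward compatible while only a
+// subset of RPCs is overridden; a bidirectional stream handler loops on Recv/Send
+// (the standard io package is assumed for io.EOF). Types and logic below are
+// illustrative assumptions only.
+//
+//	type valdServer struct {
+//		UnimplementedValdServer
+//	}
+//
+//	func (s *valdServer) StreamSearch(stream Vald_StreamSearchServer) error {
+//		for {
+//			req, err := stream.Recv()
+//			if err == io.EOF {
+//				return nil
+//			}
+//			if err != nil {
+//				return err
+//			}
+//			_ = req // search with req, then send back a (possibly empty) response
+//			if err := stream.Send(new(payload.Search_Response)); err != nil {
+//				return err
+//			}
+//		}
+//	}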
+type ValdServer interface { + Exists(context.Context, *payload.Object_ID) (*payload.Object_ID, error) + Search(context.Context, *payload.Search_Request) (*payload.Search_Response, error) + SearchByID(context.Context, *payload.Search_IDRequest) (*payload.Search_Response, error) + StreamSearch(Vald_StreamSearchServer) error + StreamSearchByID(Vald_StreamSearchByIDServer) error + Insert(context.Context, *payload.Object_Vector) (*payload.Object_Location, error) + StreamInsert(Vald_StreamInsertServer) error + MultiInsert(context.Context, *payload.Object_Vectors) (*payload.Object_Locations, error) + Update(context.Context, *payload.Object_Vector) (*payload.Object_Location, error) + StreamUpdate(Vald_StreamUpdateServer) error + MultiUpdate(context.Context, *payload.Object_Vectors) (*payload.Object_Locations, error) + Upsert(context.Context, *payload.Object_Vector) (*payload.Object_Location, error) + StreamUpsert(Vald_StreamUpsertServer) error + MultiUpsert(context.Context, *payload.Object_Vectors) (*payload.Object_Locations, error) + Remove(context.Context, *payload.Object_ID) (*payload.Object_Location, error) + StreamRemove(Vald_StreamRemoveServer) error + MultiRemove(context.Context, *payload.Object_IDs) (*payload.Object_Locations, error) + GetObject(context.Context, *payload.Object_ID) (*payload.Object_Vector, error) + StreamGetObject(Vald_StreamGetObjectServer) error +} + +// UnimplementedValdServer can be embedded to have forward compatible implementations. +type UnimplementedValdServer struct { +} + +func (*UnimplementedValdServer) Exists(ctx context.Context, req *payload.Object_ID) (*payload.Object_ID, error) { + return nil, status.Errorf(codes.Unimplemented, "method Exists not implemented") +} +func (*UnimplementedValdServer) Search(ctx context.Context, req *payload.Search_Request) (*payload.Search_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") +} +func (*UnimplementedValdServer) SearchByID(ctx context.Context, req *payload.Search_IDRequest) (*payload.Search_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method SearchByID not implemented") +} +func (*UnimplementedValdServer) StreamSearch(srv Vald_StreamSearchServer) error { + return status.Errorf(codes.Unimplemented, "method StreamSearch not implemented") +} +func (*UnimplementedValdServer) StreamSearchByID(srv Vald_StreamSearchByIDServer) error { + return status.Errorf(codes.Unimplemented, "method StreamSearchByID not implemented") +} +func (*UnimplementedValdServer) Insert(ctx context.Context, req *payload.Object_Vector) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method Insert not implemented") +} +func (*UnimplementedValdServer) StreamInsert(srv Vald_StreamInsertServer) error { + return status.Errorf(codes.Unimplemented, "method StreamInsert not implemented") +} +func (*UnimplementedValdServer) MultiInsert(ctx context.Context, req *payload.Object_Vectors) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiInsert not implemented") +} +func (*UnimplementedValdServer) Update(ctx context.Context, req *payload.Object_Vector) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (*UnimplementedValdServer) StreamUpdate(srv Vald_StreamUpdateServer) error { + return status.Errorf(codes.Unimplemented, "method StreamUpdate not implemented") +} +func (*UnimplementedValdServer) MultiUpdate(ctx context.Context, 
req *payload.Object_Vectors) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiUpdate not implemented") +} +func (*UnimplementedValdServer) Upsert(ctx context.Context, req *payload.Object_Vector) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method Upsert not implemented") +} +func (*UnimplementedValdServer) StreamUpsert(srv Vald_StreamUpsertServer) error { + return status.Errorf(codes.Unimplemented, "method StreamUpsert not implemented") +} +func (*UnimplementedValdServer) MultiUpsert(ctx context.Context, req *payload.Object_Vectors) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiUpsert not implemented") +} +func (*UnimplementedValdServer) Remove(ctx context.Context, req *payload.Object_ID) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method Remove not implemented") +} +func (*UnimplementedValdServer) StreamRemove(srv Vald_StreamRemoveServer) error { + return status.Errorf(codes.Unimplemented, "method StreamRemove not implemented") +} +func (*UnimplementedValdServer) MultiRemove(ctx context.Context, req *payload.Object_IDs) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiRemove not implemented") +} +func (*UnimplementedValdServer) GetObject(ctx context.Context, req *payload.Object_ID) (*payload.Object_Vector, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented") +} +func (*UnimplementedValdServer) StreamGetObject(srv Vald_StreamGetObjectServer) error { + return status.Errorf(codes.Unimplemented, "method StreamGetObject not implemented") +} + +func RegisterValdServer(s *grpc.Server, srv ValdServer) { + s.RegisterService(&_Vald_serviceDesc, srv) +} + +func _Vald_Exists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_ID) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValdServer).Exists(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Vald/Exists", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValdServer).Exists(ctx, req.(*payload.Object_ID)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vald_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Search_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValdServer).Search(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Vald/Search", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValdServer).Search(ctx, req.(*payload.Search_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vald_SearchByID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Search_IDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValdServer).SearchByID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Vald/SearchByID", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(ValdServer).SearchByID(ctx, req.(*payload.Search_IDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vald_StreamSearch_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ValdServer).StreamSearch(&valdStreamSearchServer{stream}) +} + +type Vald_StreamSearchServer interface { + Send(*payload.Search_Response) error + Recv() (*payload.Search_Request, error) + grpc.ServerStream +} + +type valdStreamSearchServer struct { + grpc.ServerStream +} + +func (x *valdStreamSearchServer) Send(m *payload.Search_Response) error { + return x.ServerStream.SendMsg(m) +} + +func (x *valdStreamSearchServer) Recv() (*payload.Search_Request, error) { + m := new(payload.Search_Request) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Vald_StreamSearchByID_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ValdServer).StreamSearchByID(&valdStreamSearchByIDServer{stream}) +} + +type Vald_StreamSearchByIDServer interface { + Send(*payload.Search_Response) error + Recv() (*payload.Search_IDRequest, error) + grpc.ServerStream +} + +type valdStreamSearchByIDServer struct { + grpc.ServerStream +} + +func (x *valdStreamSearchByIDServer) Send(m *payload.Search_Response) error { + return x.ServerStream.SendMsg(m) +} + +func (x *valdStreamSearchByIDServer) Recv() (*payload.Search_IDRequest, error) { + m := new(payload.Search_IDRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Vald_Insert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Vector) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValdServer).Insert(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Vald/Insert", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValdServer).Insert(ctx, req.(*payload.Object_Vector)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vald_StreamInsert_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ValdServer).StreamInsert(&valdStreamInsertServer{stream}) +} + +type Vald_StreamInsertServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Object_Vector, error) + grpc.ServerStream +} + +type valdStreamInsertServer struct { + grpc.ServerStream +} + +func (x *valdStreamInsertServer) Send(m *payload.Object_Location) error { + return x.ServerStream.SendMsg(m) +} + +func (x *valdStreamInsertServer) Recv() (*payload.Object_Vector, error) { + m := new(payload.Object_Vector) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Vald_MultiInsert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Vectors) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValdServer).MultiInsert(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Vald/MultiInsert", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValdServer).MultiInsert(ctx, req.(*payload.Object_Vectors)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vald_Update_Handler(srv interface{}, 
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Vector) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValdServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Vald/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValdServer).Update(ctx, req.(*payload.Object_Vector)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vald_StreamUpdate_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ValdServer).StreamUpdate(&valdStreamUpdateServer{stream}) +} + +type Vald_StreamUpdateServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Object_Vector, error) + grpc.ServerStream +} + +type valdStreamUpdateServer struct { + grpc.ServerStream +} + +func (x *valdStreamUpdateServer) Send(m *payload.Object_Location) error { + return x.ServerStream.SendMsg(m) +} + +func (x *valdStreamUpdateServer) Recv() (*payload.Object_Vector, error) { + m := new(payload.Object_Vector) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Vald_MultiUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Vectors) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValdServer).MultiUpdate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Vald/MultiUpdate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValdServer).MultiUpdate(ctx, req.(*payload.Object_Vectors)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vald_Upsert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Vector) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValdServer).Upsert(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Vald/Upsert", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValdServer).Upsert(ctx, req.(*payload.Object_Vector)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vald_StreamUpsert_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ValdServer).StreamUpsert(&valdStreamUpsertServer{stream}) +} + +type Vald_StreamUpsertServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Object_Vector, error) + grpc.ServerStream +} + +type valdStreamUpsertServer struct { + grpc.ServerStream +} + +func (x *valdStreamUpsertServer) Send(m *payload.Object_Location) error { + return x.ServerStream.SendMsg(m) +} + +func (x *valdStreamUpsertServer) Recv() (*payload.Object_Vector, error) { + m := new(payload.Object_Vector) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Vald_MultiUpsert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Vectors) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValdServer).MultiUpsert(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Vald/MultiUpsert", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValdServer).MultiUpsert(ctx, req.(*payload.Object_Vectors)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vald_Remove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_ID) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValdServer).Remove(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Vald/Remove", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValdServer).Remove(ctx, req.(*payload.Object_ID)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vald_StreamRemove_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ValdServer).StreamRemove(&valdStreamRemoveServer{stream}) +} + +type Vald_StreamRemoveServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Object_ID, error) + grpc.ServerStream +} + +type valdStreamRemoveServer struct { + grpc.ServerStream +} + +func (x *valdStreamRemoveServer) Send(m *payload.Object_Location) error { + return x.ServerStream.SendMsg(m) +} + +func (x *valdStreamRemoveServer) Recv() (*payload.Object_ID, error) { + m := new(payload.Object_ID) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Vald_MultiRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_IDs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValdServer).MultiRemove(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Vald/MultiRemove", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValdServer).MultiRemove(ctx, req.(*payload.Object_IDs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vald_GetObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_ID) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValdServer).GetObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Vald/GetObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValdServer).GetObject(ctx, req.(*payload.Object_ID)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vald_StreamGetObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ValdServer).StreamGetObject(&valdStreamGetObjectServer{stream}) +} + +type Vald_StreamGetObjectServer interface { + Send(*payload.Object_Vector) error + Recv() (*payload.Object_ID, error) + grpc.ServerStream +} + +type valdStreamGetObjectServer struct { + grpc.ServerStream +} + +func (x *valdStreamGetObjectServer) Send(m *payload.Object_Vector) error { + return x.ServerStream.SendMsg(m) +} + +func (x *valdStreamGetObjectServer) Recv() (*payload.Object_ID, error) { + m := new(payload.Object_ID) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Vald_serviceDesc = grpc.ServiceDesc{ 
+ ServiceName: "vald.v1.Vald", + HandlerType: (*ValdServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Exists", + Handler: _Vald_Exists_Handler, + }, + { + MethodName: "Search", + Handler: _Vald_Search_Handler, + }, + { + MethodName: "SearchByID", + Handler: _Vald_SearchByID_Handler, + }, + { + MethodName: "Insert", + Handler: _Vald_Insert_Handler, + }, + { + MethodName: "MultiInsert", + Handler: _Vald_MultiInsert_Handler, + }, + { + MethodName: "Update", + Handler: _Vald_Update_Handler, + }, + { + MethodName: "MultiUpdate", + Handler: _Vald_MultiUpdate_Handler, + }, + { + MethodName: "Upsert", + Handler: _Vald_Upsert_Handler, + }, + { + MethodName: "MultiUpsert", + Handler: _Vald_MultiUpsert_Handler, + }, + { + MethodName: "Remove", + Handler: _Vald_Remove_Handler, + }, + { + MethodName: "MultiRemove", + Handler: _Vald_MultiRemove_Handler, + }, + { + MethodName: "GetObject", + Handler: _Vald_GetObject_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamSearch", + Handler: _Vald_StreamSearch_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamSearchByID", + Handler: _Vald_StreamSearchByID_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamInsert", + Handler: _Vald_StreamInsert_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamUpdate", + Handler: _Vald_StreamUpdate_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamUpsert", + Handler: _Vald_StreamUpsert_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamRemove", + Handler: _Vald_StreamRemove_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamGetObject", + Handler: _Vald_StreamGetObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "apis/proto/v1/gateway/vald/vald.proto", +} diff --git a/apis/grpc/manager/backup/backup_manager.pb.go b/apis/grpc/v1/manager/backup/backup_manager.pb.go similarity index 70% rename from apis/grpc/manager/backup/backup_manager.pb.go rename to apis/grpc/v1/manager/backup/backup_manager.pb.go index b6bb876020..22197a1c39 100644 --- a/apis/grpc/manager/backup/backup_manager.pb.go +++ b/apis/grpc/v1/manager/backup/backup_manager.pb.go @@ -21,9 +21,8 @@ import ( fmt "fmt" math "math" - _ "github.com/danielvladco/go-proto-gql/pb" proto "github.com/gogo/protobuf/proto" - payload "github.com/vdaas/vald/apis/grpc/payload" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -41,40 +40,42 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func init() { proto.RegisterFile("backup/backup_manager.proto", fileDescriptor_d3d7e5699810d1ca) } - -var fileDescriptor_d3d7e5699810d1ca = []byte{ - // 479 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x4f, 0x8b, 0xd3, 0x40, - 0x18, 0x87, 0xc9, 0x2a, 0x65, 0x3b, 0xda, 0xec, 0x76, 0x14, 0x71, 0xe3, 0x9a, 0x65, 0x23, 0x7a, - 0xe8, 0x61, 0x06, 0xf4, 0xb6, 0xc7, 0x8a, 0x48, 0xc1, 0x42, 0x29, 0x52, 0xf0, 0x0f, 0xe8, 0x34, - 0x79, 0x1d, 0x83, 0x49, 0x66, 0x36, 0x33, 0x29, 0x2c, 0xe2, 0xc5, 0xb3, 0x37, 0xbf, 0xc8, 0x7e, - 0x0c, 0x8f, 0x82, 0x5f, 0xa0, 0x14, 0x3f, 0x88, 0x24, 0x93, 0x19, 0xb0, 0xd1, 0xa5, 0xa7, 0x90, - 0xf9, 0xbd, 0xf3, 0x3c, 0xef, 0xbc, 0xf0, 0xa2, 0x7b, 0x4b, 0x16, 0x7f, 0xaa, 0x24, 0x35, 0x9f, - 0x77, 0x39, 0x2b, 0x18, 0x87, 0x92, 0xc8, 0x52, 0x68, 0x81, 0xfd, 0xbf, 0x4f, 0x83, 0x81, 0x64, - 0x17, 0x99, 0x60, 0x89, 0x89, 0x83, 0x63, 0x2e, 0x04, 0xcf, 0x80, 0x32, 0x99, 0x52, 0x56, 0x14, - 0x42, 0x33, 0x9d, 0x8a, 0x42, 0xb5, 0xe9, 0x4d, 0xb9, 0xa4, 0xfc, 0x3c, 0x33, 0x7f, 0x8f, 0xbf, - 0xf5, 0x50, 0x6f, 0xdc, 0xd0, 0x70, 0x8e, 0xfa, 0xcf, 0x41, 0x2f, 0x20, 0xd6, 0xa2, 0xc4, 0xa7, - 0xc4, 0x32, 0x4d, 0x4a, 0x5c, 0x44, 0xe6, 0x70, 0x5e, 0x81, 0xd2, 0xc1, 0xc3, 0xed, 0x92, 0xa7, - 0x22, 0x97, 0x25, 0x28, 0x05, 0x09, 0x99, 0x82, 0x66, 0xa6, 0x3c, 0xba, 0xf3, 0xf5, 0xd7, 0xef, - 0xef, 0x7b, 0x87, 0xd8, 0xa7, 0xab, 0xe6, 0x80, 0x7e, 0xae, 0xaa, 0x34, 0xf9, 0x82, 0xdf, 0xa2, - 0xfe, 0x0b, 0x11, 0x9b, 0xd6, 0xba, 0x3a, 0x17, 0x39, 0xdd, 0xd0, 0x95, 0x4c, 0x8a, 0x0f, 0x82, - 0x4c, 0x66, 0x2a, 0x3a, 0x6a, 0xd0, 0xb7, 0xf0, 0x90, 0x66, 0xb6, 0xdc, 0xd2, 0xdf, 0xa0, 0xfd, - 0x39, 0xf0, 0x54, 0x69, 0x28, 0xf1, 0x6e, 0x8d, 0x06, 0xbe, 0x2b, 0x7b, 0x96, 0x4b, 0x7d, 0x11, - 0xdd, 0xbd, 0x5c, 0x9f, 0x78, 0x8d, 0xc1, 0x8f, 0xfa, 0xb4, 0x6c, 0x69, 0x67, 0xde, 0x08, 0x73, - 0x34, 0xb0, 0xf0, 0x69, 0x95, 0xe9, 0x14, 0x3f, 0xda, 0xc9, 0xa0, 0x3a, 0x8a, 0xd0, 0x29, 0x6e, - 0x47, 0x07, 0x4e, 0x41, 0xf3, 0x1a, 0x5a, 0x8b, 0x5e, 0xa2, 0xde, 0x1c, 0x72, 0xb1, 0x02, 0x1c, - 0x6e, 0x1b, 0xcc, 0xb9, 0x9b, 0xce, 0x36, 0x39, 0x70, 0xe4, 0xc3, 0x91, 0x4f, 0x13, 0xc8, 0x40, - 0x83, 0x9d, 0xcd, 0x7b, 0x74, 0xc3, 0xdc, 0x36, 0xcd, 0x3f, 0xb8, 0x1a, 0xdd, 0x14, 0x75, 0xf8, - 0xc7, 0x8e, 0x8f, 0xa3, 0x81, 0xe5, 0xbb, 0xbe, 0x17, 0xb5, 0xc1, 0x3c, 0x66, 0x32, 0x53, 0x5d, - 0xc3, 0x64, 0x46, 0x6c, 0xfe, 0xdf, 0x17, 0x60, 0x67, 0xd8, 0x8f, 0xae, 0xd1, 0x54, 0xd6, 0xdc, - 0x57, 0xa8, 0x6f, 0x9a, 0xab, 0xa9, 0xa7, 0xff, 0xa4, 0x5e, 0x39, 0x95, 0x23, 0xc7, 0x3c, 0x88, - 0x10, 0x4d, 0x65, 0xdb, 0xf8, 0x99, 0x37, 0x0a, 0xae, 0x5f, 0xae, 0x4f, 0xf6, 0xc6, 0xf2, 0xc7, - 0x26, 0xf4, 0x7e, 0x6e, 0x42, 0x6f, 0xbd, 0x09, 0x3d, 0x74, 0x5f, 0x94, 0x9c, 0xac, 0x12, 0xc6, - 0x14, 0x59, 0xb1, 0x2c, 0x21, 0x76, 0x0b, 0xcd, 0xfa, 0x8d, 0x87, 0x0b, 0x96, 0x25, 0xc6, 0x3d, - 0x35, 0xc9, 0xcc, 0x7b, 0x4d, 0x78, 0xaa, 0x3f, 0x56, 0x4b, 0x12, 0x8b, 0x9c, 0x36, 0x57, 0x69, - 0x7d, 0xb5, 0xde, 0x45, 0x45, 0x79, 0x29, 0x63, 0xda, 0x42, 0xda, 0xcd, 0x5e, 0xf6, 0x9a, 0x3d, - 0x7c, 0xf2, 0x27, 0x00, 0x00, 0xff, 0xff, 0xbe, 0x33, 0x57, 0x4d, 0xf1, 0x03, 0x00, 0x00, +func init() { + proto.RegisterFile("apis/proto/v1/manager/backup/backup_manager.proto", fileDescriptor_a861c800442e9f9a) +} + +var fileDescriptor_a861c800442e9f9a = []byte{ + // 474 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xdf, 0x8a, 0xd3, 0x40, + 0x14, 0x87, 0x89, 
0x42, 0xd9, 0x8e, 0x6c, 0xd7, 0x8e, 0x55, 0x34, 0x48, 0xc1, 0x58, 0xbd, 0xe8, + 0xc5, 0x1c, 0xa2, 0x77, 0x7b, 0x59, 0x11, 0x29, 0xb8, 0x50, 0x7a, 0x51, 0xfc, 0x73, 0x21, 0xd3, + 0xe4, 0x18, 0x07, 0x93, 0xcc, 0x98, 0x99, 0x04, 0x56, 0xf1, 0xc6, 0x47, 0xd0, 0x97, 0xf2, 0x52, + 0xf0, 0x05, 0xa4, 0xf8, 0x20, 0x92, 0x99, 0x24, 0xee, 0xb2, 0xd1, 0xee, 0x55, 0xc8, 0x9c, 0xdf, + 0x7c, 0xdf, 0x99, 0x03, 0x87, 0x84, 0x5c, 0x09, 0x0d, 0xaa, 0x90, 0x46, 0x42, 0x15, 0x42, 0xc6, + 0x73, 0x9e, 0x60, 0x01, 0x5b, 0x1e, 0xbd, 0x2f, 0x55, 0xf3, 0x79, 0xd3, 0x9c, 0x32, 0x1b, 0xa3, + 0xe3, 0xf6, 0xd7, 0x55, 0x59, 0x15, 0xfa, 0xf7, 0xcf, 0x53, 0x14, 0x3f, 0x4d, 0x25, 0x8f, 0xdb, + 0xaf, 0xbb, 0xe7, 0xdf, 0x4d, 0xa4, 0x4c, 0x52, 0x04, 0xae, 0x04, 0xf0, 0x3c, 0x97, 0x86, 0x1b, + 0x21, 0x73, 0xed, 0xaa, 0x8f, 0xbe, 0x0e, 0xc8, 0x60, 0x61, 0x81, 0x54, 0x92, 0xe1, 0x33, 0x34, + 0x1b, 0x8c, 0x8c, 0x2c, 0xe8, 0x8c, 0xb5, 0x94, 0x2a, 0x64, 0x2e, 0xc0, 0xba, 0x2a, 0x5b, 0xe3, + 0x87, 0x12, 0xb5, 0xf1, 0xfb, 0x52, 0x4f, 0x64, 0xa6, 0x0a, 0xd4, 0x1a, 0x63, 0xe6, 0xd2, 0xc1, + 0xad, 0x2f, 0x3f, 0x7f, 0x7f, 0xbb, 0x72, 0x9d, 0x8e, 0xa0, 0xb2, 0x07, 0xf0, 0xa9, 0x2c, 0x45, + 0xfc, 0x99, 0x6e, 0xc9, 0xf0, 0xb9, 0x8c, 0x5c, 0x3b, 0xbd, 0xc2, 0xae, 0xda, 0x09, 0x27, 0x67, + 0x53, 0xcb, 0xfc, 0xad, 0x64, 0xcb, 0x95, 0x0e, 0xee, 0x58, 0xc1, 0x0d, 0x3a, 0x86, 0xb4, 0xbd, + 0xd1, 0x3a, 0x5e, 0x92, 0x83, 0x35, 0x26, 0x42, 0x1b, 0xec, 0x7f, 0xd3, 0x85, 0x6e, 0xfd, 0xf1, + 0xd9, 0xd4, 0xd3, 0x4c, 0x99, 0xd3, 0x60, 0x62, 0xf9, 0xa3, 0x60, 0x08, 0x45, 0xc3, 0x3a, 0xf6, + 0xe6, 0x14, 0xc9, 0x61, 0x8b, 0x3e, 0x29, 0x53, 0x23, 0xe8, 0x83, 0xcb, 0xf0, 0x75, 0x9f, 0xc0, + 0xb7, 0x82, 0x49, 0x70, 0xd4, 0x09, 0x20, 0xab, 0x91, 0xb5, 0x66, 0x43, 0x06, 0x6b, 0xcc, 0x64, + 0x85, 0xf4, 0x5e, 0x0f, 0xdf, 0x95, 0xba, 0xf9, 0xf4, 0xb0, 0x9b, 0xe9, 0xcf, 0x47, 0x10, 0x63, + 0x8a, 0x06, 0xff, 0x4e, 0xff, 0x9a, 0xbb, 0xec, 0x9a, 0x7f, 0xb8, 0x17, 0x6e, 0x73, 0x7d, 0x86, + 0xdb, 0xd6, 0x40, 0x83, 0xc3, 0xd6, 0xd0, 0xf5, 0xfe, 0xa2, 0x76, 0xb8, 0x07, 0x2d, 0x57, 0xba, + 0xd7, 0xb1, 0x5c, 0xb1, 0x36, 0xf2, 0xbf, 0x57, 0x8c, 0xac, 0xe3, 0x20, 0xb8, 0x0a, 0x42, 0xd5, + 0xe4, 0xd7, 0x64, 0xe8, 0xba, 0xab, 0xb9, 0xb3, 0x7f, 0x71, 0xf7, 0xcd, 0xe6, 0xa6, 0xa5, 0x1e, + 0x05, 0x04, 0x84, 0x6a, 0x9a, 0x3f, 0xf6, 0xe6, 0x8b, 0x8f, 0xdf, 0x77, 0x53, 0xef, 0xc7, 0x6e, + 0xea, 0xfd, 0xda, 0x4d, 0x3d, 0x32, 0x93, 0x45, 0xc2, 0xaa, 0x98, 0x73, 0xcd, 0x2a, 0x9e, 0xc6, + 0x8c, 0x2b, 0x51, 0x13, 0xce, 0xaf, 0xe3, 0x62, 0xbc, 0xe1, 0x69, 0xec, 0xdc, 0x27, 0xae, 0xb2, + 0xf2, 0x5e, 0x85, 0x89, 0x30, 0xef, 0xca, 0x2d, 0x8b, 0x64, 0x06, 0x96, 0x00, 0x35, 0x01, 0xec, + 0xda, 0x26, 0x85, 0x8a, 0x2e, 0xee, 0xfe, 0x76, 0x60, 0xf7, 0xf2, 0xf1, 0x9f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x5c, 0xcd, 0x50, 0x7e, 0x22, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -89,10 +90,10 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type BackupClient interface { - GetVector(ctx context.Context, in *payload.Backup_GetVector_Request, opts ...grpc.CallOption) (*payload.Backup_Compressed_MetaVector, error) + GetVector(ctx context.Context, in *payload.Backup_GetVector_Request, opts ...grpc.CallOption) (*payload.Backup_Compressed_Vector, error) Locations(ctx context.Context, in *payload.Backup_Locations_Request, opts ...grpc.CallOption) (*payload.Info_IPs, error) - Register(ctx context.Context, in *payload.Backup_Compressed_MetaVector, opts ...grpc.CallOption) (*payload.Empty, error) - RegisterMulti(ctx context.Context, in *payload.Backup_Compressed_MetaVectors, opts ...grpc.CallOption) (*payload.Empty, error) + Register(ctx context.Context, in *payload.Backup_Compressed_Vector, opts ...grpc.CallOption) (*payload.Empty, error) + RegisterMulti(ctx context.Context, in *payload.Backup_Compressed_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) Remove(ctx context.Context, in *payload.Backup_Remove_Request, opts ...grpc.CallOption) (*payload.Empty, error) RemoveMulti(ctx context.Context, in *payload.Backup_Remove_RequestMulti, opts ...grpc.CallOption) (*payload.Empty, error) RegisterIPs(ctx context.Context, in *payload.Backup_IP_Register_Request, opts ...grpc.CallOption) (*payload.Empty, error) @@ -107,9 +108,9 @@ func NewBackupClient(cc *grpc.ClientConn) BackupClient { return &backupClient{cc} } -func (c *backupClient) GetVector(ctx context.Context, in *payload.Backup_GetVector_Request, opts ...grpc.CallOption) (*payload.Backup_Compressed_MetaVector, error) { - out := new(payload.Backup_Compressed_MetaVector) - err := c.cc.Invoke(ctx, "/backup_manager.Backup/GetVector", in, out, opts...) +func (c *backupClient) GetVector(ctx context.Context, in *payload.Backup_GetVector_Request, opts ...grpc.CallOption) (*payload.Backup_Compressed_Vector, error) { + out := new(payload.Backup_Compressed_Vector) + err := c.cc.Invoke(ctx, "/manager.backup.v1.Backup/GetVector", in, out, opts...) if err != nil { return nil, err } @@ -118,25 +119,25 @@ func (c *backupClient) GetVector(ctx context.Context, in *payload.Backup_GetVect func (c *backupClient) Locations(ctx context.Context, in *payload.Backup_Locations_Request, opts ...grpc.CallOption) (*payload.Info_IPs, error) { out := new(payload.Info_IPs) - err := c.cc.Invoke(ctx, "/backup_manager.Backup/Locations", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.backup.v1.Backup/Locations", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *backupClient) Register(ctx context.Context, in *payload.Backup_Compressed_MetaVector, opts ...grpc.CallOption) (*payload.Empty, error) { +func (c *backupClient) Register(ctx context.Context, in *payload.Backup_Compressed_Vector, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/backup_manager.Backup/Register", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.backup.v1.Backup/Register", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *backupClient) RegisterMulti(ctx context.Context, in *payload.Backup_Compressed_MetaVectors, opts ...grpc.CallOption) (*payload.Empty, error) { +func (c *backupClient) RegisterMulti(ctx context.Context, in *payload.Backup_Compressed_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/backup_manager.Backup/RegisterMulti", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.backup.v1.Backup/RegisterMulti", in, out, opts...) 
if err != nil { return nil, err } @@ -145,7 +146,7 @@ func (c *backupClient) RegisterMulti(ctx context.Context, in *payload.Backup_Com func (c *backupClient) Remove(ctx context.Context, in *payload.Backup_Remove_Request, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/backup_manager.Backup/Remove", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.backup.v1.Backup/Remove", in, out, opts...) if err != nil { return nil, err } @@ -154,7 +155,7 @@ func (c *backupClient) Remove(ctx context.Context, in *payload.Backup_Remove_Req func (c *backupClient) RemoveMulti(ctx context.Context, in *payload.Backup_Remove_RequestMulti, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/backup_manager.Backup/RemoveMulti", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.backup.v1.Backup/RemoveMulti", in, out, opts...) if err != nil { return nil, err } @@ -163,7 +164,7 @@ func (c *backupClient) RemoveMulti(ctx context.Context, in *payload.Backup_Remov func (c *backupClient) RegisterIPs(ctx context.Context, in *payload.Backup_IP_Register_Request, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/backup_manager.Backup/RegisterIPs", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.backup.v1.Backup/RegisterIPs", in, out, opts...) if err != nil { return nil, err } @@ -172,7 +173,7 @@ func (c *backupClient) RegisterIPs(ctx context.Context, in *payload.Backup_IP_Re func (c *backupClient) RemoveIPs(ctx context.Context, in *payload.Backup_IP_Remove_Request, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/backup_manager.Backup/RemoveIPs", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.backup.v1.Backup/RemoveIPs", in, out, opts...) if err != nil { return nil, err } @@ -181,10 +182,10 @@ func (c *backupClient) RemoveIPs(ctx context.Context, in *payload.Backup_IP_Remo // BackupServer is the server API for Backup service. 
type BackupServer interface { - GetVector(context.Context, *payload.Backup_GetVector_Request) (*payload.Backup_Compressed_MetaVector, error) + GetVector(context.Context, *payload.Backup_GetVector_Request) (*payload.Backup_Compressed_Vector, error) Locations(context.Context, *payload.Backup_Locations_Request) (*payload.Info_IPs, error) - Register(context.Context, *payload.Backup_Compressed_MetaVector) (*payload.Empty, error) - RegisterMulti(context.Context, *payload.Backup_Compressed_MetaVectors) (*payload.Empty, error) + Register(context.Context, *payload.Backup_Compressed_Vector) (*payload.Empty, error) + RegisterMulti(context.Context, *payload.Backup_Compressed_Vectors) (*payload.Empty, error) Remove(context.Context, *payload.Backup_Remove_Request) (*payload.Empty, error) RemoveMulti(context.Context, *payload.Backup_Remove_RequestMulti) (*payload.Empty, error) RegisterIPs(context.Context, *payload.Backup_IP_Register_Request) (*payload.Empty, error) @@ -195,16 +196,16 @@ type BackupServer interface { type UnimplementedBackupServer struct { } -func (*UnimplementedBackupServer) GetVector(ctx context.Context, req *payload.Backup_GetVector_Request) (*payload.Backup_Compressed_MetaVector, error) { +func (*UnimplementedBackupServer) GetVector(ctx context.Context, req *payload.Backup_GetVector_Request) (*payload.Backup_Compressed_Vector, error) { return nil, status.Errorf(codes.Unimplemented, "method GetVector not implemented") } func (*UnimplementedBackupServer) Locations(ctx context.Context, req *payload.Backup_Locations_Request) (*payload.Info_IPs, error) { return nil, status.Errorf(codes.Unimplemented, "method Locations not implemented") } -func (*UnimplementedBackupServer) Register(ctx context.Context, req *payload.Backup_Compressed_MetaVector) (*payload.Empty, error) { +func (*UnimplementedBackupServer) Register(ctx context.Context, req *payload.Backup_Compressed_Vector) (*payload.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Register not implemented") } -func (*UnimplementedBackupServer) RegisterMulti(ctx context.Context, req *payload.Backup_Compressed_MetaVectors) (*payload.Empty, error) { +func (*UnimplementedBackupServer) RegisterMulti(ctx context.Context, req *payload.Backup_Compressed_Vectors) (*payload.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method RegisterMulti not implemented") } func (*UnimplementedBackupServer) Remove(ctx context.Context, req *payload.Backup_Remove_Request) (*payload.Empty, error) { @@ -234,7 +235,7 @@ func _Backup_GetVector_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/backup_manager.Backup/GetVector", + FullMethod: "/manager.backup.v1.Backup/GetVector", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackupServer).GetVector(ctx, req.(*payload.Backup_GetVector_Request)) @@ -252,7 +253,7 @@ func _Backup_Locations_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/backup_manager.Backup/Locations", + FullMethod: "/manager.backup.v1.Backup/Locations", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackupServer).Locations(ctx, req.(*payload.Backup_Locations_Request)) @@ -261,7 +262,7 @@ func _Backup_Locations_Handler(srv interface{}, ctx context.Context, dec func(in } func _Backup_Register_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Backup_Compressed_MetaVector) + in := new(payload.Backup_Compressed_Vector) if err := dec(in); err != nil { return nil, err } @@ -270,16 +271,16 @@ func _Backup_Register_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/backup_manager.Backup/Register", + FullMethod: "/manager.backup.v1.Backup/Register", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServer).Register(ctx, req.(*payload.Backup_Compressed_MetaVector)) + return srv.(BackupServer).Register(ctx, req.(*payload.Backup_Compressed_Vector)) } return interceptor(ctx, in, info, handler) } func _Backup_RegisterMulti_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Backup_Compressed_MetaVectors) + in := new(payload.Backup_Compressed_Vectors) if err := dec(in); err != nil { return nil, err } @@ -288,10 +289,10 @@ func _Backup_RegisterMulti_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/backup_manager.Backup/RegisterMulti", + FullMethod: "/manager.backup.v1.Backup/RegisterMulti", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServer).RegisterMulti(ctx, req.(*payload.Backup_Compressed_MetaVectors)) + return srv.(BackupServer).RegisterMulti(ctx, req.(*payload.Backup_Compressed_Vectors)) } return interceptor(ctx, in, info, handler) } @@ -306,7 +307,7 @@ func _Backup_Remove_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/backup_manager.Backup/Remove", + FullMethod: "/manager.backup.v1.Backup/Remove", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackupServer).Remove(ctx, req.(*payload.Backup_Remove_Request)) @@ -324,7 +325,7 @@ func _Backup_RemoveMulti_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/backup_manager.Backup/RemoveMulti", + FullMethod: "/manager.backup.v1.Backup/RemoveMulti", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackupServer).RemoveMulti(ctx, req.(*payload.Backup_Remove_RequestMulti)) @@ -342,7 +343,7 @@ func _Backup_RegisterIPs_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/backup_manager.Backup/RegisterIPs", + FullMethod: "/manager.backup.v1.Backup/RegisterIPs", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackupServer).RegisterIPs(ctx, req.(*payload.Backup_IP_Register_Request)) @@ -360,7 +361,7 @@ func _Backup_RemoveIPs_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/backup_manager.Backup/RemoveIPs", + FullMethod: "/manager.backup.v1.Backup/RemoveIPs", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackupServer).RemoveIPs(ctx, req.(*payload.Backup_IP_Remove_Request)) @@ -369,7 +370,7 @@ func _Backup_RemoveIPs_Handler(srv interface{}, ctx context.Context, dec func(in } var _Backup_serviceDesc = grpc.ServiceDesc{ - ServiceName: "backup_manager.Backup", + ServiceName: "manager.backup.v1.Backup", HandlerType: (*BackupServer)(nil), Methods: 
[]grpc.MethodDesc{ { @@ -406,5 +407,5 @@ var _Backup_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "backup/backup_manager.proto", + Metadata: "apis/proto/v1/manager/backup/backup_manager.proto", } diff --git a/apis/grpc/manager/compressor/compressor.pb.go b/apis/grpc/v1/manager/compressor/compressor.pb.go similarity index 69% rename from apis/grpc/manager/compressor/compressor.pb.go rename to apis/grpc/v1/manager/compressor/compressor.pb.go index dba61a4f56..51f9375af1 100644 --- a/apis/grpc/manager/compressor/compressor.pb.go +++ b/apis/grpc/v1/manager/compressor/compressor.pb.go @@ -21,9 +21,8 @@ import ( fmt "fmt" math "math" - _ "github.com/danielvladco/go-proto-gql/pb" proto "github.com/gogo/protobuf/proto" - payload "github.com/vdaas/vald/apis/grpc/payload" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -41,40 +40,42 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func init() { proto.RegisterFile("compressor/compressor.proto", fileDescriptor_17b3bee0bd31629e) } - -var fileDescriptor_17b3bee0bd31629e = []byte{ - // 470 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x41, 0x6b, 0xd4, 0x40, - 0x14, 0xc7, 0x49, 0xd5, 0xa5, 0x3b, 0xba, 0x69, 0x3b, 0x8a, 0xd8, 0xb4, 0xa4, 0x6c, 0xbc, 0xed, - 0x61, 0x1e, 0xd4, 0x5b, 0x8f, 0x2b, 0x22, 0x0b, 0x16, 0xc2, 0x22, 0x0b, 0x4a, 0x0f, 0xce, 0x26, - 0x63, 0x0c, 0x26, 0x99, 0xe9, 0xcc, 0x64, 0xa1, 0x88, 0x17, 0xbf, 0x82, 0x5f, 0xa4, 0x1f, 0xc3, - 0xa3, 0xe0, 0x17, 0x58, 0x16, 0x6f, 0x7e, 0x09, 0x99, 0xcc, 0x66, 0x56, 0x36, 0x76, 0x6f, 0xc9, - 0xfb, 0xff, 0xdf, 0xef, 0xfd, 0xe7, 0xc1, 0x43, 0x27, 0x09, 0x2f, 0x85, 0x64, 0x4a, 0x71, 0x09, - 0x9b, 0x4f, 0x22, 0x24, 0xd7, 0x1c, 0xa3, 0x4d, 0x25, 0x18, 0x08, 0x7a, 0x53, 0x70, 0x9a, 0x5a, - 0x29, 0x38, 0xcd, 0x38, 0xcf, 0x0a, 0x06, 0x54, 0xe4, 0x40, 0xab, 0x8a, 0x6b, 0xaa, 0x73, 0x5e, - 0xa9, 0xb5, 0xfa, 0x48, 0xcc, 0x21, 0xbb, 0x2e, 0xec, 0xdf, 0xf9, 0x9f, 0x07, 0xa8, 0x37, 0xa6, - 0xc9, 0xe7, 0x5a, 0xe0, 0x39, 0xea, 0xbf, 0x66, 0x7a, 0xc6, 0x12, 0xcd, 0x25, 0x1e, 0x92, 0x96, - 0x69, 0x55, 0xe2, 0x24, 0x32, 0x65, 0xd7, 0x35, 0x53, 0x3a, 0x08, 0xb6, 0x2d, 0x97, 0x4c, 0x53, - 0xeb, 0x89, 0x9e, 0x7e, 0xfb, 0xf5, 0xfb, 0xfb, 0xde, 0x21, 0xf6, 0x61, 0xd1, 0x14, 0xe0, 0x4b, - 0x5d, 0xe7, 0xe9, 0x57, 0x7c, 0x85, 0xfa, 0x6f, 0x78, 0x62, 0xf3, 0x74, 0x67, 0x38, 0xc9, 0xcd, - 0x38, 0x72, 0x96, 0x49, 0xf5, 0x91, 0x93, 0x49, 0xac, 0xa2, 0xe3, 0x06, 0xfd, 0x18, 0x1f, 0x41, - 0xd1, 0xda, 0x5b, 0x7a, 0x8c, 0xf6, 0xa7, 0x2c, 0xcb, 0x95, 0x66, 0x12, 0xef, 0x48, 0x17, 0xf8, - 0x4e, 0x7b, 0x55, 0x0a, 0x7d, 0x13, 0x3d, 0xbb, 0x5d, 0x9e, 0x79, 0x0d, 0xd6, 0x8f, 0xfa, 0x20, - 0xd7, 0x88, 0x0b, 0x6f, 0x84, 0xaf, 0xd0, 0xa0, 0x25, 0x5e, 0xd6, 0x85, 0xce, 0xf1, 0xc9, 0xdd, - 0x58, 0xd5, 0xe1, 0x86, 0x8e, 0xfb, 0x24, 0x3a, 0x70, 0x5c, 0x28, 0x0d, 0xc9, 0xd0, 0xdf, 0xa2, - 0xde, 0x94, 0x95, 0x7c, 0xc1, 0x70, 0xb8, 0x8d, 0xb5, 0x75, 0xb7, 0x87, 0x6d, 0x72, 0xe0, 0xc8, - 0x87, 0x23, 0x1f, 0x52, 0x56, 0x30, 0xcd, 0xda, 0x2d, 0x7c, 0x40, 0x0f, 0x6d, 0xb7, 0x4d, 0xfc, - 0x7c, 0x37, 0xba, 0x31, 0x75, 0xf8, 0xa7, 0x8e, 0x8f, 0xa3, 0x41, 0xcb, 0x77, 0xb9, 0x67, 0x66, - 0x82, 0x7d, 0xcc, 0x24, 0x56, 0xdd, 0x09, 0x93, 0x98, 0xb4, 0xfa, 0x9d, 0x2f, 0xc0, 0x6e, 0xc2, - 0x7e, 0x74, 0x0f, 0x72, 0x61, 0xb8, 0xef, 0x50, 0xdf, 
0x86, 0x33, 0xd4, 0xe1, 0x7f, 0xa9, 0x3b, - 0xb7, 0x72, 0xec, 0x98, 0x07, 0x11, 0x82, 0x5c, 0xac, 0x83, 0x5f, 0x78, 0xa3, 0xe0, 0xfe, 0xed, - 0xf2, 0x6c, 0x6f, 0x5c, 0xff, 0x58, 0x85, 0xde, 0xcf, 0x55, 0xe8, 0x2d, 0x57, 0xa1, 0x87, 0x86, - 0x5c, 0x66, 0x64, 0x91, 0x52, 0xaa, 0xc8, 0x82, 0x16, 0x29, 0x29, 0x69, 0x45, 0x33, 0x26, 0xc9, - 0xe6, 0xb2, 0xc6, 0xfe, 0x8c, 0x16, 0xe9, 0x4b, 0xf7, 0x1f, 0x7b, 0xef, 0xcf, 0xb3, 0x5c, 0x7f, - 0xaa, 0xe7, 0xc6, 0x04, 0x4d, 0x2f, 0x98, 0x5e, 0x73, 0x6b, 0x0a, 0x32, 0x29, 0x12, 0x58, 0x53, - 0xfe, 0xb9, 0xd8, 0x79, 0xaf, 0xb9, 0xb5, 0x17, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x1f, - 0x2f, 0xa3, 0xd1, 0x03, 0x00, 0x00, +func init() { + proto.RegisterFile("apis/proto/v1/manager/compressor/compressor.proto", fileDescriptor_65a3baf9652f4ae9) +} + +var fileDescriptor_65a3baf9652f4ae9 = []byte{ + // 466 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xdf, 0x8a, 0xd3, 0x40, + 0x18, 0xc5, 0x89, 0x62, 0xd9, 0x8e, 0x6c, 0xd6, 0x1d, 0xbb, 0xe2, 0x06, 0x29, 0x18, 0x17, 0x91, + 0x5e, 0xcc, 0x47, 0x15, 0x6f, 0xf6, 0xb2, 0x22, 0x52, 0x50, 0x29, 0xbd, 0x28, 0xb2, 0x5e, 0x4d, + 0x93, 0x31, 0x0e, 0x26, 0x99, 0x31, 0x33, 0x09, 0x2c, 0x22, 0x82, 0xaf, 0xe0, 0x4b, 0x79, 0x29, + 0xf8, 0x02, 0x52, 0x7c, 0x01, 0xdf, 0x40, 0x66, 0xa6, 0x89, 0x11, 0xa7, 0xf6, 0x2a, 0x7f, 0xce, + 0x99, 0xdf, 0x39, 0xf3, 0xc1, 0x87, 0xa6, 0x54, 0x72, 0x05, 0xb2, 0x12, 0x5a, 0x40, 0x33, 0x85, + 0x82, 0x96, 0x34, 0x63, 0x15, 0x24, 0xa2, 0x90, 0x15, 0x53, 0x4a, 0xf4, 0x5f, 0x89, 0xb5, 0xe1, + 0x93, 0xad, 0x89, 0xf4, 0x94, 0x66, 0x1a, 0xdd, 0xfb, 0x9b, 0x24, 0xe9, 0x65, 0x2e, 0x68, 0xda, + 0x3e, 0xdd, 0xd9, 0xe8, 0x4e, 0x26, 0x44, 0x96, 0x33, 0xa0, 0x92, 0x03, 0x2d, 0x4b, 0xa1, 0xa9, + 0xe6, 0xa2, 0x54, 0x4e, 0x7d, 0xf8, 0xeb, 0x1a, 0x1a, 0xcc, 0x68, 0xf2, 0xae, 0x96, 0x38, 0x45, + 0xc3, 0x67, 0x4c, 0xaf, 0x58, 0xa2, 0x45, 0x85, 0xcf, 0x48, 0x4b, 0x69, 0xa6, 0xc4, 0x19, 0x48, + 0xa7, 0x92, 0x25, 0x7b, 0x5f, 0x33, 0xa5, 0xa3, 0x53, 0x8f, 0xcb, 0x59, 0xe2, 0x5b, 0x9f, 0xbf, + 0xff, 0xfc, 0x72, 0xe5, 0x06, 0x0e, 0xa1, 0xb1, 0x3f, 0xe0, 0x43, 0x5d, 0xf3, 0xf4, 0x23, 0x5e, + 0xa3, 0xe1, 0x73, 0x91, 0xb8, 0x0e, 0xde, 0x94, 0x4e, 0xed, 0x52, 0x46, 0x7d, 0xd7, 0xbc, 0x7c, + 0x23, 0xc8, 0x7c, 0xa1, 0xe2, 0x53, 0x1b, 0x70, 0x13, 0x1f, 0x43, 0xde, 0x9e, 0x68, 0x33, 0x5e, + 0xa2, 0x83, 0x25, 0xcb, 0xb8, 0xd2, 0xac, 0xc2, 0xbb, 0x2b, 0x46, 0xc7, 0x7d, 0xe9, 0x69, 0x21, + 0xf5, 0x65, 0x3c, 0xb2, 0xd0, 0x30, 0x1e, 0x42, 0xb5, 0x05, 0x9c, 0x07, 0x13, 0x7c, 0x81, 0x0e, + 0x5b, 0xde, 0x8b, 0x3a, 0xd7, 0x1c, 0x47, 0x3b, 0xa1, 0xca, 0x47, 0x8d, 0x2c, 0x75, 0x14, 0x1f, + 0x75, 0x54, 0x28, 0x0c, 0xc7, 0xb0, 0x57, 0x68, 0xb0, 0x64, 0x85, 0x68, 0x18, 0xbe, 0xeb, 0x81, + 0x3a, 0xa9, 0x9b, 0x84, 0x87, 0xbd, 0x9d, 0xf3, 0x24, 0x84, 0x94, 0xe5, 0x4c, 0xb3, 0x3f, 0x73, + 0xbe, 0xee, 0x0e, 0xbb, 0xc6, 0xf7, 0xf7, 0xc2, 0xad, 0xcf, 0x97, 0x70, 0xdb, 0x26, 0xe0, 0xf8, + 0xb0, 0x4d, 0xe8, 0xba, 0xbf, 0x32, 0x19, 0xee, 0x42, 0xf3, 0x85, 0xf2, 0x66, 0xcc, 0x17, 0xa4, + 0xb5, 0xfc, 0xef, 0x16, 0xa1, 0xcd, 0x38, 0x88, 0xaf, 0x02, 0x97, 0x86, 0xfc, 0x1a, 0x0d, 0x5d, + 0x3b, 0xc3, 0x3d, 0xdb, 0xc5, 0xdd, 0x37, 0x9b, 0x13, 0x4b, 0x3d, 0x8a, 0x11, 0x70, 0xb9, 0x2d, + 0x7f, 0x1e, 0x4c, 0x66, 0x9f, 0xbe, 0x6e, 0xc6, 0xc1, 0xb7, 0xcd, 0x38, 0xf8, 0xb1, 0x19, 0x07, + 0xe8, 0x81, 0xa8, 0x32, 0xd2, 0xa4, 0x94, 0x2a, 0xd2, 0xd0, 0x3c, 0x25, 0x54, 0x72, 0x43, 0xf8, + 0x77, 0xe3, 0x66, 0xe1, 0x8a, 0xe6, 0xe9, 0x93, 0xee, 0x7b, 0x11, 0x5c, 0x3c, 0xce, 0xb8, 0x7e, + 0x5b, 0xaf, 
0x8d, 0x09, 0x2c, 0x02, 0x0c, 0x02, 0xec, 0x5a, 0x66, 0x95, 0x4c, 0xfc, 0xfb, 0xbd, + 0x1e, 0xd8, 0xdd, 0x7b, 0xf4, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x65, 0xd7, 0x29, 0x74, 0x0a, 0x04, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -89,10 +90,10 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type BackupClient interface { - GetVector(ctx context.Context, in *payload.Backup_GetVector_Request, opts ...grpc.CallOption) (*payload.Backup_MetaVector, error) + GetVector(ctx context.Context, in *payload.Backup_GetVector_Request, opts ...grpc.CallOption) (*payload.Backup_Vector, error) Locations(ctx context.Context, in *payload.Backup_Locations_Request, opts ...grpc.CallOption) (*payload.Info_IPs, error) - Register(ctx context.Context, in *payload.Backup_MetaVector, opts ...grpc.CallOption) (*payload.Empty, error) - RegisterMulti(ctx context.Context, in *payload.Backup_MetaVectors, opts ...grpc.CallOption) (*payload.Empty, error) + Register(ctx context.Context, in *payload.Backup_Vector, opts ...grpc.CallOption) (*payload.Empty, error) + RegisterMulti(ctx context.Context, in *payload.Backup_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) Remove(ctx context.Context, in *payload.Backup_Remove_Request, opts ...grpc.CallOption) (*payload.Empty, error) RemoveMulti(ctx context.Context, in *payload.Backup_Remove_RequestMulti, opts ...grpc.CallOption) (*payload.Empty, error) RegisterIPs(ctx context.Context, in *payload.Backup_IP_Register_Request, opts ...grpc.CallOption) (*payload.Empty, error) @@ -107,9 +108,9 @@ func NewBackupClient(cc *grpc.ClientConn) BackupClient { return &backupClient{cc} } -func (c *backupClient) GetVector(ctx context.Context, in *payload.Backup_GetVector_Request, opts ...grpc.CallOption) (*payload.Backup_MetaVector, error) { - out := new(payload.Backup_MetaVector) - err := c.cc.Invoke(ctx, "/compressor.Backup/GetVector", in, out, opts...) +func (c *backupClient) GetVector(ctx context.Context, in *payload.Backup_GetVector_Request, opts ...grpc.CallOption) (*payload.Backup_Vector, error) { + out := new(payload.Backup_Vector) + err := c.cc.Invoke(ctx, "/manager.compressor.v1.Backup/GetVector", in, out, opts...) if err != nil { return nil, err } @@ -118,25 +119,25 @@ func (c *backupClient) GetVector(ctx context.Context, in *payload.Backup_GetVect func (c *backupClient) Locations(ctx context.Context, in *payload.Backup_Locations_Request, opts ...grpc.CallOption) (*payload.Info_IPs, error) { out := new(payload.Info_IPs) - err := c.cc.Invoke(ctx, "/compressor.Backup/Locations", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.compressor.v1.Backup/Locations", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *backupClient) Register(ctx context.Context, in *payload.Backup_MetaVector, opts ...grpc.CallOption) (*payload.Empty, error) { +func (c *backupClient) Register(ctx context.Context, in *payload.Backup_Vector, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/compressor.Backup/Register", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.compressor.v1.Backup/Register", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *backupClient) RegisterMulti(ctx context.Context, in *payload.Backup_MetaVectors, opts ...grpc.CallOption) (*payload.Empty, error) { +func (c *backupClient) RegisterMulti(ctx context.Context, in *payload.Backup_Vectors, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/compressor.Backup/RegisterMulti", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.compressor.v1.Backup/RegisterMulti", in, out, opts...) if err != nil { return nil, err } @@ -145,7 +146,7 @@ func (c *backupClient) RegisterMulti(ctx context.Context, in *payload.Backup_Met func (c *backupClient) Remove(ctx context.Context, in *payload.Backup_Remove_Request, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/compressor.Backup/Remove", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.compressor.v1.Backup/Remove", in, out, opts...) if err != nil { return nil, err } @@ -154,7 +155,7 @@ func (c *backupClient) Remove(ctx context.Context, in *payload.Backup_Remove_Req func (c *backupClient) RemoveMulti(ctx context.Context, in *payload.Backup_Remove_RequestMulti, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/compressor.Backup/RemoveMulti", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.compressor.v1.Backup/RemoveMulti", in, out, opts...) if err != nil { return nil, err } @@ -163,7 +164,7 @@ func (c *backupClient) RemoveMulti(ctx context.Context, in *payload.Backup_Remov func (c *backupClient) RegisterIPs(ctx context.Context, in *payload.Backup_IP_Register_Request, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/compressor.Backup/RegisterIPs", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.compressor.v1.Backup/RegisterIPs", in, out, opts...) if err != nil { return nil, err } @@ -172,7 +173,7 @@ func (c *backupClient) RegisterIPs(ctx context.Context, in *payload.Backup_IP_Re func (c *backupClient) RemoveIPs(ctx context.Context, in *payload.Backup_IP_Remove_Request, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/compressor.Backup/RemoveIPs", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.compressor.v1.Backup/RemoveIPs", in, out, opts...) if err != nil { return nil, err } @@ -181,10 +182,10 @@ func (c *backupClient) RemoveIPs(ctx context.Context, in *payload.Backup_IP_Remo // BackupServer is the server API for Backup service. 
type BackupServer interface { - GetVector(context.Context, *payload.Backup_GetVector_Request) (*payload.Backup_MetaVector, error) + GetVector(context.Context, *payload.Backup_GetVector_Request) (*payload.Backup_Vector, error) Locations(context.Context, *payload.Backup_Locations_Request) (*payload.Info_IPs, error) - Register(context.Context, *payload.Backup_MetaVector) (*payload.Empty, error) - RegisterMulti(context.Context, *payload.Backup_MetaVectors) (*payload.Empty, error) + Register(context.Context, *payload.Backup_Vector) (*payload.Empty, error) + RegisterMulti(context.Context, *payload.Backup_Vectors) (*payload.Empty, error) Remove(context.Context, *payload.Backup_Remove_Request) (*payload.Empty, error) RemoveMulti(context.Context, *payload.Backup_Remove_RequestMulti) (*payload.Empty, error) RegisterIPs(context.Context, *payload.Backup_IP_Register_Request) (*payload.Empty, error) @@ -195,16 +196,16 @@ type BackupServer interface { type UnimplementedBackupServer struct { } -func (*UnimplementedBackupServer) GetVector(ctx context.Context, req *payload.Backup_GetVector_Request) (*payload.Backup_MetaVector, error) { +func (*UnimplementedBackupServer) GetVector(ctx context.Context, req *payload.Backup_GetVector_Request) (*payload.Backup_Vector, error) { return nil, status.Errorf(codes.Unimplemented, "method GetVector not implemented") } func (*UnimplementedBackupServer) Locations(ctx context.Context, req *payload.Backup_Locations_Request) (*payload.Info_IPs, error) { return nil, status.Errorf(codes.Unimplemented, "method Locations not implemented") } -func (*UnimplementedBackupServer) Register(ctx context.Context, req *payload.Backup_MetaVector) (*payload.Empty, error) { +func (*UnimplementedBackupServer) Register(ctx context.Context, req *payload.Backup_Vector) (*payload.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Register not implemented") } -func (*UnimplementedBackupServer) RegisterMulti(ctx context.Context, req *payload.Backup_MetaVectors) (*payload.Empty, error) { +func (*UnimplementedBackupServer) RegisterMulti(ctx context.Context, req *payload.Backup_Vectors) (*payload.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method RegisterMulti not implemented") } func (*UnimplementedBackupServer) Remove(ctx context.Context, req *payload.Backup_Remove_Request) (*payload.Empty, error) { @@ -234,7 +235,7 @@ func _Backup_GetVector_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/compressor.Backup/GetVector", + FullMethod: "/manager.compressor.v1.Backup/GetVector", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackupServer).GetVector(ctx, req.(*payload.Backup_GetVector_Request)) @@ -252,7 +253,7 @@ func _Backup_Locations_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/compressor.Backup/Locations", + FullMethod: "/manager.compressor.v1.Backup/Locations", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackupServer).Locations(ctx, req.(*payload.Backup_Locations_Request)) @@ -261,7 +262,7 @@ func _Backup_Locations_Handler(srv interface{}, ctx context.Context, dec func(in } func _Backup_Register_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Backup_MetaVector) + in := new(payload.Backup_Vector) if err := dec(in); err 
!= nil { return nil, err } @@ -270,16 +271,16 @@ func _Backup_Register_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/compressor.Backup/Register", + FullMethod: "/manager.compressor.v1.Backup/Register", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServer).Register(ctx, req.(*payload.Backup_MetaVector)) + return srv.(BackupServer).Register(ctx, req.(*payload.Backup_Vector)) } return interceptor(ctx, in, info, handler) } func _Backup_RegisterMulti_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(payload.Backup_MetaVectors) + in := new(payload.Backup_Vectors) if err := dec(in); err != nil { return nil, err } @@ -288,10 +289,10 @@ func _Backup_RegisterMulti_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/compressor.Backup/RegisterMulti", + FullMethod: "/manager.compressor.v1.Backup/RegisterMulti", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServer).RegisterMulti(ctx, req.(*payload.Backup_MetaVectors)) + return srv.(BackupServer).RegisterMulti(ctx, req.(*payload.Backup_Vectors)) } return interceptor(ctx, in, info, handler) } @@ -306,7 +307,7 @@ func _Backup_Remove_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/compressor.Backup/Remove", + FullMethod: "/manager.compressor.v1.Backup/Remove", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackupServer).Remove(ctx, req.(*payload.Backup_Remove_Request)) @@ -324,7 +325,7 @@ func _Backup_RemoveMulti_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/compressor.Backup/RemoveMulti", + FullMethod: "/manager.compressor.v1.Backup/RemoveMulti", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackupServer).RemoveMulti(ctx, req.(*payload.Backup_Remove_RequestMulti)) @@ -342,7 +343,7 @@ func _Backup_RegisterIPs_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/compressor.Backup/RegisterIPs", + FullMethod: "/manager.compressor.v1.Backup/RegisterIPs", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackupServer).RegisterIPs(ctx, req.(*payload.Backup_IP_Register_Request)) @@ -360,7 +361,7 @@ func _Backup_RemoveIPs_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/compressor.Backup/RemoveIPs", + FullMethod: "/manager.compressor.v1.Backup/RemoveIPs", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackupServer).RemoveIPs(ctx, req.(*payload.Backup_IP_Remove_Request)) @@ -369,7 +370,7 @@ func _Backup_RemoveIPs_Handler(srv interface{}, ctx context.Context, dec func(in } var _Backup_serviceDesc = grpc.ServiceDesc{ - ServiceName: "compressor.Backup", + ServiceName: "manager.compressor.v1.Backup", HandlerType: (*BackupServer)(nil), Methods: []grpc.MethodDesc{ { @@ -406,5 +407,5 @@ var _Backup_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "compressor/compressor.proto", + Metadata: "apis/proto/v1/manager/compressor/compressor.proto", } diff --git 
a/apis/grpc/manager/index/index_manager.pb.go b/apis/grpc/v1/manager/index/index_manager.pb.go similarity index 66% rename from apis/grpc/manager/index/index_manager.pb.go rename to apis/grpc/v1/manager/index/index_manager.pb.go index 5c7cc063eb..267123b492 100644 --- a/apis/grpc/manager/index/index_manager.pb.go +++ b/apis/grpc/v1/manager/index/index_manager.pb.go @@ -21,9 +21,8 @@ import ( fmt "fmt" math "math" - _ "github.com/danielvladco/go-proto-gql/pb" proto "github.com/gogo/protobuf/proto" - payload "github.com/vdaas/vald/apis/grpc/payload" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -41,26 +40,28 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func init() { proto.RegisterFile("index/index_manager.proto", fileDescriptor_11357116787cb271) } - -var fileDescriptor_11357116787cb271 = []byte{ - // 246 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x4b, 0x49, - 0xad, 0xd0, 0x07, 0x93, 0xf1, 0xb9, 0x89, 0x79, 0x89, 0xe9, 0xa9, 0x45, 0x7a, 0x05, 0x45, 0xf9, - 0x25, 0xf9, 0x42, 0xbc, 0x28, 0x82, 0x52, 0xbc, 0x05, 0x89, 0x95, 0x39, 0xf9, 0x89, 0x29, 0x10, - 0x59, 0x29, 0x99, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xfd, 0xc4, 0x82, 0x4c, 0xfd, 0xc4, 0xbc, - 0xbc, 0xfc, 0x92, 0xc4, 0x92, 0xcc, 0xfc, 0xbc, 0x62, 0xa8, 0x2c, 0x4f, 0x41, 0x92, 0x7e, 0x7a, - 0x61, 0x0e, 0x84, 0x67, 0x14, 0xc5, 0xc5, 0xea, 0x09, 0x32, 0x4b, 0xc8, 0x9b, 0x8b, 0x13, 0xcc, - 0xf0, 0xcc, 0x4b, 0xcb, 0x17, 0xe2, 0xd3, 0x83, 0x99, 0xe8, 0x9a, 0x5b, 0x50, 0x52, 0x29, 0x25, - 0x09, 0xe7, 0x83, 0xa4, 0xf5, 0xc0, 0x0a, 0xf5, 0x9c, 0xf3, 0x4b, 0xf3, 0x4a, 0x94, 0x84, 0x9b, - 0x2e, 0x3f, 0x99, 0xcc, 0xc4, 0x2b, 0xc4, 0xad, 0x0f, 0x73, 0x6e, 0x5a, 0xbe, 0x14, 0xcb, 0x86, - 0x07, 0xf2, 0x4c, 0x4e, 0xb9, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, - 0x1c, 0x23, 0x97, 0x4c, 0x7e, 0x51, 0xba, 0x5e, 0x59, 0x4a, 0x62, 0x62, 0xb1, 0x5e, 0x59, 0x62, - 0x4e, 0x8a, 0x1e, 0xcc, 0x47, 0x60, 0x6d, 0x4e, 0x02, 0x61, 0x89, 0x39, 0x29, 0x60, 0x73, 0x7d, - 0x21, 0xe2, 0x01, 0x8c, 0x51, 0xba, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, - 0xfa, 0x60, 0x8d, 0xfa, 0x20, 0x8d, 0x20, 0x6f, 0x15, 0xeb, 0xa7, 0x17, 0x15, 0x24, 0xeb, 0x43, - 0x8d, 0x80, 0xd8, 0x9c, 0xc4, 0x06, 0xf6, 0x91, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x98, 0x1d, - 0x6e, 0xfd, 0x38, 0x01, 0x00, 0x00, +func init() { + proto.RegisterFile("apis/proto/v1/manager/index/index_manager.proto", fileDescriptor_0152ec67984b188e) +} + +var fileDescriptor_0152ec67984b188e = []byte{ + // 252 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0x2c, 0xc8, 0x2c, + 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x33, 0xd4, 0xcf, 0x4d, 0xcc, 0x4b, 0x4c, 0x4f, + 0x2d, 0xd2, 0xcf, 0xcc, 0x4b, 0x49, 0xad, 0x80, 0x90, 0xf1, 0x50, 0x31, 0x3d, 0xb0, 0x22, 0x21, + 0x01, 0x18, 0x17, 0x2c, 0xa9, 0x57, 0x66, 0x28, 0xa5, 0x8c, 0x6a, 0x44, 0x41, 0x62, 0x65, 0x4e, + 0x7e, 0x62, 0x0a, 0x8c, 0x86, 0x68, 0x93, 0x92, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x05, 0x59, + 0xa7, 0x9f, 0x98, 0x97, 0x97, 0x5f, 0x92, 0x58, 0x92, 0x99, 0x9f, 0x57, 0x0c, 0x91, 0x35, 0x8a, + 0xe2, 0x62, 0xf5, 0x04, 0x19, 0x27, 0x14, 0xc8, 0xc5, 0x09, 0x66, 0x78, 0xe6, 0xa5, 0xe5, 0x0b, + 0x09, 0xea, 0xc1, 0xcc, 0x28, 0x33, 0xd4, 0x73, 0xcd, 
0x2d, 0x28, 0xa9, 0x94, 0x92, 0x41, 0x16, + 0x02, 0x29, 0xd2, 0x03, 0x2b, 0xd7, 0x73, 0xce, 0x2f, 0xcd, 0x2b, 0x51, 0x12, 0x6e, 0xba, 0xfc, + 0x64, 0x32, 0x13, 0xaf, 0x10, 0x37, 0xdc, 0xfd, 0x69, 0xf9, 0x4e, 0xe5, 0x27, 0x1e, 0xc9, 0x31, + 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x23, 0x97, 0x72, 0x7e, 0x51, 0xba, 0x5e, 0x59, + 0x4a, 0x62, 0x62, 0xb1, 0x5e, 0x59, 0x62, 0x4e, 0x8a, 0x5e, 0x62, 0x41, 0x26, 0xc8, 0x28, 0x14, + 0x4f, 0x39, 0x09, 0x84, 0x25, 0xe6, 0xa4, 0x80, 0x0d, 0xf6, 0x85, 0x88, 0x07, 0x30, 0x46, 0x19, + 0xa4, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x83, 0xf5, 0xeb, 0x83, 0xf4, + 0x43, 0x82, 0x2f, 0xbd, 0xa8, 0x20, 0x19, 0x23, 0xf4, 0x92, 0xd8, 0xc0, 0x7e, 0x33, 0x06, 0x04, + 0x00, 0x00, 0xff, 0xff, 0x35, 0xf2, 0xc1, 0x66, 0x63, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -88,7 +89,7 @@ func NewIndexClient(cc *grpc.ClientConn) IndexClient { func (c *indexClient) IndexInfo(ctx context.Context, in *payload.Empty, opts ...grpc.CallOption) (*payload.Info_Index_Count, error) { out := new(payload.Info_Index_Count) - err := c.cc.Invoke(ctx, "/index_manager.Index/IndexInfo", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.index.v1.Index/IndexInfo", in, out, opts...) if err != nil { return nil, err } @@ -122,7 +123,7 @@ func _Index_IndexInfo_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index_manager.Index/IndexInfo", + FullMethod: "/manager.index.v1.Index/IndexInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(IndexServer).IndexInfo(ctx, req.(*payload.Empty)) @@ -131,7 +132,7 @@ func _Index_IndexInfo_Handler(srv interface{}, ctx context.Context, dec func(int } var _Index_serviceDesc = grpc.ServiceDesc{ - ServiceName: "index_manager.Index", + ServiceName: "manager.index.v1.Index", HandlerType: (*IndexServer)(nil), Methods: []grpc.MethodDesc{ { @@ -140,5 +141,5 @@ var _Index_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "index/index_manager.proto", + Metadata: "apis/proto/v1/manager/index/index_manager.proto", } diff --git a/apis/grpc/manager/replication/agent/replication_manager.pb.go b/apis/grpc/v1/manager/replication/agent/replication_manager.pb.go similarity index 70% rename from apis/grpc/manager/replication/agent/replication_manager.pb.go rename to apis/grpc/v1/manager/replication/agent/replication_manager.pb.go index af4356a222..80b5e0ef7a 100644 --- a/apis/grpc/manager/replication/agent/replication_manager.pb.go +++ b/apis/grpc/v1/manager/replication/agent/replication_manager.pb.go @@ -21,9 +21,8 @@ import ( fmt "fmt" math "math" - _ "github.com/danielvladco/go-proto-gql/pb" proto "github.com/gogo/protobuf/proto" - payload "github.com/vdaas/vald/apis/grpc/payload" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -42,32 +41,32 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { - proto.RegisterFile("replication/agent/replication_manager.proto", fileDescriptor_2c09480608bbf428) -} - -var fileDescriptor_2c09480608bbf428 = []byte{ - // 329 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xd1, 0x4f, 0x4a, 0xfb, 0x40, - 0x14, 0x07, 0x70, 0x52, 0x7e, 0xfc, 0xa4, 0xe3, 0x9f, 0xc5, 0x28, 0x55, 
0xd3, 0x9a, 0x42, 0x56, - 0x82, 0x30, 0x03, 0xba, 0x72, 0x69, 0xc1, 0x85, 0x0b, 0x41, 0xba, 0x28, 0xa8, 0x0b, 0x79, 0x49, - 0xa6, 0x63, 0x60, 0x3a, 0x6f, 0x9c, 0x8c, 0x85, 0x6e, 0x3d, 0x81, 0xe0, 0x11, 0xbc, 0x80, 0xc7, - 0x70, 0x29, 0x78, 0x81, 0x52, 0x3c, 0x88, 0x74, 0x9a, 0x6a, 0x68, 0xeb, 0x32, 0xf9, 0x7e, 0xf3, - 0x79, 0x8f, 0x3c, 0x72, 0x64, 0x85, 0x51, 0x79, 0x0a, 0x2e, 0x47, 0xcd, 0x41, 0x0a, 0xed, 0x78, - 0xe5, 0xcd, 0xdd, 0x00, 0x34, 0x48, 0x61, 0x99, 0xb1, 0xe8, 0x90, 0x6e, 0xaf, 0x88, 0xc2, 0x4d, - 0x03, 0x23, 0x85, 0x90, 0xcd, 0x3a, 0x61, 0x4b, 0x22, 0x4a, 0x25, 0x38, 0x98, 0x9c, 0x83, 0xd6, - 0xe8, 0x7c, 0xbb, 0x28, 0xd3, 0x0d, 0x93, 0x70, 0xf9, 0xa0, 0x66, 0x4f, 0xc7, 0xaf, 0x35, 0xb2, - 0xde, 0xfd, 0x25, 0x69, 0x8f, 0xac, 0x75, 0x45, 0x8a, 0x43, 0x61, 0xe9, 0x01, 0x9b, 0xb3, 0x95, - 0x02, 0x2b, 0xd3, 0x51, 0xb8, 0xf5, 0x13, 0x9f, 0x0f, 0x8c, 0x1b, 0xc5, 0xad, 0xa7, 0xcf, 0xaf, - 0x97, 0x5a, 0x23, 0xde, 0xa9, 0x6e, 0xcf, 0x6d, 0x89, 0xdd, 0x92, 0x7a, 0x57, 0x24, 0xa0, 0x40, - 0xa7, 0x82, 0x46, 0x7f, 0xc8, 0x65, 0xbe, 0x44, 0x47, 0x9e, 0xde, 0x8b, 0x1b, 0x0b, 0xf4, 0xdc, - 0xbb, 0x26, 0xf5, 0xb3, 0xe9, 0x7f, 0xbb, 0xd0, 0x7d, 0xa4, 0x0b, 0x1f, 0x87, 0xcd, 0x95, 0xc3, - 0x7c, 0xbf, 0x88, 0xdb, 0x5e, 0xde, 0xa7, 0xbb, 0x7c, 0xf9, 0x08, 0xb9, 0xee, 0x63, 0xf8, 0xef, - 0x6d, 0xdc, 0xae, 0x75, 0x9e, 0x83, 0xf7, 0x49, 0x14, 0x7c, 0x4c, 0xa2, 0x60, 0x3c, 0x89, 0x02, - 0x72, 0x88, 0x56, 0xb2, 0x61, 0x06, 0x50, 0xb0, 0x21, 0xa8, 0x8c, 0xcd, 0x4f, 0x54, 0x31, 0x98, - 0x37, 0x3a, 0xcd, 0x1e, 0xa8, 0xac, 0x32, 0xf7, 0x72, 0xd6, 0xf4, 0xd3, 0xaf, 0x82, 0x9b, 0x53, - 0x99, 0xbb, 0xfb, 0xc7, 0x84, 0xa5, 0x38, 0xe0, 0xde, 0xe3, 0x53, 0x6f, 0x7a, 0xb8, 0x82, 0x4b, - 0x6b, 0x52, 0x5e, 0xca, 0xcb, 0xdb, 0x25, 0xff, 0xfd, 0xfd, 0x4e, 0xbe, 0x03, 0x00, 0x00, 0xff, - 0xff, 0x8c, 0x5e, 0xa1, 0xb7, 0x3e, 0x02, 0x00, 0x00, + proto.RegisterFile("apis/proto/v1/manager/replication/agent/replication_manager.proto", fileDescriptor_e8f74170057978aa) +} + +var fileDescriptor_e8f74170057978aa = []byte{ + // 330 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x4a, 0xfb, 0x30, + 0x1c, 0xc7, 0xe9, 0x0e, 0xff, 0x3f, 0xab, 0x27, 0x83, 0x4c, 0xad, 0xa3, 0x6a, 0xbd, 0x27, 0x4c, + 0xaf, 0x5e, 0x36, 0xf0, 0xe0, 0x41, 0x90, 0x1d, 0x3c, 0x88, 0x30, 0x7e, 0x6b, 0xb3, 0x18, 0xc8, + 0xf2, 0x0b, 0x69, 0x0c, 0xec, 0xea, 0x2b, 0x78, 0xf3, 0x25, 0x7c, 0x0d, 0x8f, 0x82, 0x2f, 0x20, + 0xc3, 0x07, 0x91, 0x65, 0x2d, 0x76, 0xba, 0x79, 0x2a, 0x6d, 0xbf, 0xdf, 0xcf, 0x87, 0xf6, 0x9b, + 0xb8, 0x0f, 0x46, 0x96, 0xcc, 0x58, 0x74, 0xc8, 0x7c, 0x8f, 0x4d, 0x41, 0x83, 0xe0, 0x96, 0x59, + 0x6e, 0x94, 0xcc, 0xc1, 0x49, 0xd4, 0x0c, 0x04, 0xd7, 0xae, 0xf9, 0x64, 0x54, 0xa5, 0x68, 0xa8, + 0x91, 0x6e, 0x7d, 0xdb, 0x88, 0xd0, 0x50, 0xa2, 0xbe, 0x97, 0x9c, 0xac, 0x0a, 0x0c, 0xcc, 0x14, + 0x42, 0x51, 0x5f, 0x97, 0x88, 0xa4, 0x2b, 0x10, 0x85, 0xe2, 0x0c, 0x8c, 0x64, 0xa0, 0x35, 0xba, + 0x00, 0x29, 0x97, 0x6f, 0x4f, 0x5f, 0x5a, 0xf1, 0xd6, 0xf0, 0x9b, 0x4d, 0xee, 0xe2, 0xff, 0x43, + 0x9e, 0xa3, 0xe7, 0x96, 0x1c, 0xd1, 0x1a, 0xe4, 0x7b, 0xb4, 0x91, 0xa1, 0x55, 0x60, 0x96, 0x6c, + 0x37, 0x13, 0x17, 0x53, 0xe3, 0x66, 0x59, 0xf7, 0xf1, 0xfd, 0xf3, 0xa9, 0xd5, 0xc9, 0x76, 0x56, + 0x3e, 0xd3, 0x56, 0x48, 0x88, 0xdb, 0x43, 0x3e, 0x06, 0x05, 0x3a, 0xe7, 0xe4, 0x78, 0x33, 0xbf, + 0x8a, 0xac, 0x13, 0xa4, 0x41, 0xb0, 0x97, 0x75, 0x7e, 0x08, 0x6a, 0xea, 0x28, 0x6e, 0xf7, 0x17, + 0xff, 0xe7, 0x52, 0x4f, 0x90, 0xfc, 0xee, 0x27, 0xe9, 0x26, 0x6b, 0x68, 0x95, 0xd9, 0x61, 0xe0, + 0xef, 0x93, 
0xdd, 0x35, 0x3b, 0x49, 0x3d, 0xc1, 0xc1, 0x73, 0xf4, 0x3a, 0x4f, 0xa3, 0xb7, 0x79, + 0x1a, 0x7d, 0xcc, 0xd3, 0x28, 0x66, 0x68, 0x05, 0xf5, 0x05, 0x40, 0x49, 0x3d, 0xa8, 0x82, 0x82, + 0x91, 0x0b, 0xf2, 0xc6, 0xdd, 0x06, 0x07, 0x37, 0xa0, 0x8a, 0x86, 0xf8, 0x6a, 0x99, 0x0c, 0xfa, + 0xeb, 0xe8, 0xf6, 0x5c, 0x48, 0x77, 0xff, 0x30, 0xa6, 0x39, 0x4e, 0x59, 0xc0, 0xb2, 0x05, 0x96, + 0x85, 0xa5, 0x85, 0x35, 0xf9, 0x9f, 0x27, 0x69, 0xfc, 0x2f, 0xac, 0x7a, 0xf6, 0x15, 0x00, 0x00, + 0xff, 0xff, 0x3d, 0x03, 0xdc, 0x90, 0x7b, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -97,7 +96,7 @@ func NewReplicationClient(cc *grpc.ClientConn) ReplicationClient { func (c *replicationClient) Recover(ctx context.Context, in *payload.Replication_Recovery, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/replication_manager.Replication/Recover", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.replication.agent.v1.Replication/Recover", in, out, opts...) if err != nil { return nil, err } @@ -106,7 +105,7 @@ func (c *replicationClient) Recover(ctx context.Context, in *payload.Replication func (c *replicationClient) Rebalance(ctx context.Context, in *payload.Replication_Rebalance, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/replication_manager.Replication/Rebalance", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.replication.agent.v1.Replication/Rebalance", in, out, opts...) if err != nil { return nil, err } @@ -115,7 +114,7 @@ func (c *replicationClient) Rebalance(ctx context.Context, in *payload.Replicati func (c *replicationClient) AgentInfo(ctx context.Context, in *payload.Empty, opts ...grpc.CallOption) (*payload.Replication_Agents, error) { out := new(payload.Replication_Agents) - err := c.cc.Invoke(ctx, "/replication_manager.Replication/AgentInfo", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.replication.agent.v1.Replication/AgentInfo", in, out, opts...) 
if err != nil { return nil, err } @@ -157,7 +156,7 @@ func _Replication_Recover_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/replication_manager.Replication/Recover", + FullMethod: "/manager.replication.agent.v1.Replication/Recover", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ReplicationServer).Recover(ctx, req.(*payload.Replication_Recovery)) @@ -175,7 +174,7 @@ func _Replication_Rebalance_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/replication_manager.Replication/Rebalance", + FullMethod: "/manager.replication.agent.v1.Replication/Rebalance", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ReplicationServer).Rebalance(ctx, req.(*payload.Replication_Rebalance)) @@ -193,7 +192,7 @@ func _Replication_AgentInfo_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/replication_manager.Replication/AgentInfo", + FullMethod: "/manager.replication.agent.v1.Replication/AgentInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ReplicationServer).AgentInfo(ctx, req.(*payload.Empty)) @@ -202,7 +201,7 @@ func _Replication_AgentInfo_Handler(srv interface{}, ctx context.Context, dec fu } var _Replication_serviceDesc = grpc.ServiceDesc{ - ServiceName: "replication_manager.Replication", + ServiceName: "manager.replication.agent.v1.Replication", HandlerType: (*ReplicationServer)(nil), Methods: []grpc.MethodDesc{ { @@ -219,5 +218,5 @@ var _Replication_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "replication/agent/replication_manager.proto", + Metadata: "apis/proto/v1/manager/replication/agent/replication_manager.proto", } diff --git a/apis/grpc/manager/replication/controller/replication_manager.pb.go b/apis/grpc/v1/manager/replication/controller/replication_manager.pb.go similarity index 65% rename from apis/grpc/manager/replication/controller/replication_manager.pb.go rename to apis/grpc/v1/manager/replication/controller/replication_manager.pb.go index d984cef219..67a1747a37 100644 --- a/apis/grpc/manager/replication/controller/replication_manager.pb.go +++ b/apis/grpc/v1/manager/replication/controller/replication_manager.pb.go @@ -21,9 +21,8 @@ import ( fmt "fmt" math "math" - _ "github.com/danielvladco/go-proto-gql/pb" proto "github.com/gogo/protobuf/proto" - payload "github.com/vdaas/vald/apis/grpc/payload" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -42,29 +41,29 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { - proto.RegisterFile("replication/controller/replication_manager.proto", fileDescriptor_ca7dddd9e8833d57) + proto.RegisterFile("apis/proto/v1/manager/replication/controller/replication_manager.proto", fileDescriptor_7996d9fdae0b086a) } -var fileDescriptor_ca7dddd9e8833d57 = []byte{ - // 274 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x28, 0x4a, 0x2d, 0xc8, - 0xc9, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, - 0x49, 0x2d, 0xd2, 0x47, 0x12, 0x8e, 0xcf, 0x4d, 0xcc, 0x4b, 0x4c, 0x4f, 0x2d, 0xd2, 0x2b, 0x28, - 0xca, 
0x2f, 0xc9, 0x17, 0x12, 0xc6, 0x22, 0x25, 0xc5, 0x5b, 0x90, 0x58, 0x99, 0x93, 0x9f, 0x98, - 0x02, 0x51, 0x23, 0x25, 0x93, 0x9e, 0x9f, 0x9f, 0x9e, 0x93, 0xaa, 0x9f, 0x58, 0x90, 0xa9, 0x9f, - 0x98, 0x97, 0x97, 0x5f, 0x02, 0x56, 0x5d, 0x0c, 0x95, 0xe5, 0x29, 0x48, 0xd2, 0x4f, 0x2f, 0xcc, - 0x81, 0xf0, 0x8c, 0x2a, 0xb8, 0x44, 0x83, 0x10, 0x26, 0x3a, 0xc3, 0x9d, 0x20, 0x14, 0xc9, 0xc5, - 0x8f, 0x24, 0xe1, 0x99, 0x97, 0x96, 0x2f, 0xc4, 0xa7, 0x07, 0xb3, 0xc7, 0x35, 0xb7, 0xa0, 0xa4, - 0x52, 0x4a, 0x1a, 0xce, 0x47, 0x52, 0xa9, 0xe7, 0x98, 0x9e, 0x9a, 0x57, 0x52, 0xac, 0x24, 0xd9, - 0x74, 0xf9, 0xc9, 0x64, 0x26, 0x61, 0x21, 0x41, 0x64, 0xcf, 0xe8, 0x67, 0xe6, 0xa5, 0xe5, 0x4b, - 0xb1, 0x6c, 0x78, 0x20, 0xcf, 0xe4, 0x34, 0x9f, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, - 0x18, 0x1f, 0x3c, 0x92, 0x63, 0xe4, 0xd2, 0xcd, 0x2f, 0x4a, 0xd7, 0x2b, 0x4b, 0x49, 0x4c, 0x2c, - 0xd6, 0x2b, 0x4b, 0xcc, 0x49, 0xd1, 0x83, 0x79, 0x1b, 0x49, 0xb7, 0x1e, 0x22, 0x84, 0x9c, 0x14, - 0xc2, 0x12, 0x73, 0x52, 0x90, 0xac, 0xf5, 0x85, 0x28, 0x47, 0x78, 0x20, 0x80, 0x31, 0xca, 0x2e, - 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0x1f, 0x6c, 0xb2, 0x3e, 0xc8, 0x64, - 0x50, 0xb0, 0x14, 0xeb, 0xa7, 0x17, 0x15, 0x24, 0xeb, 0x43, 0xed, 0xd0, 0xc7, 0x1e, 0x0b, 0x49, - 0x6c, 0xe0, 0x20, 0x32, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xc5, 0xa0, 0x9e, 0x80, 0xa6, 0x01, - 0x00, 0x00, +var fileDescriptor_7996d9fdae0b086a = []byte{ + // 279 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0x2c, 0xc8, 0x2c, + 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x33, 0xd4, 0xcf, 0x4d, 0xcc, 0x4b, 0x4c, 0x4f, + 0x2d, 0xd2, 0x2f, 0x4a, 0x2d, 0xc8, 0xc9, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x4f, 0xce, + 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x41, 0x15, 0x8e, 0x87, 0x2a, 0xd5, 0x03, 0xeb, 0x15, 0x52, + 0x84, 0x71, 0x91, 0x94, 0xe8, 0x21, 0x74, 0xea, 0x95, 0x19, 0x4a, 0x29, 0xa3, 0x5a, 0x55, 0x90, + 0x58, 0x99, 0x93, 0x9f, 0x98, 0x02, 0xa3, 0x21, 0xe6, 0x48, 0xc9, 0xa4, 0xe7, 0xe7, 0xa7, 0xe7, + 0xa4, 0xea, 0x27, 0x16, 0x64, 0xea, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x80, 0x4d, 0x2a, 0x86, 0xc8, + 0x1a, 0x55, 0x70, 0x89, 0x06, 0x21, 0xcc, 0x77, 0x86, 0x1b, 0x2f, 0x14, 0xcf, 0xc5, 0x8f, 0x24, + 0xe1, 0x99, 0x97, 0x96, 0x2f, 0x24, 0xa8, 0x07, 0x33, 0xb9, 0xcc, 0x50, 0xcf, 0x35, 0xb7, 0xa0, + 0xa4, 0x52, 0x4a, 0x0e, 0x59, 0x08, 0x49, 0xbd, 0x9e, 0x63, 0x7a, 0x6a, 0x5e, 0x49, 0xb1, 0x92, + 0x64, 0xd3, 0xe5, 0x27, 0x93, 0x99, 0x84, 0x85, 0x04, 0x51, 0xfc, 0x9f, 0x99, 0x97, 0x96, 0xef, + 0xb4, 0x92, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0xe4, + 0x32, 0xc9, 0x2f, 0x4a, 0xd7, 0x2b, 0x4b, 0x49, 0x4c, 0x2c, 0xd6, 0x2b, 0x4b, 0xcc, 0x49, 0xd1, + 0x4b, 0x2c, 0xc8, 0x04, 0x99, 0x89, 0x3f, 0x10, 0x9c, 0x14, 0xc2, 0x12, 0x73, 0x52, 0x90, 0xec, + 0xf5, 0x85, 0x28, 0x47, 0xf8, 0x23, 0x80, 0x31, 0xca, 0x31, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, + 0x2f, 0x39, 0x3f, 0x57, 0x1f, 0x6c, 0x81, 0x3e, 0xc8, 0x02, 0x7d, 0x70, 0xd8, 0xa5, 0x17, 0x15, + 0x24, 0x13, 0x8e, 0xa5, 0x24, 0x36, 0x70, 0x60, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd3, + 0x28, 0xa3, 0x9f, 0xdc, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -92,7 +91,7 @@ func NewReplicationControllerClient(cc *grpc.ClientConn) ReplicationControllerCl func (c *replicationControllerClient) ReplicationInfo(ctx context.Context, in *payload.Empty, opts ...grpc.CallOption) (*payload.Replication_Agents, error) { out := new(payload.Replication_Agents) - err := c.cc.Invoke(ctx, "/replication_manager.ReplicationController/ReplicationInfo", in, out, opts...) + err := c.cc.Invoke(ctx, "/manager.replication.controller.v1.ReplicationController/ReplicationInfo", in, out, opts...) if err != nil { return nil, err } @@ -126,7 +125,7 @@ func _ReplicationController_ReplicationInfo_Handler(srv interface{}, ctx context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/replication_manager.ReplicationController/ReplicationInfo", + FullMethod: "/manager.replication.controller.v1.ReplicationController/ReplicationInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ReplicationControllerServer).ReplicationInfo(ctx, req.(*payload.Empty)) @@ -135,7 +134,7 @@ func _ReplicationController_ReplicationInfo_Handler(srv interface{}, ctx context } var _ReplicationController_serviceDesc = grpc.ServiceDesc{ - ServiceName: "replication_manager.ReplicationController", + ServiceName: "manager.replication.controller.v1.ReplicationController", HandlerType: (*ReplicationControllerServer)(nil), Methods: []grpc.MethodDesc{ { @@ -144,5 +143,5 @@ var _ReplicationController_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "replication/controller/replication_manager.proto", + Metadata: "apis/proto/v1/manager/replication/controller/replication_manager.proto", } diff --git a/apis/grpc/meta/meta.pb.go b/apis/grpc/v1/meta/meta.pb.go similarity index 79% rename from apis/grpc/meta/meta.pb.go rename to apis/grpc/v1/meta/meta.pb.go index 2737f22a60..b9e6990c1c 100644 --- a/apis/grpc/meta/meta.pb.go +++ b/apis/grpc/v1/meta/meta.pb.go @@ -21,9 +21,8 @@ import ( fmt "fmt" math "math" - _ "github.com/danielvladco/go-proto-gql/pb" proto "github.com/gogo/protobuf/proto" - payload "github.com/vdaas/vald/apis/grpc/payload" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -41,35 +40,35 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func init() { proto.RegisterFile("meta.proto", fileDescriptor_3b5ea8fe65782bcc) } - -var fileDescriptor_3b5ea8fe65782bcc = []byte{ - // 385 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x4a, 0xeb, 0x40, - 0x14, 0xc7, 0x49, 0xe9, 0x17, 0xe7, 0xb6, 0xe9, 0xed, 0xdc, 0x5b, 0x17, 0x41, 0x2a, 0x0c, 0xae, - 0xb2, 0xc8, 0x80, 0xee, 0x85, 0x16, 0x8b, 0x88, 0x14, 0x2b, 0x15, 0x17, 0x6e, 0x64, 0xda, 0x0e, - 0x31, 0x90, 0x64, 0x62, 0x66, 0x2c, 0x74, 0xeb, 0x2b, 0xf8, 0x22, 0x3e, 0x86, 0x4b, 0xc1, 0x17, - 0x28, 0x45, 0xf0, 0x35, 0x64, 0x26, 0x89, 0xd5, 0xa4, 0x9b, 0x76, 0x13, 0xe6, 0xe3, 0x9c, 0x5f, - 0x7e, 0x9c, 0x3f, 0x03, 0x10, 0x30, 0x49, 0x9d, 0x28, 0xe6, 0x92, 0xa3, 0x86, 0x5a, 0xdf, 0x05, - 0x34, 0xa4, 0x2e, 0x8b, 0xad, 0x66, 0x44, 0x17, 0x3e, 0xa7, 0xb3, 0xe4, 0xd2, 0xda, 0x77, 0x39, - 0x77, 0x7d, 0x46, 0x68, 0xe4, 0x11, 0x1a, 0x86, 0x5c, 0x52, 0xe9, 0xf1, 0x50, 0xa4, 0xb7, 0x8d, - 0x68, 0x42, 0xdc, 0x07, 0x3f, 0xd9, 0x1d, 0x7d, 0x56, 0xa0, 0x3c, 0x64, 0x92, 0xa2, 0x13, 0xa8, - 0x9d, 0x31, 0xa9, 0x97, 0x6d, 0x27, 0xe3, 0xa9, 0xad, 0x73, 0xc1, 0x16, 0x56, 0xee, 0xe8, 0x86, - 0xfa, 0xb8, 0xf9, 0xf4, 0xfe, 0xf1, 0x5c, 0xaa, 0xe1, 0x0a, 0x51, 0x2e, 0xa8, 0x0f, 0xf5, 0xb4, - 0x5f, 0x20, 0x54, 0x00, 0x08, 0x0b, 0x15, 0x08, 0x02, 0x9b, 0x1a, 0x51, 0xc7, 0x55, 0x8d, 0x10, - 0x68, 0x08, 0x66, 0xca, 0x38, 0x0f, 0xe7, 0x2c, 0x16, 0x0c, 0x15, 0xff, 0x6b, 0x15, 0xed, 0x70, - 0x47, 0x73, 0x5a, 0xb8, 0x49, 0xbc, 0xa4, 0x2f, 0x51, 0xba, 0x82, 0x56, 0xa6, 0x94, 0xf1, 0x36, - 0x58, 0x58, 0x1b, 0x6c, 0xf1, 0x9e, 0x26, 0xfe, 0xc5, 0xe6, 0x2f, 0xa2, 0x50, 0x53, 0x1a, 0xa7, - 0x53, 0xfa, 0x5f, 0x68, 0x53, 0x76, 0xe6, 0xf7, 0xe9, 0x20, 0x88, 0xe4, 0x22, 0x3f, 0xa5, 0x1e, - 0xd4, 0xc7, 0xd9, 0x94, 0x3a, 0x9b, 0x00, 0xa2, 0x40, 0xc8, 0x0f, 0xa9, 0x07, 0x70, 0xca, 0x7c, - 0x26, 0xd9, 0xf6, 0x59, 0xd9, 0xa9, 0xc5, 0x00, 0xfe, 0xac, 0x11, 0x5b, 0xc7, 0x65, 0x67, 0x26, - 0x23, 0x68, 0xaf, 0x31, 0x3b, 0x25, 0x66, 0xe7, 0x12, 0xbb, 0x06, 0xf4, 0x43, 0x6c, 0xc7, 0xd0, - 0xec, 0x5c, 0x68, 0x56, 0xf9, 0x65, 0x79, 0x50, 0xea, 0x5f, 0xbe, 0xae, 0xba, 0xc6, 0xdb, 0xaa, - 0x6b, 0x2c, 0x57, 0x5d, 0x03, 0xfe, 0xf1, 0xd8, 0x75, 0xe6, 0x33, 0x4a, 0x85, 0x33, 0xa7, 0xfe, - 0xcc, 0x51, 0x95, 0xfd, 0xb2, 0xfa, 0x8e, 0x8c, 0xdb, 0x43, 0xd7, 0x93, 0xf7, 0x8f, 0x13, 0x67, - 0xca, 0x03, 0xa2, 0x6b, 0x88, 0xaa, 0x51, 0xef, 0x49, 0x10, 0x37, 0x8e, 0xa6, 0x9a, 0x3b, 0xa9, - 0xea, 0x17, 0x74, 0xfc, 0x15, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x09, 0x3a, 0x7f, 0x98, 0x03, 0x00, - 0x00, +func init() { proto.RegisterFile("apis/proto/v1/meta/meta.proto", fileDescriptor_f506bb68c7e24dcc) } + +var fileDescriptor_f506bb68c7e24dcc = []byte{ + // 390 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xc1, 0x4a, 0xeb, 0x40, + 0x14, 0x86, 0xc9, 0xe5, 0xde, 0x36, 0xcc, 0xa5, 0xed, 0x6d, 0xb8, 0x55, 0x09, 0xda, 0xc5, 0xb8, + 0x10, 0xb2, 0x98, 0xa1, 0xfa, 0x06, 0xa5, 0x52, 0xa5, 0x08, 0x42, 0xa1, 0x42, 0xc1, 0xc5, 0x69, + 0x33, 0xc4, 0x40, 0x9a, 0x09, 0x99, 0x31, 0xd0, 0xad, 0xaf, 0xe0, 0xda, 0xf7, 0x71, 0x29, 0xf8, + 0x02, 0x52, 0x7c, 0x10, 0xc9, 0x24, 0x43, 0xdb, 0x38, 0x6e, 0xda, 0x4d, 0x32, 0x9c, 0x39, 0xf3, + 0xf1, 0x71, 0x7e, 0x0e, 0x3a, 0x81, 0x24, 0x14, 0x34, 0x49, 0xb9, 0xe4, 0x34, 0xeb, 0xd1, 0x05, + 0x93, 0xa0, 0x3e, 0x44, 0x95, 0x9c, 0xba, 0x3a, 0x67, 0x3d, 0xf7, 0x74, 0xbb, 0x2f, 0x81, 0x65, + 0xc4, 0xc1, 0xd7, 0xff, 0xa2, 0xdb, 0x3d, 0x0e, 
0x38, 0x0f, 0x22, 0x46, 0x21, 0x09, 0x29, 0xc4, + 0x31, 0x97, 0x20, 0x43, 0x1e, 0x8b, 0xe2, 0xf6, 0xfc, 0xa5, 0x86, 0x7e, 0xdf, 0x30, 0x09, 0xce, + 0x00, 0xd5, 0x87, 0x4c, 0xaa, 0xe3, 0x7f, 0xa2, 0x09, 0x59, 0x8f, 0xe4, 0x15, 0x32, 0x62, 0x4b, + 0xf7, 0x7b, 0x75, 0x02, 0x11, 0x6e, 0x3c, 0xbd, 0x7f, 0x3e, 0xff, 0xaa, 0xe3, 0x3f, 0x4a, 0xd0, + 0xb9, 0x42, 0x76, 0x49, 0x11, 0x4e, 0xc7, 0x84, 0x11, 0x6e, 0xc7, 0xc4, 0x11, 0xb8, 0xa9, 0x40, + 0x36, 0xae, 0x29, 0x90, 0x70, 0xc6, 0xa8, 0x59, 0x92, 0xae, 0xe3, 0x8c, 0xa5, 0x82, 0x39, 0x46, + 0x01, 0xd7, 0x28, 0x8b, 0x3b, 0x8a, 0xd6, 0xc2, 0x0d, 0x1a, 0x16, 0xaf, 0x0b, 0xbd, 0x3b, 0xd4, + 0xd2, 0x7a, 0x9a, 0x6a, 0xd6, 0x71, 0xcd, 0xf2, 0xf8, 0x40, 0x71, 0xff, 0xe1, 0xe6, 0x16, 0x57, + 0xe4, 0xd3, 0x1b, 0x97, 0xd3, 0x3b, 0x34, 0xbd, 0xcc, 0x4d, 0xdb, 0x9b, 0x17, 0x97, 0x8b, 0x44, + 0x2e, 0xab, 0xd3, 0x1b, 0x22, 0x7b, 0xac, 0xa7, 0x77, 0xf4, 0x03, 0x46, 0x98, 0x38, 0xd5, 0xe1, + 0x0d, 0x11, 0x1a, 0xb0, 0x88, 0x49, 0xb6, 0x6b, 0x9e, 0x5e, 0x69, 0x34, 0x42, 0x7f, 0xd7, 0xa0, + 0x1d, 0x23, 0xf5, 0xb4, 0xd5, 0x04, 0xb5, 0xd7, 0xb0, 0x3d, 0x52, 0xf5, 0x2a, 0xa9, 0x4e, 0x91, + 0xb3, 0x21, 0xb9, 0x57, 0xb0, 0x5e, 0x25, 0xd8, 0xfe, 0xfd, 0xeb, 0xaa, 0x6b, 0xbd, 0xad, 0xba, + 0xd6, 0xc7, 0xaa, 0x6b, 0x21, 0x97, 0xa7, 0x01, 0xc9, 0x7c, 0x00, 0x41, 0x32, 0x88, 0x7c, 0x02, + 0x49, 0x98, 0x63, 0xf2, 0xd6, 0xbe, 0x3d, 0x81, 0xc8, 0xcf, 0x81, 0xb7, 0xd6, 0xf4, 0x2c, 0x08, + 0xe5, 0xc3, 0xe3, 0x8c, 0xcc, 0xf9, 0x82, 0xaa, 0x76, 0x9a, 0xb7, 0x53, 0xb5, 0xae, 0x41, 0x9a, + 0xcc, 0xf5, 0x56, 0xcf, 0x6a, 0x6a, 0x0b, 0x2f, 0xbe, 0x02, 0x00, 0x00, 0xff, 0xff, 0xee, 0x8c, + 0xc5, 0xc7, 0xf2, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -106,7 +105,7 @@ func NewMetaClient(cc *grpc.ClientConn) MetaClient { func (c *metaClient) GetMeta(ctx context.Context, in *payload.Meta_Key, opts ...grpc.CallOption) (*payload.Meta_Val, error) { out := new(payload.Meta_Val) - err := c.cc.Invoke(ctx, "/meta_manager.Meta/GetMeta", in, out, opts...) + err := c.cc.Invoke(ctx, "/meta.v1.Meta/GetMeta", in, out, opts...) if err != nil { return nil, err } @@ -115,7 +114,7 @@ func (c *metaClient) GetMeta(ctx context.Context, in *payload.Meta_Key, opts ... func (c *metaClient) GetMetas(ctx context.Context, in *payload.Meta_Keys, opts ...grpc.CallOption) (*payload.Meta_Vals, error) { out := new(payload.Meta_Vals) - err := c.cc.Invoke(ctx, "/meta_manager.Meta/GetMetas", in, out, opts...) + err := c.cc.Invoke(ctx, "/meta.v1.Meta/GetMetas", in, out, opts...) if err != nil { return nil, err } @@ -124,7 +123,7 @@ func (c *metaClient) GetMetas(ctx context.Context, in *payload.Meta_Keys, opts . func (c *metaClient) GetMetaInverse(ctx context.Context, in *payload.Meta_Val, opts ...grpc.CallOption) (*payload.Meta_Key, error) { out := new(payload.Meta_Key) - err := c.cc.Invoke(ctx, "/meta_manager.Meta/GetMetaInverse", in, out, opts...) + err := c.cc.Invoke(ctx, "/meta.v1.Meta/GetMetaInverse", in, out, opts...) if err != nil { return nil, err } @@ -133,7 +132,7 @@ func (c *metaClient) GetMetaInverse(ctx context.Context, in *payload.Meta_Val, o func (c *metaClient) GetMetasInverse(ctx context.Context, in *payload.Meta_Vals, opts ...grpc.CallOption) (*payload.Meta_Keys, error) { out := new(payload.Meta_Keys) - err := c.cc.Invoke(ctx, "/meta_manager.Meta/GetMetasInverse", in, out, opts...) + err := c.cc.Invoke(ctx, "/meta.v1.Meta/GetMetasInverse", in, out, opts...) 
if err != nil { return nil, err } @@ -142,7 +141,7 @@ func (c *metaClient) GetMetasInverse(ctx context.Context, in *payload.Meta_Vals, func (c *metaClient) SetMeta(ctx context.Context, in *payload.Meta_KeyVal, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/meta_manager.Meta/SetMeta", in, out, opts...) + err := c.cc.Invoke(ctx, "/meta.v1.Meta/SetMeta", in, out, opts...) if err != nil { return nil, err } @@ -151,7 +150,7 @@ func (c *metaClient) SetMeta(ctx context.Context, in *payload.Meta_KeyVal, opts func (c *metaClient) SetMetas(ctx context.Context, in *payload.Meta_KeyVals, opts ...grpc.CallOption) (*payload.Empty, error) { out := new(payload.Empty) - err := c.cc.Invoke(ctx, "/meta_manager.Meta/SetMetas", in, out, opts...) + err := c.cc.Invoke(ctx, "/meta.v1.Meta/SetMetas", in, out, opts...) if err != nil { return nil, err } @@ -160,7 +159,7 @@ func (c *metaClient) SetMetas(ctx context.Context, in *payload.Meta_KeyVals, opt func (c *metaClient) DeleteMeta(ctx context.Context, in *payload.Meta_Key, opts ...grpc.CallOption) (*payload.Meta_Val, error) { out := new(payload.Meta_Val) - err := c.cc.Invoke(ctx, "/meta_manager.Meta/DeleteMeta", in, out, opts...) + err := c.cc.Invoke(ctx, "/meta.v1.Meta/DeleteMeta", in, out, opts...) if err != nil { return nil, err } @@ -169,7 +168,7 @@ func (c *metaClient) DeleteMeta(ctx context.Context, in *payload.Meta_Key, opts func (c *metaClient) DeleteMetas(ctx context.Context, in *payload.Meta_Keys, opts ...grpc.CallOption) (*payload.Meta_Vals, error) { out := new(payload.Meta_Vals) - err := c.cc.Invoke(ctx, "/meta_manager.Meta/DeleteMetas", in, out, opts...) + err := c.cc.Invoke(ctx, "/meta.v1.Meta/DeleteMetas", in, out, opts...) if err != nil { return nil, err } @@ -178,7 +177,7 @@ func (c *metaClient) DeleteMetas(ctx context.Context, in *payload.Meta_Keys, opt func (c *metaClient) DeleteMetaInverse(ctx context.Context, in *payload.Meta_Val, opts ...grpc.CallOption) (*payload.Meta_Key, error) { out := new(payload.Meta_Key) - err := c.cc.Invoke(ctx, "/meta_manager.Meta/DeleteMetaInverse", in, out, opts...) + err := c.cc.Invoke(ctx, "/meta.v1.Meta/DeleteMetaInverse", in, out, opts...) if err != nil { return nil, err } @@ -187,7 +186,7 @@ func (c *metaClient) DeleteMetaInverse(ctx context.Context, in *payload.Meta_Val func (c *metaClient) DeleteMetasInverse(ctx context.Context, in *payload.Meta_Vals, opts ...grpc.CallOption) (*payload.Meta_Keys, error) { out := new(payload.Meta_Keys) - err := c.cc.Invoke(ctx, "/meta_manager.Meta/DeleteMetasInverse", in, out, opts...) + err := c.cc.Invoke(ctx, "/meta.v1.Meta/DeleteMetasInverse", in, out, opts...) 
if err != nil { return nil, err } @@ -257,7 +256,7 @@ func _Meta_GetMeta_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/meta_manager.Meta/GetMeta", + FullMethod: "/meta.v1.Meta/GetMeta", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MetaServer).GetMeta(ctx, req.(*payload.Meta_Key)) @@ -275,7 +274,7 @@ func _Meta_GetMetas_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/meta_manager.Meta/GetMetas", + FullMethod: "/meta.v1.Meta/GetMetas", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MetaServer).GetMetas(ctx, req.(*payload.Meta_Keys)) @@ -293,7 +292,7 @@ func _Meta_GetMetaInverse_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/meta_manager.Meta/GetMetaInverse", + FullMethod: "/meta.v1.Meta/GetMetaInverse", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MetaServer).GetMetaInverse(ctx, req.(*payload.Meta_Val)) @@ -311,7 +310,7 @@ func _Meta_GetMetasInverse_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/meta_manager.Meta/GetMetasInverse", + FullMethod: "/meta.v1.Meta/GetMetasInverse", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MetaServer).GetMetasInverse(ctx, req.(*payload.Meta_Vals)) @@ -329,7 +328,7 @@ func _Meta_SetMeta_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/meta_manager.Meta/SetMeta", + FullMethod: "/meta.v1.Meta/SetMeta", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MetaServer).SetMeta(ctx, req.(*payload.Meta_KeyVal)) @@ -347,7 +346,7 @@ func _Meta_SetMetas_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/meta_manager.Meta/SetMetas", + FullMethod: "/meta.v1.Meta/SetMetas", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MetaServer).SetMetas(ctx, req.(*payload.Meta_KeyVals)) @@ -365,7 +364,7 @@ func _Meta_DeleteMeta_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/meta_manager.Meta/DeleteMeta", + FullMethod: "/meta.v1.Meta/DeleteMeta", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MetaServer).DeleteMeta(ctx, req.(*payload.Meta_Key)) @@ -383,7 +382,7 @@ func _Meta_DeleteMetas_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/meta_manager.Meta/DeleteMetas", + FullMethod: "/meta.v1.Meta/DeleteMetas", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MetaServer).DeleteMetas(ctx, req.(*payload.Meta_Keys)) @@ -401,7 +400,7 @@ func _Meta_DeleteMetaInverse_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/meta_manager.Meta/DeleteMetaInverse", + FullMethod: "/meta.v1.Meta/DeleteMetaInverse", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MetaServer).DeleteMetaInverse(ctx, req.(*payload.Meta_Val)) @@ -419,7 +418,7 @@ func _Meta_DeleteMetasInverse_Handler(srv 
interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/meta_manager.Meta/DeleteMetasInverse", + FullMethod: "/meta.v1.Meta/DeleteMetasInverse", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MetaServer).DeleteMetasInverse(ctx, req.(*payload.Meta_Vals)) @@ -428,7 +427,7 @@ func _Meta_DeleteMetasInverse_Handler(srv interface{}, ctx context.Context, dec } var _Meta_serviceDesc = grpc.ServiceDesc{ - ServiceName: "meta_manager.Meta", + ServiceName: "meta.v1.Meta", HandlerType: (*MetaServer)(nil), Methods: []grpc.MethodDesc{ { @@ -473,5 +472,5 @@ var _Meta_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "meta.proto", + Metadata: "apis/proto/v1/meta/meta.proto", } diff --git a/apis/grpc/v1/payload/payload.pb.go b/apis/grpc/v1/payload/payload.pb.go new file mode 100644 index 0000000000..f0ae6ac264 --- /dev/null +++ b/apis/grpc/v1/payload/payload.pb.go @@ -0,0 +1,17423 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package payload + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/envoyproxy/protoc-gen-validate/validate" + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Search struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Search) Reset() { *m = Search{} } +func (m *Search) String() string { return proto.CompactTextString(m) } +func (*Search) ProtoMessage() {} +func (*Search) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{0} +} +func (m *Search) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Search) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Search.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Search) XXX_Merge(src proto.Message) { + xxx_messageInfo_Search.Merge(m, src) +} +func (m *Search) XXX_Size() int { + return m.Size() +} +func (m *Search) XXX_DiscardUnknown() { + xxx_messageInfo_Search.DiscardUnknown(m) +} + +var xxx_messageInfo_Search proto.InternalMessageInfo + +type Search_Request struct { + Vector []float32 `protobuf:"fixed32,1,rep,packed,name=vector,proto3" json:"vector,omitempty"` + Config *Search_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Search_Request) Reset() { *m = Search_Request{} } +func (m *Search_Request) String() string { return proto.CompactTextString(m) } +func (*Search_Request) ProtoMessage() {} +func (*Search_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{0, 0} +} +func (m *Search_Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Search_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Search_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Search_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Search_Request.Merge(m, src) +} +func (m *Search_Request) XXX_Size() int { + return m.Size() +} +func (m *Search_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Search_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Search_Request proto.InternalMessageInfo + +func (m *Search_Request) GetVector() []float32 { + if m != nil { + return m.Vector + } + return nil +} + +func (m *Search_Request) GetConfig() *Search_Config { + if m != nil { + return m.Config + } + return nil +} + +type Search_MultiRequest struct { + Requests []*Search_Request `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Search_MultiRequest) Reset() { *m = Search_MultiRequest{} } +func (m *Search_MultiRequest) String() string { return proto.CompactTextString(m) } +func (*Search_MultiRequest) ProtoMessage() {} +func (*Search_MultiRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{0, 1} +} +func (m *Search_MultiRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Search_MultiRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Search_MultiRequest.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Search_MultiRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Search_MultiRequest.Merge(m, src) +} +func (m *Search_MultiRequest) XXX_Size() int { + return m.Size() +} +func (m *Search_MultiRequest) XXX_DiscardUnknown() { + xxx_messageInfo_Search_MultiRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_Search_MultiRequest proto.InternalMessageInfo + +func (m *Search_MultiRequest) GetRequests() []*Search_Request { + if m != nil { + return m.Requests + } + return nil +} + +type Search_IDRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Config *Search_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Search_IDRequest) Reset() { *m = Search_IDRequest{} } +func (m *Search_IDRequest) String() string { return proto.CompactTextString(m) } +func (*Search_IDRequest) ProtoMessage() {} +func (*Search_IDRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{0, 2} +} +func (m *Search_IDRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Search_IDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Search_IDRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Search_IDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Search_IDRequest.Merge(m, src) +} +func (m *Search_IDRequest) XXX_Size() int { + return m.Size() +} +func (m *Search_IDRequest) XXX_DiscardUnknown() { + xxx_messageInfo_Search_IDRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_Search_IDRequest proto.InternalMessageInfo + +func (m *Search_IDRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Search_IDRequest) GetConfig() *Search_Config { + if m != nil { + return m.Config + } + return nil +} + +type Search_MultiIDRequest struct { + Requests []*Search_IDRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Search_MultiIDRequest) Reset() { *m = Search_MultiIDRequest{} } +func (m *Search_MultiIDRequest) String() string { return proto.CompactTextString(m) } +func (*Search_MultiIDRequest) ProtoMessage() {} +func (*Search_MultiIDRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{0, 3} +} +func (m *Search_MultiIDRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Search_MultiIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Search_MultiIDRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Search_MultiIDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Search_MultiIDRequest.Merge(m, src) +} +func (m *Search_MultiIDRequest) XXX_Size() int { + return m.Size() +} +func (m *Search_MultiIDRequest) XXX_DiscardUnknown() { + xxx_messageInfo_Search_MultiIDRequest.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Search_MultiIDRequest proto.InternalMessageInfo + +func (m *Search_MultiIDRequest) GetRequests() []*Search_IDRequest { + if m != nil { + return m.Requests + } + return nil +} + +type Search_ObjectRequest struct { + Object []byte `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + Config *Search_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Search_ObjectRequest) Reset() { *m = Search_ObjectRequest{} } +func (m *Search_ObjectRequest) String() string { return proto.CompactTextString(m) } +func (*Search_ObjectRequest) ProtoMessage() {} +func (*Search_ObjectRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{0, 4} +} +func (m *Search_ObjectRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Search_ObjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Search_ObjectRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Search_ObjectRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Search_ObjectRequest.Merge(m, src) +} +func (m *Search_ObjectRequest) XXX_Size() int { + return m.Size() +} +func (m *Search_ObjectRequest) XXX_DiscardUnknown() { + xxx_messageInfo_Search_ObjectRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_Search_ObjectRequest proto.InternalMessageInfo + +func (m *Search_ObjectRequest) GetObject() []byte { + if m != nil { + return m.Object + } + return nil +} + +func (m *Search_ObjectRequest) GetConfig() *Search_Config { + if m != nil { + return m.Config + } + return nil +} + +type Search_Config struct { + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Num uint32 `protobuf:"varint,2,opt,name=num,proto3" json:"num,omitempty"` + Radius float32 `protobuf:"fixed32,3,opt,name=radius,proto3" json:"radius,omitempty"` + Epsilon float32 `protobuf:"fixed32,4,opt,name=epsilon,proto3" json:"epsilon,omitempty"` + Timeout int64 `protobuf:"varint,5,opt,name=timeout,proto3" json:"timeout,omitempty"` + Filters *Filter_Config `protobuf:"bytes,6,opt,name=filters,proto3" json:"filters,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Search_Config) Reset() { *m = Search_Config{} } +func (m *Search_Config) String() string { return proto.CompactTextString(m) } +func (*Search_Config) ProtoMessage() {} +func (*Search_Config) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{0, 5} +} +func (m *Search_Config) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Search_Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Search_Config.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Search_Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Search_Config.Merge(m, src) +} +func (m *Search_Config) XXX_Size() int { + return m.Size() +} +func (m *Search_Config) XXX_DiscardUnknown() { + xxx_messageInfo_Search_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Search_Config 
proto.InternalMessageInfo + +func (m *Search_Config) GetRequestId() string { + if m != nil { + return m.RequestId + } + return "" +} + +func (m *Search_Config) GetNum() uint32 { + if m != nil { + return m.Num + } + return 0 +} + +func (m *Search_Config) GetRadius() float32 { + if m != nil { + return m.Radius + } + return 0 +} + +func (m *Search_Config) GetEpsilon() float32 { + if m != nil { + return m.Epsilon + } + return 0 +} + +func (m *Search_Config) GetTimeout() int64 { + if m != nil { + return m.Timeout + } + return 0 +} + +func (m *Search_Config) GetFilters() *Filter_Config { + if m != nil { + return m.Filters + } + return nil +} + +type Search_Response struct { + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Results []*Object_Distance `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Search_Response) Reset() { *m = Search_Response{} } +func (m *Search_Response) String() string { return proto.CompactTextString(m) } +func (*Search_Response) ProtoMessage() {} +func (*Search_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{0, 6} +} +func (m *Search_Response) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Search_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Search_Response.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Search_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Search_Response.Merge(m, src) +} +func (m *Search_Response) XXX_Size() int { + return m.Size() +} +func (m *Search_Response) XXX_DiscardUnknown() { + xxx_messageInfo_Search_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Search_Response proto.InternalMessageInfo + +func (m *Search_Response) GetRequestId() string { + if m != nil { + return m.RequestId + } + return "" +} + +func (m *Search_Response) GetResults() []*Object_Distance { + if m != nil { + return m.Results + } + return nil +} + +type Search_Responses struct { + Responses []*Search_Response `protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Search_Responses) Reset() { *m = Search_Responses{} } +func (m *Search_Responses) String() string { return proto.CompactTextString(m) } +func (*Search_Responses) ProtoMessage() {} +func (*Search_Responses) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{0, 7} +} +func (m *Search_Responses) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Search_Responses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Search_Responses.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Search_Responses) XXX_Merge(src proto.Message) { + xxx_messageInfo_Search_Responses.Merge(m, src) +} +func (m *Search_Responses) XXX_Size() int { + return m.Size() +} +func (m *Search_Responses) XXX_DiscardUnknown() { + xxx_messageInfo_Search_Responses.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Search_Responses proto.InternalMessageInfo + +func (m *Search_Responses) GetResponses() []*Search_Response { + if m != nil { + return m.Responses + } + return nil +} + +type Filter struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Filter) Reset() { *m = Filter{} } +func (m *Filter) String() string { return proto.CompactTextString(m) } +func (*Filter) ProtoMessage() {} +func (*Filter) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{1} +} +func (m *Filter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Filter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Filter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Filter.Merge(m, src) +} +func (m *Filter) XXX_Size() int { + return m.Size() +} +func (m *Filter) XXX_DiscardUnknown() { + xxx_messageInfo_Filter.DiscardUnknown(m) +} + +var xxx_messageInfo_Filter proto.InternalMessageInfo + +type Filter_Target struct { + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Filter_Target) Reset() { *m = Filter_Target{} } +func (m *Filter_Target) String() string { return proto.CompactTextString(m) } +func (*Filter_Target) ProtoMessage() {} +func (*Filter_Target) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{1, 0} +} +func (m *Filter_Target) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Filter_Target) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Filter_Target.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Filter_Target) XXX_Merge(src proto.Message) { + xxx_messageInfo_Filter_Target.Merge(m, src) +} +func (m *Filter_Target) XXX_Size() int { + return m.Size() +} +func (m *Filter_Target) XXX_DiscardUnknown() { + xxx_messageInfo_Filter_Target.DiscardUnknown(m) +} + +var xxx_messageInfo_Filter_Target proto.InternalMessageInfo + +func (m *Filter_Target) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Filter_Target) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +type Filter_Config struct { + Targets []string `protobuf:"bytes,1,rep,name=targets,proto3" json:"targets,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Filter_Config) Reset() { *m = Filter_Config{} } +func (m *Filter_Config) String() string { return proto.CompactTextString(m) } +func (*Filter_Config) ProtoMessage() {} +func (*Filter_Config) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{1, 1} +} +func (m *Filter_Config) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Filter_Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Filter_Config.Marshal(b, m, deterministic) + } else { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Filter_Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Filter_Config.Merge(m, src) +} +func (m *Filter_Config) XXX_Size() int { + return m.Size() +} +func (m *Filter_Config) XXX_DiscardUnknown() { + xxx_messageInfo_Filter_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Filter_Config proto.InternalMessageInfo + +func (m *Filter_Config) GetTargets() []string { + if m != nil { + return m.Targets + } + return nil +} + +type Insert struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Insert) Reset() { *m = Insert{} } +func (m *Insert) String() string { return proto.CompactTextString(m) } +func (*Insert) ProtoMessage() {} +func (*Insert) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{2} +} +func (m *Insert) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Insert) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Insert.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Insert) XXX_Merge(src proto.Message) { + xxx_messageInfo_Insert.Merge(m, src) +} +func (m *Insert) XXX_Size() int { + return m.Size() +} +func (m *Insert) XXX_DiscardUnknown() { + xxx_messageInfo_Insert.DiscardUnknown(m) +} + +var xxx_messageInfo_Insert proto.InternalMessageInfo + +type Insert_Request struct { + Vector *Object_Vector `protobuf:"bytes,1,opt,name=vector,proto3" json:"vector,omitempty"` + Config *Insert_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Insert_Request) Reset() { *m = Insert_Request{} } +func (m *Insert_Request) String() string { return proto.CompactTextString(m) } +func (*Insert_Request) ProtoMessage() {} +func (*Insert_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{2, 0} +} +func (m *Insert_Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Insert_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Insert_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Insert_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Insert_Request.Merge(m, src) +} +func (m *Insert_Request) XXX_Size() int { + return m.Size() +} +func (m *Insert_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Insert_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Insert_Request proto.InternalMessageInfo + +func (m *Insert_Request) GetVector() *Object_Vector { + if m != nil { + return m.Vector + } + return nil +} + +func (m *Insert_Request) GetConfig() *Insert_Config { + if m != nil { + return m.Config + } + return nil +} + +type Insert_MultiRequest struct { + Requests []*Insert_Request `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Insert_MultiRequest) Reset() { *m = Insert_MultiRequest{} } +func (m *Insert_MultiRequest) String() 
string { return proto.CompactTextString(m) } +func (*Insert_MultiRequest) ProtoMessage() {} +func (*Insert_MultiRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{2, 1} +} +func (m *Insert_MultiRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Insert_MultiRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Insert_MultiRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Insert_MultiRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Insert_MultiRequest.Merge(m, src) +} +func (m *Insert_MultiRequest) XXX_Size() int { + return m.Size() +} +func (m *Insert_MultiRequest) XXX_DiscardUnknown() { + xxx_messageInfo_Insert_MultiRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_Insert_MultiRequest proto.InternalMessageInfo + +func (m *Insert_MultiRequest) GetRequests() []*Insert_Request { + if m != nil { + return m.Requests + } + return nil +} + +type Insert_Config struct { + SkipStrictExistCheck bool `protobuf:"varint,1,opt,name=skip_strict_exist_check,json=skipStrictExistCheck,proto3" json:"skip_strict_exist_check,omitempty"` + Filters *Filter_Config `protobuf:"bytes,2,opt,name=filters,proto3" json:"filters,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Insert_Config) Reset() { *m = Insert_Config{} } +func (m *Insert_Config) String() string { return proto.CompactTextString(m) } +func (*Insert_Config) ProtoMessage() {} +func (*Insert_Config) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{2, 2} +} +func (m *Insert_Config) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Insert_Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Insert_Config.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Insert_Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Insert_Config.Merge(m, src) +} +func (m *Insert_Config) XXX_Size() int { + return m.Size() +} +func (m *Insert_Config) XXX_DiscardUnknown() { + xxx_messageInfo_Insert_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Insert_Config proto.InternalMessageInfo + +func (m *Insert_Config) GetSkipStrictExistCheck() bool { + if m != nil { + return m.SkipStrictExistCheck + } + return false +} + +func (m *Insert_Config) GetFilters() *Filter_Config { + if m != nil { + return m.Filters + } + return nil +} + +type Update struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Update) Reset() { *m = Update{} } +func (m *Update) String() string { return proto.CompactTextString(m) } +func (*Update) ProtoMessage() {} +func (*Update) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{3} +} +func (m *Update) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Update) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Update.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Update) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_Update.Merge(m, src) +} +func (m *Update) XXX_Size() int { + return m.Size() +} +func (m *Update) XXX_DiscardUnknown() { + xxx_messageInfo_Update.DiscardUnknown(m) +} + +var xxx_messageInfo_Update proto.InternalMessageInfo + +type Update_Request struct { + Vector *Object_Vector `protobuf:"bytes,1,opt,name=vector,proto3" json:"vector,omitempty"` + Config *Update_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Update_Request) Reset() { *m = Update_Request{} } +func (m *Update_Request) String() string { return proto.CompactTextString(m) } +func (*Update_Request) ProtoMessage() {} +func (*Update_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{3, 0} +} +func (m *Update_Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Update_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Update_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Update_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Update_Request.Merge(m, src) +} +func (m *Update_Request) XXX_Size() int { + return m.Size() +} +func (m *Update_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Update_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Update_Request proto.InternalMessageInfo + +func (m *Update_Request) GetVector() *Object_Vector { + if m != nil { + return m.Vector + } + return nil +} + +func (m *Update_Request) GetConfig() *Update_Config { + if m != nil { + return m.Config + } + return nil +} + +type Update_MultiRequest struct { + Requests []*Update_Request `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Update_MultiRequest) Reset() { *m = Update_MultiRequest{} } +func (m *Update_MultiRequest) String() string { return proto.CompactTextString(m) } +func (*Update_MultiRequest) ProtoMessage() {} +func (*Update_MultiRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{3, 1} +} +func (m *Update_MultiRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Update_MultiRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Update_MultiRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Update_MultiRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Update_MultiRequest.Merge(m, src) +} +func (m *Update_MultiRequest) XXX_Size() int { + return m.Size() +} +func (m *Update_MultiRequest) XXX_DiscardUnknown() { + xxx_messageInfo_Update_MultiRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_Update_MultiRequest proto.InternalMessageInfo + +func (m *Update_MultiRequest) GetRequests() []*Update_Request { + if m != nil { + return m.Requests + } + return nil +} + +type Update_Config struct { + SkipStrictExistCheck bool `protobuf:"varint,1,opt,name=skip_strict_exist_check,json=skipStrictExistCheck,proto3" json:"skip_strict_exist_check,omitempty"` + Filters *Filter_Config 
`protobuf:"bytes,2,opt,name=filters,proto3" json:"filters,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Update_Config) Reset() { *m = Update_Config{} } +func (m *Update_Config) String() string { return proto.CompactTextString(m) } +func (*Update_Config) ProtoMessage() {} +func (*Update_Config) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{3, 2} +} +func (m *Update_Config) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Update_Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Update_Config.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Update_Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Update_Config.Merge(m, src) +} +func (m *Update_Config) XXX_Size() int { + return m.Size() +} +func (m *Update_Config) XXX_DiscardUnknown() { + xxx_messageInfo_Update_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Update_Config proto.InternalMessageInfo + +func (m *Update_Config) GetSkipStrictExistCheck() bool { + if m != nil { + return m.SkipStrictExistCheck + } + return false +} + +func (m *Update_Config) GetFilters() *Filter_Config { + if m != nil { + return m.Filters + } + return nil +} + +type Upsert struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Upsert) Reset() { *m = Upsert{} } +func (m *Upsert) String() string { return proto.CompactTextString(m) } +func (*Upsert) ProtoMessage() {} +func (*Upsert) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{4} +} +func (m *Upsert) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Upsert) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Upsert.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Upsert) XXX_Merge(src proto.Message) { + xxx_messageInfo_Upsert.Merge(m, src) +} +func (m *Upsert) XXX_Size() int { + return m.Size() +} +func (m *Upsert) XXX_DiscardUnknown() { + xxx_messageInfo_Upsert.DiscardUnknown(m) +} + +var xxx_messageInfo_Upsert proto.InternalMessageInfo + +type Upsert_Request struct { + Vector *Object_Vector `protobuf:"bytes,1,opt,name=vector,proto3" json:"vector,omitempty"` + Config *Upsert_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Upsert_Request) Reset() { *m = Upsert_Request{} } +func (m *Upsert_Request) String() string { return proto.CompactTextString(m) } +func (*Upsert_Request) ProtoMessage() {} +func (*Upsert_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{4, 0} +} +func (m *Upsert_Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Upsert_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Upsert_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Upsert_Request) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_Upsert_Request.Merge(m, src) +} +func (m *Upsert_Request) XXX_Size() int { + return m.Size() +} +func (m *Upsert_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Upsert_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Upsert_Request proto.InternalMessageInfo + +func (m *Upsert_Request) GetVector() *Object_Vector { + if m != nil { + return m.Vector + } + return nil +} + +func (m *Upsert_Request) GetConfig() *Upsert_Config { + if m != nil { + return m.Config + } + return nil +} + +type Upsert_MultiRequest struct { + Requests []*Upsert_Request `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Upsert_MultiRequest) Reset() { *m = Upsert_MultiRequest{} } +func (m *Upsert_MultiRequest) String() string { return proto.CompactTextString(m) } +func (*Upsert_MultiRequest) ProtoMessage() {} +func (*Upsert_MultiRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{4, 1} +} +func (m *Upsert_MultiRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Upsert_MultiRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Upsert_MultiRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Upsert_MultiRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Upsert_MultiRequest.Merge(m, src) +} +func (m *Upsert_MultiRequest) XXX_Size() int { + return m.Size() +} +func (m *Upsert_MultiRequest) XXX_DiscardUnknown() { + xxx_messageInfo_Upsert_MultiRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_Upsert_MultiRequest proto.InternalMessageInfo + +func (m *Upsert_MultiRequest) GetRequests() []*Upsert_Request { + if m != nil { + return m.Requests + } + return nil +} + +type Upsert_Config struct { + SkipStrictExistCheck bool `protobuf:"varint,1,opt,name=skip_strict_exist_check,json=skipStrictExistCheck,proto3" json:"skip_strict_exist_check,omitempty"` + Filters *Filter_Config `protobuf:"bytes,2,opt,name=filters,proto3" json:"filters,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Upsert_Config) Reset() { *m = Upsert_Config{} } +func (m *Upsert_Config) String() string { return proto.CompactTextString(m) } +func (*Upsert_Config) ProtoMessage() {} +func (*Upsert_Config) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{4, 2} +} +func (m *Upsert_Config) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Upsert_Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Upsert_Config.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Upsert_Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Upsert_Config.Merge(m, src) +} +func (m *Upsert_Config) XXX_Size() int { + return m.Size() +} +func (m *Upsert_Config) XXX_DiscardUnknown() { + xxx_messageInfo_Upsert_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Upsert_Config proto.InternalMessageInfo + +func (m *Upsert_Config) GetSkipStrictExistCheck() bool { + if m != nil { + return m.SkipStrictExistCheck + } + return false +} + 
+func (m *Upsert_Config) GetFilters() *Filter_Config { + if m != nil { + return m.Filters + } + return nil +} + +type Remove struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Remove) Reset() { *m = Remove{} } +func (m *Remove) String() string { return proto.CompactTextString(m) } +func (*Remove) ProtoMessage() {} +func (*Remove) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{5} +} +func (m *Remove) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Remove) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Remove.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Remove) XXX_Merge(src proto.Message) { + xxx_messageInfo_Remove.Merge(m, src) +} +func (m *Remove) XXX_Size() int { + return m.Size() +} +func (m *Remove) XXX_DiscardUnknown() { + xxx_messageInfo_Remove.DiscardUnknown(m) +} + +var xxx_messageInfo_Remove proto.InternalMessageInfo + +type Remove_Request struct { + Id *Object_ID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Config *Remove_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Remove_Request) Reset() { *m = Remove_Request{} } +func (m *Remove_Request) String() string { return proto.CompactTextString(m) } +func (*Remove_Request) ProtoMessage() {} +func (*Remove_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{5, 0} +} +func (m *Remove_Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Remove_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Remove_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Remove_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Remove_Request.Merge(m, src) +} +func (m *Remove_Request) XXX_Size() int { + return m.Size() +} +func (m *Remove_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Remove_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Remove_Request proto.InternalMessageInfo + +func (m *Remove_Request) GetId() *Object_ID { + if m != nil { + return m.Id + } + return nil +} + +func (m *Remove_Request) GetConfig() *Remove_Config { + if m != nil { + return m.Config + } + return nil +} + +type Remove_MultiRequest struct { + Requests []*Remove_Request `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Remove_MultiRequest) Reset() { *m = Remove_MultiRequest{} } +func (m *Remove_MultiRequest) String() string { return proto.CompactTextString(m) } +func (*Remove_MultiRequest) ProtoMessage() {} +func (*Remove_MultiRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{5, 1} +} +func (m *Remove_MultiRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Remove_MultiRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Remove_MultiRequest.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Remove_MultiRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Remove_MultiRequest.Merge(m, src) +} +func (m *Remove_MultiRequest) XXX_Size() int { + return m.Size() +} +func (m *Remove_MultiRequest) XXX_DiscardUnknown() { + xxx_messageInfo_Remove_MultiRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_Remove_MultiRequest proto.InternalMessageInfo + +func (m *Remove_MultiRequest) GetRequests() []*Remove_Request { + if m != nil { + return m.Requests + } + return nil +} + +type Remove_Config struct { + SkipStrictExistCheck bool `protobuf:"varint,1,opt,name=skip_strict_exist_check,json=skipStrictExistCheck,proto3" json:"skip_strict_exist_check,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Remove_Config) Reset() { *m = Remove_Config{} } +func (m *Remove_Config) String() string { return proto.CompactTextString(m) } +func (*Remove_Config) ProtoMessage() {} +func (*Remove_Config) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{5, 2} +} +func (m *Remove_Config) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Remove_Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Remove_Config.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Remove_Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Remove_Config.Merge(m, src) +} +func (m *Remove_Config) XXX_Size() int { + return m.Size() +} +func (m *Remove_Config) XXX_DiscardUnknown() { + xxx_messageInfo_Remove_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Remove_Config proto.InternalMessageInfo + +func (m *Remove_Config) GetSkipStrictExistCheck() bool { + if m != nil { + return m.SkipStrictExistCheck + } + return false +} + +type Meta struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Meta) Reset() { *m = Meta{} } +func (m *Meta) String() string { return proto.CompactTextString(m) } +func (*Meta) ProtoMessage() {} +func (*Meta) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{6} +} +func (m *Meta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Meta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Meta.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Meta) XXX_Merge(src proto.Message) { + xxx_messageInfo_Meta.Merge(m, src) +} +func (m *Meta) XXX_Size() int { + return m.Size() +} +func (m *Meta) XXX_DiscardUnknown() { + xxx_messageInfo_Meta.DiscardUnknown(m) +} + +var xxx_messageInfo_Meta proto.InternalMessageInfo + +type Meta_Key struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Meta_Key) Reset() { *m = Meta_Key{} } +func (m *Meta_Key) String() string { return proto.CompactTextString(m) } +func (*Meta_Key) ProtoMessage() {} +func (*Meta_Key) Descriptor() ([]byte, []int) { + return 
fileDescriptor_f0518b37b4e7594b, []int{6, 0} +} +func (m *Meta_Key) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Meta_Key) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Meta_Key.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Meta_Key) XXX_Merge(src proto.Message) { + xxx_messageInfo_Meta_Key.Merge(m, src) +} +func (m *Meta_Key) XXX_Size() int { + return m.Size() +} +func (m *Meta_Key) XXX_DiscardUnknown() { + xxx_messageInfo_Meta_Key.DiscardUnknown(m) +} + +var xxx_messageInfo_Meta_Key proto.InternalMessageInfo + +func (m *Meta_Key) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +type Meta_Keys struct { + Keys []string `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Meta_Keys) Reset() { *m = Meta_Keys{} } +func (m *Meta_Keys) String() string { return proto.CompactTextString(m) } +func (*Meta_Keys) ProtoMessage() {} +func (*Meta_Keys) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{6, 1} +} +func (m *Meta_Keys) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Meta_Keys) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Meta_Keys.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Meta_Keys) XXX_Merge(src proto.Message) { + xxx_messageInfo_Meta_Keys.Merge(m, src) +} +func (m *Meta_Keys) XXX_Size() int { + return m.Size() +} +func (m *Meta_Keys) XXX_DiscardUnknown() { + xxx_messageInfo_Meta_Keys.DiscardUnknown(m) +} + +var xxx_messageInfo_Meta_Keys proto.InternalMessageInfo + +func (m *Meta_Keys) GetKeys() []string { + if m != nil { + return m.Keys + } + return nil +} + +type Meta_Val struct { + Val string `protobuf:"bytes,1,opt,name=val,proto3" json:"val,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Meta_Val) Reset() { *m = Meta_Val{} } +func (m *Meta_Val) String() string { return proto.CompactTextString(m) } +func (*Meta_Val) ProtoMessage() {} +func (*Meta_Val) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{6, 2} +} +func (m *Meta_Val) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Meta_Val) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Meta_Val.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Meta_Val) XXX_Merge(src proto.Message) { + xxx_messageInfo_Meta_Val.Merge(m, src) +} +func (m *Meta_Val) XXX_Size() int { + return m.Size() +} +func (m *Meta_Val) XXX_DiscardUnknown() { + xxx_messageInfo_Meta_Val.DiscardUnknown(m) +} + +var xxx_messageInfo_Meta_Val proto.InternalMessageInfo + +func (m *Meta_Val) GetVal() string { + if m != nil { + return m.Val + } + return "" +} + +type Meta_Vals struct { + Vals []string `protobuf:"bytes,1,rep,name=vals,proto3" json:"vals,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *Meta_Vals) Reset() { *m = Meta_Vals{} } +func (m *Meta_Vals) String() string { return proto.CompactTextString(m) } +func (*Meta_Vals) ProtoMessage() {} +func (*Meta_Vals) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{6, 3} +} +func (m *Meta_Vals) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Meta_Vals) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Meta_Vals.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Meta_Vals) XXX_Merge(src proto.Message) { + xxx_messageInfo_Meta_Vals.Merge(m, src) +} +func (m *Meta_Vals) XXX_Size() int { + return m.Size() +} +func (m *Meta_Vals) XXX_DiscardUnknown() { + xxx_messageInfo_Meta_Vals.DiscardUnknown(m) +} + +var xxx_messageInfo_Meta_Vals proto.InternalMessageInfo + +func (m *Meta_Vals) GetVals() []string { + if m != nil { + return m.Vals + } + return nil +} + +type Meta_KeyVal struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Val string `protobuf:"bytes,2,opt,name=val,proto3" json:"val,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Meta_KeyVal) Reset() { *m = Meta_KeyVal{} } +func (m *Meta_KeyVal) String() string { return proto.CompactTextString(m) } +func (*Meta_KeyVal) ProtoMessage() {} +func (*Meta_KeyVal) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{6, 4} +} +func (m *Meta_KeyVal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Meta_KeyVal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Meta_KeyVal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Meta_KeyVal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Meta_KeyVal.Merge(m, src) +} +func (m *Meta_KeyVal) XXX_Size() int { + return m.Size() +} +func (m *Meta_KeyVal) XXX_DiscardUnknown() { + xxx_messageInfo_Meta_KeyVal.DiscardUnknown(m) +} + +var xxx_messageInfo_Meta_KeyVal proto.InternalMessageInfo + +func (m *Meta_KeyVal) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Meta_KeyVal) GetVal() string { + if m != nil { + return m.Val + } + return "" +} + +type Meta_KeyVals struct { + Kvs []*Meta_KeyVal `protobuf:"bytes,1,rep,name=kvs,proto3" json:"kvs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Meta_KeyVals) Reset() { *m = Meta_KeyVals{} } +func (m *Meta_KeyVals) String() string { return proto.CompactTextString(m) } +func (*Meta_KeyVals) ProtoMessage() {} +func (*Meta_KeyVals) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{6, 5} +} +func (m *Meta_KeyVals) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Meta_KeyVals) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Meta_KeyVals.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Meta_KeyVals) XXX_Merge(src proto.Message) { + xxx_messageInfo_Meta_KeyVals.Merge(m, src) +} +func (m 
*Meta_KeyVals) XXX_Size() int { + return m.Size() +} +func (m *Meta_KeyVals) XXX_DiscardUnknown() { + xxx_messageInfo_Meta_KeyVals.DiscardUnknown(m) +} + +var xxx_messageInfo_Meta_KeyVals proto.InternalMessageInfo + +func (m *Meta_KeyVals) GetKvs() []*Meta_KeyVal { + if m != nil { + return m.Kvs + } + return nil +} + +type Object struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object) Reset() { *m = Object{} } +func (m *Object) String() string { return proto.CompactTextString(m) } +func (*Object) ProtoMessage() {} +func (*Object) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{7} +} +func (m *Object) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Object) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Object.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Object) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object.Merge(m, src) +} +func (m *Object) XXX_Size() int { + return m.Size() +} +func (m *Object) XXX_DiscardUnknown() { + xxx_messageInfo_Object.DiscardUnknown(m) +} + +var xxx_messageInfo_Object proto.InternalMessageInfo + +type Object_Distance struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Distance float32 `protobuf:"fixed32,2,opt,name=distance,proto3" json:"distance,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object_Distance) Reset() { *m = Object_Distance{} } +func (m *Object_Distance) String() string { return proto.CompactTextString(m) } +func (*Object_Distance) ProtoMessage() {} +func (*Object_Distance) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{7, 0} +} +func (m *Object_Distance) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Object_Distance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Object_Distance.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Object_Distance) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object_Distance.Merge(m, src) +} +func (m *Object_Distance) XXX_Size() int { + return m.Size() +} +func (m *Object_Distance) XXX_DiscardUnknown() { + xxx_messageInfo_Object_Distance.DiscardUnknown(m) +} + +var xxx_messageInfo_Object_Distance proto.InternalMessageInfo + +func (m *Object_Distance) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Object_Distance) GetDistance() float32 { + if m != nil { + return m.Distance + } + return 0 +} + +type Object_ID struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object_ID) Reset() { *m = Object_ID{} } +func (m *Object_ID) String() string { return proto.CompactTextString(m) } +func (*Object_ID) ProtoMessage() {} +func (*Object_ID) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{7, 1} +} +func (m *Object_ID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Object_ID) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Object_ID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Object_ID) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object_ID.Merge(m, src) +} +func (m *Object_ID) XXX_Size() int { + return m.Size() +} +func (m *Object_ID) XXX_DiscardUnknown() { + xxx_messageInfo_Object_ID.DiscardUnknown(m) +} + +var xxx_messageInfo_Object_ID proto.InternalMessageInfo + +func (m *Object_ID) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type Object_IDs struct { + Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object_IDs) Reset() { *m = Object_IDs{} } +func (m *Object_IDs) String() string { return proto.CompactTextString(m) } +func (*Object_IDs) ProtoMessage() {} +func (*Object_IDs) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{7, 2} +} +func (m *Object_IDs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Object_IDs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Object_IDs.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Object_IDs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object_IDs.Merge(m, src) +} +func (m *Object_IDs) XXX_Size() int { + return m.Size() +} +func (m *Object_IDs) XXX_DiscardUnknown() { + xxx_messageInfo_Object_IDs.DiscardUnknown(m) +} + +var xxx_messageInfo_Object_IDs proto.InternalMessageInfo + +func (m *Object_IDs) GetIds() []string { + if m != nil { + return m.Ids + } + return nil +} + +type Object_Vector struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Vector []float32 `protobuf:"fixed32,2,rep,packed,name=vector,proto3" json:"vector,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object_Vector) Reset() { *m = Object_Vector{} } +func (m *Object_Vector) String() string { return proto.CompactTextString(m) } +func (*Object_Vector) ProtoMessage() {} +func (*Object_Vector) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{7, 3} +} +func (m *Object_Vector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Object_Vector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Object_Vector.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Object_Vector) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object_Vector.Merge(m, src) +} +func (m *Object_Vector) XXX_Size() int { + return m.Size() +} +func (m *Object_Vector) XXX_DiscardUnknown() { + xxx_messageInfo_Object_Vector.DiscardUnknown(m) +} + +var xxx_messageInfo_Object_Vector proto.InternalMessageInfo + +func (m *Object_Vector) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Object_Vector) GetVector() []float32 { + if m != nil { + return m.Vector + } + return nil +} + +type Object_Vectors struct { + Vectors []*Object_Vector 
`protobuf:"bytes,1,rep,name=vectors,proto3" json:"vectors,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object_Vectors) Reset() { *m = Object_Vectors{} } +func (m *Object_Vectors) String() string { return proto.CompactTextString(m) } +func (*Object_Vectors) ProtoMessage() {} +func (*Object_Vectors) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{7, 4} +} +func (m *Object_Vectors) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Object_Vectors) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Object_Vectors.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Object_Vectors) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object_Vectors.Merge(m, src) +} +func (m *Object_Vectors) XXX_Size() int { + return m.Size() +} +func (m *Object_Vectors) XXX_DiscardUnknown() { + xxx_messageInfo_Object_Vectors.DiscardUnknown(m) +} + +var xxx_messageInfo_Object_Vectors proto.InternalMessageInfo + +func (m *Object_Vectors) GetVectors() []*Object_Vector { + if m != nil { + return m.Vectors + } + return nil +} + +type Object_Blob struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Object []byte `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object_Blob) Reset() { *m = Object_Blob{} } +func (m *Object_Blob) String() string { return proto.CompactTextString(m) } +func (*Object_Blob) ProtoMessage() {} +func (*Object_Blob) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{7, 5} +} +func (m *Object_Blob) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Object_Blob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Object_Blob.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Object_Blob) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object_Blob.Merge(m, src) +} +func (m *Object_Blob) XXX_Size() int { + return m.Size() +} +func (m *Object_Blob) XXX_DiscardUnknown() { + xxx_messageInfo_Object_Blob.DiscardUnknown(m) +} + +var xxx_messageInfo_Object_Blob proto.InternalMessageInfo + +func (m *Object_Blob) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Object_Blob) GetObject() []byte { + if m != nil { + return m.Object + } + return nil +} + +type Object_Location struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` + Ips []string `protobuf:"bytes,3,rep,name=ips,proto3" json:"ips,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object_Location) Reset() { *m = Object_Location{} } +func (m *Object_Location) String() string { return proto.CompactTextString(m) } +func (*Object_Location) ProtoMessage() {} +func (*Object_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{7, 6} +} +func (m *Object_Location) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *Object_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Object_Location.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Object_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object_Location.Merge(m, src) +} +func (m *Object_Location) XXX_Size() int { + return m.Size() +} +func (m *Object_Location) XXX_DiscardUnknown() { + xxx_messageInfo_Object_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_Object_Location proto.InternalMessageInfo + +func (m *Object_Location) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Object_Location) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + +func (m *Object_Location) GetIps() []string { + if m != nil { + return m.Ips + } + return nil +} + +type Object_Locations struct { + Locations []*Object_Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object_Locations) Reset() { *m = Object_Locations{} } +func (m *Object_Locations) String() string { return proto.CompactTextString(m) } +func (*Object_Locations) ProtoMessage() {} +func (*Object_Locations) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{7, 7} +} +func (m *Object_Locations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Object_Locations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Object_Locations.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Object_Locations) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object_Locations.Merge(m, src) +} +func (m *Object_Locations) XXX_Size() int { + return m.Size() +} +func (m *Object_Locations) XXX_DiscardUnknown() { + xxx_messageInfo_Object_Locations.DiscardUnknown(m) +} + +var xxx_messageInfo_Object_Locations proto.InternalMessageInfo + +func (m *Object_Locations) GetLocations() []*Object_Location { + if m != nil { + return m.Locations + } + return nil +} + +type Control struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Control) Reset() { *m = Control{} } +func (m *Control) String() string { return proto.CompactTextString(m) } +func (*Control) ProtoMessage() {} +func (*Control) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{8} +} +func (m *Control) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Control) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Control.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Control) XXX_Merge(src proto.Message) { + xxx_messageInfo_Control.Merge(m, src) +} +func (m *Control) XXX_Size() int { + return m.Size() +} +func (m *Control) XXX_DiscardUnknown() { + xxx_messageInfo_Control.DiscardUnknown(m) +} + +var xxx_messageInfo_Control proto.InternalMessageInfo + +type Control_CreateIndexRequest struct { + PoolSize uint32 
`protobuf:"varint,1,opt,name=pool_size,json=poolSize,proto3" json:"pool_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Control_CreateIndexRequest) Reset() { *m = Control_CreateIndexRequest{} } +func (m *Control_CreateIndexRequest) String() string { return proto.CompactTextString(m) } +func (*Control_CreateIndexRequest) ProtoMessage() {} +func (*Control_CreateIndexRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{8, 0} +} +func (m *Control_CreateIndexRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Control_CreateIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Control_CreateIndexRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Control_CreateIndexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Control_CreateIndexRequest.Merge(m, src) +} +func (m *Control_CreateIndexRequest) XXX_Size() int { + return m.Size() +} +func (m *Control_CreateIndexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_Control_CreateIndexRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_Control_CreateIndexRequest proto.InternalMessageInfo + +func (m *Control_CreateIndexRequest) GetPoolSize() uint32 { + if m != nil { + return m.PoolSize + } + return 0 +} + +type Replication struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Replication) Reset() { *m = Replication{} } +func (m *Replication) String() string { return proto.CompactTextString(m) } +func (*Replication) ProtoMessage() {} +func (*Replication) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{9} +} +func (m *Replication) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Replication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Replication.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Replication) XXX_Merge(src proto.Message) { + xxx_messageInfo_Replication.Merge(m, src) +} +func (m *Replication) XXX_Size() int { + return m.Size() +} +func (m *Replication) XXX_DiscardUnknown() { + xxx_messageInfo_Replication.DiscardUnknown(m) +} + +var xxx_messageInfo_Replication proto.InternalMessageInfo + +type Replication_Recovery struct { + DeletedAgents []string `protobuf:"bytes,1,rep,name=deleted_agents,json=deletedAgents,proto3" json:"deleted_agents,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Replication_Recovery) Reset() { *m = Replication_Recovery{} } +func (m *Replication_Recovery) String() string { return proto.CompactTextString(m) } +func (*Replication_Recovery) ProtoMessage() {} +func (*Replication_Recovery) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{9, 0} +} +func (m *Replication_Recovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Replication_Recovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Replication_Recovery.Marshal(b, m, deterministic) + } else { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Replication_Recovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_Replication_Recovery.Merge(m, src) +} +func (m *Replication_Recovery) XXX_Size() int { + return m.Size() +} +func (m *Replication_Recovery) XXX_DiscardUnknown() { + xxx_messageInfo_Replication_Recovery.DiscardUnknown(m) +} + +var xxx_messageInfo_Replication_Recovery proto.InternalMessageInfo + +func (m *Replication_Recovery) GetDeletedAgents() []string { + if m != nil { + return m.DeletedAgents + } + return nil +} + +type Replication_Rebalance struct { + HighUsageAgents []string `protobuf:"bytes,1,rep,name=high_usage_agents,json=highUsageAgents,proto3" json:"high_usage_agents,omitempty"` + LowUsageAgents []string `protobuf:"bytes,2,rep,name=low_usage_agents,json=lowUsageAgents,proto3" json:"low_usage_agents,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Replication_Rebalance) Reset() { *m = Replication_Rebalance{} } +func (m *Replication_Rebalance) String() string { return proto.CompactTextString(m) } +func (*Replication_Rebalance) ProtoMessage() {} +func (*Replication_Rebalance) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{9, 1} +} +func (m *Replication_Rebalance) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Replication_Rebalance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Replication_Rebalance.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Replication_Rebalance) XXX_Merge(src proto.Message) { + xxx_messageInfo_Replication_Rebalance.Merge(m, src) +} +func (m *Replication_Rebalance) XXX_Size() int { + return m.Size() +} +func (m *Replication_Rebalance) XXX_DiscardUnknown() { + xxx_messageInfo_Replication_Rebalance.DiscardUnknown(m) +} + +var xxx_messageInfo_Replication_Rebalance proto.InternalMessageInfo + +func (m *Replication_Rebalance) GetHighUsageAgents() []string { + if m != nil { + return m.HighUsageAgents + } + return nil +} + +func (m *Replication_Rebalance) GetLowUsageAgents() []string { + if m != nil { + return m.LowUsageAgents + } + return nil +} + +type Replication_Agents struct { + Agents []string `protobuf:"bytes,1,rep,name=agents,proto3" json:"agents,omitempty"` + RemovedAgents []string `protobuf:"bytes,2,rep,name=removed_agents,json=removedAgents,proto3" json:"removed_agents,omitempty"` + ReplicatingAgent []string `protobuf:"bytes,3,rep,name=replicating_agent,json=replicatingAgent,proto3" json:"replicating_agent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Replication_Agents) Reset() { *m = Replication_Agents{} } +func (m *Replication_Agents) String() string { return proto.CompactTextString(m) } +func (*Replication_Agents) ProtoMessage() {} +func (*Replication_Agents) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{9, 2} +} +func (m *Replication_Agents) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Replication_Agents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Replication_Agents.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, 
err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Replication_Agents) XXX_Merge(src proto.Message) { + xxx_messageInfo_Replication_Agents.Merge(m, src) +} +func (m *Replication_Agents) XXX_Size() int { + return m.Size() +} +func (m *Replication_Agents) XXX_DiscardUnknown() { + xxx_messageInfo_Replication_Agents.DiscardUnknown(m) +} + +var xxx_messageInfo_Replication_Agents proto.InternalMessageInfo + +func (m *Replication_Agents) GetAgents() []string { + if m != nil { + return m.Agents + } + return nil +} + +func (m *Replication_Agents) GetRemovedAgents() []string { + if m != nil { + return m.RemovedAgents + } + return nil +} + +func (m *Replication_Agents) GetReplicatingAgent() []string { + if m != nil { + return m.ReplicatingAgent + } + return nil +} + +type Discoverer struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Discoverer) Reset() { *m = Discoverer{} } +func (m *Discoverer) String() string { return proto.CompactTextString(m) } +func (*Discoverer) ProtoMessage() {} +func (*Discoverer) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{10} +} +func (m *Discoverer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Discoverer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Discoverer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Discoverer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Discoverer.Merge(m, src) +} +func (m *Discoverer) XXX_Size() int { + return m.Size() +} +func (m *Discoverer) XXX_DiscardUnknown() { + xxx_messageInfo_Discoverer.DiscardUnknown(m) +} + +var xxx_messageInfo_Discoverer proto.InternalMessageInfo + +type Discoverer_Request struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Node string `protobuf:"bytes,3,opt,name=node,proto3" json:"node,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Discoverer_Request) Reset() { *m = Discoverer_Request{} } +func (m *Discoverer_Request) String() string { return proto.CompactTextString(m) } +func (*Discoverer_Request) ProtoMessage() {} +func (*Discoverer_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{10, 0} +} +func (m *Discoverer_Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Discoverer_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Discoverer_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Discoverer_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Discoverer_Request.Merge(m, src) +} +func (m *Discoverer_Request) XXX_Size() int { + return m.Size() +} +func (m *Discoverer_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Discoverer_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Discoverer_Request proto.InternalMessageInfo + +func (m *Discoverer_Request) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m 
*Discoverer_Request) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *Discoverer_Request) GetNode() string { + if m != nil { + return m.Node + } + return "" +} + +type Backup struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup) Reset() { *m = Backup{} } +func (m *Backup) String() string { return proto.CompactTextString(m) } +func (*Backup) ProtoMessage() {} +func (*Backup) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11} +} +func (m *Backup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup.Merge(m, src) +} +func (m *Backup) XXX_Size() int { + return m.Size() +} +func (m *Backup) XXX_DiscardUnknown() { + xxx_messageInfo_Backup.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup proto.InternalMessageInfo + +type Backup_GetVector struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_GetVector) Reset() { *m = Backup_GetVector{} } +func (m *Backup_GetVector) String() string { return proto.CompactTextString(m) } +func (*Backup_GetVector) ProtoMessage() {} +func (*Backup_GetVector) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 0} +} +func (m *Backup_GetVector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_GetVector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_GetVector.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_GetVector) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_GetVector.Merge(m, src) +} +func (m *Backup_GetVector) XXX_Size() int { + return m.Size() +} +func (m *Backup_GetVector) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_GetVector.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_GetVector proto.InternalMessageInfo + +type Backup_GetVector_Request struct { + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_GetVector_Request) Reset() { *m = Backup_GetVector_Request{} } +func (m *Backup_GetVector_Request) String() string { return proto.CompactTextString(m) } +func (*Backup_GetVector_Request) ProtoMessage() {} +func (*Backup_GetVector_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 0, 0} +} +func (m *Backup_GetVector_Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_GetVector_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_GetVector_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_GetVector_Request) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Backup_GetVector_Request.Merge(m, src) +} +func (m *Backup_GetVector_Request) XXX_Size() int { + return m.Size() +} +func (m *Backup_GetVector_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_GetVector_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_GetVector_Request proto.InternalMessageInfo + +func (m *Backup_GetVector_Request) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + +type Backup_GetVector_Owner struct { + Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_GetVector_Owner) Reset() { *m = Backup_GetVector_Owner{} } +func (m *Backup_GetVector_Owner) String() string { return proto.CompactTextString(m) } +func (*Backup_GetVector_Owner) ProtoMessage() {} +func (*Backup_GetVector_Owner) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 0, 1} +} +func (m *Backup_GetVector_Owner) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_GetVector_Owner) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_GetVector_Owner.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_GetVector_Owner) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_GetVector_Owner.Merge(m, src) +} +func (m *Backup_GetVector_Owner) XXX_Size() int { + return m.Size() +} +func (m *Backup_GetVector_Owner) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_GetVector_Owner.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_GetVector_Owner proto.InternalMessageInfo + +func (m *Backup_GetVector_Owner) GetIp() string { + if m != nil { + return m.Ip + } + return "" +} + +type Backup_Locations struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_Locations) Reset() { *m = Backup_Locations{} } +func (m *Backup_Locations) String() string { return proto.CompactTextString(m) } +func (*Backup_Locations) ProtoMessage() {} +func (*Backup_Locations) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 1} +} +func (m *Backup_Locations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_Locations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_Locations.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_Locations) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_Locations.Merge(m, src) +} +func (m *Backup_Locations) XXX_Size() int { + return m.Size() +} +func (m *Backup_Locations) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_Locations.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_Locations proto.InternalMessageInfo + +type Backup_Locations_Request struct { + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_Locations_Request) Reset() { *m = Backup_Locations_Request{} } +func (m *Backup_Locations_Request) String() string { return proto.CompactTextString(m) } +func 
(*Backup_Locations_Request) ProtoMessage() {} +func (*Backup_Locations_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 1, 0} +} +func (m *Backup_Locations_Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_Locations_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_Locations_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_Locations_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_Locations_Request.Merge(m, src) +} +func (m *Backup_Locations_Request) XXX_Size() int { + return m.Size() +} +func (m *Backup_Locations_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_Locations_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_Locations_Request proto.InternalMessageInfo + +func (m *Backup_Locations_Request) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + +type Backup_Remove struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_Remove) Reset() { *m = Backup_Remove{} } +func (m *Backup_Remove) String() string { return proto.CompactTextString(m) } +func (*Backup_Remove) ProtoMessage() {} +func (*Backup_Remove) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 2} +} +func (m *Backup_Remove) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_Remove) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_Remove.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_Remove) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_Remove.Merge(m, src) +} +func (m *Backup_Remove) XXX_Size() int { + return m.Size() +} +func (m *Backup_Remove) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_Remove.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_Remove proto.InternalMessageInfo + +type Backup_Remove_Request struct { + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_Remove_Request) Reset() { *m = Backup_Remove_Request{} } +func (m *Backup_Remove_Request) String() string { return proto.CompactTextString(m) } +func (*Backup_Remove_Request) ProtoMessage() {} +func (*Backup_Remove_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 2, 0} +} +func (m *Backup_Remove_Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_Remove_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_Remove_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_Remove_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_Remove_Request.Merge(m, src) +} +func (m *Backup_Remove_Request) XXX_Size() int { + return m.Size() +} +func (m *Backup_Remove_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_Remove_Request.DiscardUnknown(m) 
+} + +var xxx_messageInfo_Backup_Remove_Request proto.InternalMessageInfo + +func (m *Backup_Remove_Request) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + +type Backup_Remove_RequestMulti struct { + Uuids []string `protobuf:"bytes,1,rep,name=uuids,proto3" json:"uuids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_Remove_RequestMulti) Reset() { *m = Backup_Remove_RequestMulti{} } +func (m *Backup_Remove_RequestMulti) String() string { return proto.CompactTextString(m) } +func (*Backup_Remove_RequestMulti) ProtoMessage() {} +func (*Backup_Remove_RequestMulti) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 2, 1} +} +func (m *Backup_Remove_RequestMulti) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_Remove_RequestMulti) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_Remove_RequestMulti.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_Remove_RequestMulti) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_Remove_RequestMulti.Merge(m, src) +} +func (m *Backup_Remove_RequestMulti) XXX_Size() int { + return m.Size() +} +func (m *Backup_Remove_RequestMulti) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_Remove_RequestMulti.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_Remove_RequestMulti proto.InternalMessageInfo + +func (m *Backup_Remove_RequestMulti) GetUuids() []string { + if m != nil { + return m.Uuids + } + return nil +} + +type Backup_IP struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_IP) Reset() { *m = Backup_IP{} } +func (m *Backup_IP) String() string { return proto.CompactTextString(m) } +func (*Backup_IP) ProtoMessage() {} +func (*Backup_IP) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 3} +} +func (m *Backup_IP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_IP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_IP.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_IP) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_IP.Merge(m, src) +} +func (m *Backup_IP) XXX_Size() int { + return m.Size() +} +func (m *Backup_IP) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_IP.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_IP proto.InternalMessageInfo + +type Backup_IP_Register struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_IP_Register) Reset() { *m = Backup_IP_Register{} } +func (m *Backup_IP_Register) String() string { return proto.CompactTextString(m) } +func (*Backup_IP_Register) ProtoMessage() {} +func (*Backup_IP_Register) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 3, 0} +} +func (m *Backup_IP_Register) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_IP_Register) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_Backup_IP_Register.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_IP_Register) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_IP_Register.Merge(m, src) +} +func (m *Backup_IP_Register) XXX_Size() int { + return m.Size() +} +func (m *Backup_IP_Register) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_IP_Register.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_IP_Register proto.InternalMessageInfo + +type Backup_IP_Register_Request struct { + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Ips []string `protobuf:"bytes,2,rep,name=ips,proto3" json:"ips,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_IP_Register_Request) Reset() { *m = Backup_IP_Register_Request{} } +func (m *Backup_IP_Register_Request) String() string { return proto.CompactTextString(m) } +func (*Backup_IP_Register_Request) ProtoMessage() {} +func (*Backup_IP_Register_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 3, 0, 0} +} +func (m *Backup_IP_Register_Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_IP_Register_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_IP_Register_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_IP_Register_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_IP_Register_Request.Merge(m, src) +} +func (m *Backup_IP_Register_Request) XXX_Size() int { + return m.Size() +} +func (m *Backup_IP_Register_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_IP_Register_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_IP_Register_Request proto.InternalMessageInfo + +func (m *Backup_IP_Register_Request) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + +func (m *Backup_IP_Register_Request) GetIps() []string { + if m != nil { + return m.Ips + } + return nil +} + +type Backup_IP_Remove struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_IP_Remove) Reset() { *m = Backup_IP_Remove{} } +func (m *Backup_IP_Remove) String() string { return proto.CompactTextString(m) } +func (*Backup_IP_Remove) ProtoMessage() {} +func (*Backup_IP_Remove) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 3, 1} +} +func (m *Backup_IP_Remove) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_IP_Remove) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_IP_Remove.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_IP_Remove) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_IP_Remove.Merge(m, src) +} +func (m *Backup_IP_Remove) XXX_Size() int { + return m.Size() +} +func (m *Backup_IP_Remove) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_IP_Remove.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_IP_Remove proto.InternalMessageInfo + +type Backup_IP_Remove_Request struct { + 
Ips []string `protobuf:"bytes,1,rep,name=ips,proto3" json:"ips,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_IP_Remove_Request) Reset() { *m = Backup_IP_Remove_Request{} } +func (m *Backup_IP_Remove_Request) String() string { return proto.CompactTextString(m) } +func (*Backup_IP_Remove_Request) ProtoMessage() {} +func (*Backup_IP_Remove_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 3, 1, 0} +} +func (m *Backup_IP_Remove_Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_IP_Remove_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_IP_Remove_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_IP_Remove_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_IP_Remove_Request.Merge(m, src) +} +func (m *Backup_IP_Remove_Request) XXX_Size() int { + return m.Size() +} +func (m *Backup_IP_Remove_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_IP_Remove_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_IP_Remove_Request proto.InternalMessageInfo + +func (m *Backup_IP_Remove_Request) GetIps() []string { + if m != nil { + return m.Ips + } + return nil +} + +type Backup_Vector struct { + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Vector []float32 `protobuf:"fixed32,3,rep,packed,name=vector,proto3" json:"vector,omitempty"` + Ips []string `protobuf:"bytes,4,rep,name=ips,proto3" json:"ips,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_Vector) Reset() { *m = Backup_Vector{} } +func (m *Backup_Vector) String() string { return proto.CompactTextString(m) } +func (*Backup_Vector) ProtoMessage() {} +func (*Backup_Vector) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 4} +} +func (m *Backup_Vector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_Vector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_Vector.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_Vector) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_Vector.Merge(m, src) +} +func (m *Backup_Vector) XXX_Size() int { + return m.Size() +} +func (m *Backup_Vector) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_Vector.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_Vector proto.InternalMessageInfo + +func (m *Backup_Vector) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + +func (m *Backup_Vector) GetVector() []float32 { + if m != nil { + return m.Vector + } + return nil +} + +func (m *Backup_Vector) GetIps() []string { + if m != nil { + return m.Ips + } + return nil +} + +type Backup_Vectors struct { + Vectors []*Backup_Vector `protobuf:"bytes,1,rep,name=vectors,proto3" json:"vectors,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_Vectors) Reset() { *m = Backup_Vectors{} } +func (m *Backup_Vectors) String() string { return 
proto.CompactTextString(m) } +func (*Backup_Vectors) ProtoMessage() {} +func (*Backup_Vectors) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 5} +} +func (m *Backup_Vectors) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_Vectors) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_Vectors.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_Vectors) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_Vectors.Merge(m, src) +} +func (m *Backup_Vectors) XXX_Size() int { + return m.Size() +} +func (m *Backup_Vectors) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_Vectors.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_Vectors proto.InternalMessageInfo + +func (m *Backup_Vectors) GetVectors() []*Backup_Vector { + if m != nil { + return m.Vectors + } + return nil +} + +type Backup_Compressed struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_Compressed) Reset() { *m = Backup_Compressed{} } +func (m *Backup_Compressed) String() string { return proto.CompactTextString(m) } +func (*Backup_Compressed) ProtoMessage() {} +func (*Backup_Compressed) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 6} +} +func (m *Backup_Compressed) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_Compressed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_Compressed.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_Compressed) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_Compressed.Merge(m, src) +} +func (m *Backup_Compressed) XXX_Size() int { + return m.Size() +} +func (m *Backup_Compressed) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_Compressed.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_Compressed proto.InternalMessageInfo + +type Backup_Compressed_Vector struct { + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Vector []byte `protobuf:"bytes,3,opt,name=vector,proto3" json:"vector,omitempty"` + Ips []string `protobuf:"bytes,4,rep,name=ips,proto3" json:"ips,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_Compressed_Vector) Reset() { *m = Backup_Compressed_Vector{} } +func (m *Backup_Compressed_Vector) String() string { return proto.CompactTextString(m) } +func (*Backup_Compressed_Vector) ProtoMessage() {} +func (*Backup_Compressed_Vector) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 6, 0} +} +func (m *Backup_Compressed_Vector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_Compressed_Vector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_Compressed_Vector.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_Compressed_Vector) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_Compressed_Vector.Merge(m, src) +} 
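Illustrative aside (not part of this diff): Backup_Vector and Backup_Compressed_Vector differ only in how the vector travels; the former carries raw float32 values, the latter an opaque compressed byte slice, and both are keyed by Uuid and carry the owning agent IPs. A minimal sketch under the same assumed import path, with the compression routine left as a hypothetical parameter:

package example

import (
	payload "github.com/vdaas/vald/apis/grpc/v1/payload" // import path assumed
)

// toCompressed converts a raw backup vector into its compressed form.
// The compress function is a placeholder for whichever compressor the
// caller uses; only the payload shapes are taken from this file.
func toCompressed(v *payload.Backup_Vector, compress func([]float32) []byte) *payload.Backup_Compressed_Vector {
	return &payload.Backup_Compressed_Vector{
		Uuid:   v.GetUuid(),             // same identifier
		Vector: compress(v.GetVector()), // raw []float32 becomes compressed bytes
		Ips:    v.GetIps(),              // owning agent IPs carried through unchanged
	}
}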
+func (m *Backup_Compressed_Vector) XXX_Size() int { + return m.Size() +} +func (m *Backup_Compressed_Vector) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_Compressed_Vector.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_Compressed_Vector proto.InternalMessageInfo + +func (m *Backup_Compressed_Vector) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + +func (m *Backup_Compressed_Vector) GetVector() []byte { + if m != nil { + return m.Vector + } + return nil +} + +func (m *Backup_Compressed_Vector) GetIps() []string { + if m != nil { + return m.Ips + } + return nil +} + +type Backup_Compressed_Vectors struct { + Vectors []*Backup_Compressed_Vector `protobuf:"bytes,1,rep,name=vectors,proto3" json:"vectors,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backup_Compressed_Vectors) Reset() { *m = Backup_Compressed_Vectors{} } +func (m *Backup_Compressed_Vectors) String() string { return proto.CompactTextString(m) } +func (*Backup_Compressed_Vectors) ProtoMessage() {} +func (*Backup_Compressed_Vectors) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{11, 6, 1} +} +func (m *Backup_Compressed_Vectors) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Backup_Compressed_Vectors) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Backup_Compressed_Vectors.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Backup_Compressed_Vectors) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backup_Compressed_Vectors.Merge(m, src) +} +func (m *Backup_Compressed_Vectors) XXX_Size() int { + return m.Size() +} +func (m *Backup_Compressed_Vectors) XXX_DiscardUnknown() { + xxx_messageInfo_Backup_Compressed_Vectors.DiscardUnknown(m) +} + +var xxx_messageInfo_Backup_Compressed_Vectors proto.InternalMessageInfo + +func (m *Backup_Compressed_Vectors) GetVectors() []*Backup_Compressed_Vector { + if m != nil { + return m.Vectors + } + return nil +} + +type Info struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{12} +} +func (m *Info) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info.Merge(m, src) +} +func (m *Info) XXX_Size() int { + return m.Size() +} +func (m *Info) XXX_DiscardUnknown() { + xxx_messageInfo_Info.DiscardUnknown(m) +} + +var xxx_messageInfo_Info proto.InternalMessageInfo + +type Info_Index struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info_Index) Reset() { *m = Info_Index{} } +func (m *Info_Index) String() string { return proto.CompactTextString(m) } +func (*Info_Index) ProtoMessage() {} +func (*Info_Index) 
Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{12, 0} +} +func (m *Info_Index) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info_Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info_Index.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info_Index) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info_Index.Merge(m, src) +} +func (m *Info_Index) XXX_Size() int { + return m.Size() +} +func (m *Info_Index) XXX_DiscardUnknown() { + xxx_messageInfo_Info_Index.DiscardUnknown(m) +} + +var xxx_messageInfo_Info_Index proto.InternalMessageInfo + +type Info_Index_Count struct { + Stored uint32 `protobuf:"varint,1,opt,name=stored,proto3" json:"stored,omitempty"` + Uncommitted uint32 `protobuf:"varint,2,opt,name=uncommitted,proto3" json:"uncommitted,omitempty"` + Indexing bool `protobuf:"varint,3,opt,name=indexing,proto3" json:"indexing,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info_Index_Count) Reset() { *m = Info_Index_Count{} } +func (m *Info_Index_Count) String() string { return proto.CompactTextString(m) } +func (*Info_Index_Count) ProtoMessage() {} +func (*Info_Index_Count) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{12, 0, 0} +} +func (m *Info_Index_Count) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info_Index_Count) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info_Index_Count.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info_Index_Count) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info_Index_Count.Merge(m, src) +} +func (m *Info_Index_Count) XXX_Size() int { + return m.Size() +} +func (m *Info_Index_Count) XXX_DiscardUnknown() { + xxx_messageInfo_Info_Index_Count.DiscardUnknown(m) +} + +var xxx_messageInfo_Info_Index_Count proto.InternalMessageInfo + +func (m *Info_Index_Count) GetStored() uint32 { + if m != nil { + return m.Stored + } + return 0 +} + +func (m *Info_Index_Count) GetUncommitted() uint32 { + if m != nil { + return m.Uncommitted + } + return 0 +} + +func (m *Info_Index_Count) GetIndexing() bool { + if m != nil { + return m.Indexing + } + return false +} + +type Info_Index_UUID struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info_Index_UUID) Reset() { *m = Info_Index_UUID{} } +func (m *Info_Index_UUID) String() string { return proto.CompactTextString(m) } +func (*Info_Index_UUID) ProtoMessage() {} +func (*Info_Index_UUID) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{12, 0, 1} +} +func (m *Info_Index_UUID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info_Index_UUID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info_Index_UUID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info_Index_UUID) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info_Index_UUID.Merge(m, src) +} 
+func (m *Info_Index_UUID) XXX_Size() int { + return m.Size() +} +func (m *Info_Index_UUID) XXX_DiscardUnknown() { + xxx_messageInfo_Info_Index_UUID.DiscardUnknown(m) +} + +var xxx_messageInfo_Info_Index_UUID proto.InternalMessageInfo + +type Info_Index_UUID_Committed struct { + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info_Index_UUID_Committed) Reset() { *m = Info_Index_UUID_Committed{} } +func (m *Info_Index_UUID_Committed) String() string { return proto.CompactTextString(m) } +func (*Info_Index_UUID_Committed) ProtoMessage() {} +func (*Info_Index_UUID_Committed) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{12, 0, 1, 0} +} +func (m *Info_Index_UUID_Committed) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info_Index_UUID_Committed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info_Index_UUID_Committed.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info_Index_UUID_Committed) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info_Index_UUID_Committed.Merge(m, src) +} +func (m *Info_Index_UUID_Committed) XXX_Size() int { + return m.Size() +} +func (m *Info_Index_UUID_Committed) XXX_DiscardUnknown() { + xxx_messageInfo_Info_Index_UUID_Committed.DiscardUnknown(m) +} + +var xxx_messageInfo_Info_Index_UUID_Committed proto.InternalMessageInfo + +func (m *Info_Index_UUID_Committed) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + +type Info_Index_UUID_Uncommitted struct { + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info_Index_UUID_Uncommitted) Reset() { *m = Info_Index_UUID_Uncommitted{} } +func (m *Info_Index_UUID_Uncommitted) String() string { return proto.CompactTextString(m) } +func (*Info_Index_UUID_Uncommitted) ProtoMessage() {} +func (*Info_Index_UUID_Uncommitted) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{12, 0, 1, 1} +} +func (m *Info_Index_UUID_Uncommitted) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info_Index_UUID_Uncommitted) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info_Index_UUID_Uncommitted.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info_Index_UUID_Uncommitted) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info_Index_UUID_Uncommitted.Merge(m, src) +} +func (m *Info_Index_UUID_Uncommitted) XXX_Size() int { + return m.Size() +} +func (m *Info_Index_UUID_Uncommitted) XXX_DiscardUnknown() { + xxx_messageInfo_Info_Index_UUID_Uncommitted.DiscardUnknown(m) +} + +var xxx_messageInfo_Info_Index_UUID_Uncommitted proto.InternalMessageInfo + +func (m *Info_Index_UUID_Uncommitted) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + +type Info_Pod struct { + AppName string `protobuf:"bytes,1,opt,name=app_name,json=appName,proto3" json:"app_name,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Namespace 
string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + Ip string `protobuf:"bytes,4,opt,name=ip,proto3" json:"ip,omitempty"` + Cpu *Info_CPU `protobuf:"bytes,5,opt,name=cpu,proto3" json:"cpu,omitempty"` + Memory *Info_Memory `protobuf:"bytes,6,opt,name=memory,proto3" json:"memory,omitempty"` + Node *Info_Node `protobuf:"bytes,7,opt,name=node,proto3" json:"node,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info_Pod) Reset() { *m = Info_Pod{} } +func (m *Info_Pod) String() string { return proto.CompactTextString(m) } +func (*Info_Pod) ProtoMessage() {} +func (*Info_Pod) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{12, 1} +} +func (m *Info_Pod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info_Pod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info_Pod.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info_Pod) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info_Pod.Merge(m, src) +} +func (m *Info_Pod) XXX_Size() int { + return m.Size() +} +func (m *Info_Pod) XXX_DiscardUnknown() { + xxx_messageInfo_Info_Pod.DiscardUnknown(m) +} + +var xxx_messageInfo_Info_Pod proto.InternalMessageInfo + +func (m *Info_Pod) GetAppName() string { + if m != nil { + return m.AppName + } + return "" +} + +func (m *Info_Pod) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Info_Pod) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *Info_Pod) GetIp() string { + if m != nil { + return m.Ip + } + return "" +} + +func (m *Info_Pod) GetCpu() *Info_CPU { + if m != nil { + return m.Cpu + } + return nil +} + +func (m *Info_Pod) GetMemory() *Info_Memory { + if m != nil { + return m.Memory + } + return nil +} + +func (m *Info_Pod) GetNode() *Info_Node { + if m != nil { + return m.Node + } + return nil +} + +type Info_Node struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + InternalAddr string `protobuf:"bytes,2,opt,name=internal_addr,json=internalAddr,proto3" json:"internal_addr,omitempty"` + ExternalAddr string `protobuf:"bytes,3,opt,name=external_addr,json=externalAddr,proto3" json:"external_addr,omitempty"` + Cpu *Info_CPU `protobuf:"bytes,4,opt,name=cpu,proto3" json:"cpu,omitempty"` + Memory *Info_Memory `protobuf:"bytes,5,opt,name=memory,proto3" json:"memory,omitempty"` + Pods *Info_Pods `protobuf:"bytes,6,opt,name=Pods,proto3" json:"Pods,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info_Node) Reset() { *m = Info_Node{} } +func (m *Info_Node) String() string { return proto.CompactTextString(m) } +func (*Info_Node) ProtoMessage() {} +func (*Info_Node) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{12, 2} +} +func (m *Info_Node) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info_Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info_Node.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info_Node) XXX_Merge(src proto.Message) 
{ + xxx_messageInfo_Info_Node.Merge(m, src) +} +func (m *Info_Node) XXX_Size() int { + return m.Size() +} +func (m *Info_Node) XXX_DiscardUnknown() { + xxx_messageInfo_Info_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_Info_Node proto.InternalMessageInfo + +func (m *Info_Node) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Info_Node) GetInternalAddr() string { + if m != nil { + return m.InternalAddr + } + return "" +} + +func (m *Info_Node) GetExternalAddr() string { + if m != nil { + return m.ExternalAddr + } + return "" +} + +func (m *Info_Node) GetCpu() *Info_CPU { + if m != nil { + return m.Cpu + } + return nil +} + +func (m *Info_Node) GetMemory() *Info_Memory { + if m != nil { + return m.Memory + } + return nil +} + +func (m *Info_Node) GetPods() *Info_Pods { + if m != nil { + return m.Pods + } + return nil +} + +type Info_CPU struct { + Limit float64 `protobuf:"fixed64,1,opt,name=limit,proto3" json:"limit,omitempty"` + Request float64 `protobuf:"fixed64,2,opt,name=request,proto3" json:"request,omitempty"` + Usage float64 `protobuf:"fixed64,3,opt,name=usage,proto3" json:"usage,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info_CPU) Reset() { *m = Info_CPU{} } +func (m *Info_CPU) String() string { return proto.CompactTextString(m) } +func (*Info_CPU) ProtoMessage() {} +func (*Info_CPU) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{12, 3} +} +func (m *Info_CPU) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info_CPU) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info_CPU.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info_CPU) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info_CPU.Merge(m, src) +} +func (m *Info_CPU) XXX_Size() int { + return m.Size() +} +func (m *Info_CPU) XXX_DiscardUnknown() { + xxx_messageInfo_Info_CPU.DiscardUnknown(m) +} + +var xxx_messageInfo_Info_CPU proto.InternalMessageInfo + +func (m *Info_CPU) GetLimit() float64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *Info_CPU) GetRequest() float64 { + if m != nil { + return m.Request + } + return 0 +} + +func (m *Info_CPU) GetUsage() float64 { + if m != nil { + return m.Usage + } + return 0 +} + +type Info_Memory struct { + Limit float64 `protobuf:"fixed64,1,opt,name=limit,proto3" json:"limit,omitempty"` + Request float64 `protobuf:"fixed64,2,opt,name=request,proto3" json:"request,omitempty"` + Usage float64 `protobuf:"fixed64,3,opt,name=usage,proto3" json:"usage,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info_Memory) Reset() { *m = Info_Memory{} } +func (m *Info_Memory) String() string { return proto.CompactTextString(m) } +func (*Info_Memory) ProtoMessage() {} +func (*Info_Memory) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{12, 4} +} +func (m *Info_Memory) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info_Memory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info_Memory.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return 
b[:n], nil + } +} +func (m *Info_Memory) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info_Memory.Merge(m, src) +} +func (m *Info_Memory) XXX_Size() int { + return m.Size() +} +func (m *Info_Memory) XXX_DiscardUnknown() { + xxx_messageInfo_Info_Memory.DiscardUnknown(m) +} + +var xxx_messageInfo_Info_Memory proto.InternalMessageInfo + +func (m *Info_Memory) GetLimit() float64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *Info_Memory) GetRequest() float64 { + if m != nil { + return m.Request + } + return 0 +} + +func (m *Info_Memory) GetUsage() float64 { + if m != nil { + return m.Usage + } + return 0 +} + +type Info_Pods struct { + Pods []*Info_Pod `protobuf:"bytes,1,rep,name=pods,proto3" json:"pods,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info_Pods) Reset() { *m = Info_Pods{} } +func (m *Info_Pods) String() string { return proto.CompactTextString(m) } +func (*Info_Pods) ProtoMessage() {} +func (*Info_Pods) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{12, 5} +} +func (m *Info_Pods) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info_Pods) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info_Pods.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info_Pods) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info_Pods.Merge(m, src) +} +func (m *Info_Pods) XXX_Size() int { + return m.Size() +} +func (m *Info_Pods) XXX_DiscardUnknown() { + xxx_messageInfo_Info_Pods.DiscardUnknown(m) +} + +var xxx_messageInfo_Info_Pods proto.InternalMessageInfo + +func (m *Info_Pods) GetPods() []*Info_Pod { + if m != nil { + return m.Pods + } + return nil +} + +type Info_Nodes struct { + Nodes []*Info_Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info_Nodes) Reset() { *m = Info_Nodes{} } +func (m *Info_Nodes) String() string { return proto.CompactTextString(m) } +func (*Info_Nodes) ProtoMessage() {} +func (*Info_Nodes) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{12, 6} +} +func (m *Info_Nodes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info_Nodes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info_Nodes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info_Nodes) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info_Nodes.Merge(m, src) +} +func (m *Info_Nodes) XXX_Size() int { + return m.Size() +} +func (m *Info_Nodes) XXX_DiscardUnknown() { + xxx_messageInfo_Info_Nodes.DiscardUnknown(m) +} + +var xxx_messageInfo_Info_Nodes proto.InternalMessageInfo + +func (m *Info_Nodes) GetNodes() []*Info_Node { + if m != nil { + return m.Nodes + } + return nil +} + +type Info_IPs struct { + Ip []string `protobuf:"bytes,1,rep,name=ip,proto3" json:"ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info_IPs) Reset() { *m = Info_IPs{} } +func (m *Info_IPs) String() string { return 
proto.CompactTextString(m) } +func (*Info_IPs) ProtoMessage() {} +func (*Info_IPs) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{12, 7} +} +func (m *Info_IPs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info_IPs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info_IPs.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info_IPs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info_IPs.Merge(m, src) +} +func (m *Info_IPs) XXX_Size() int { + return m.Size() +} +func (m *Info_IPs) XXX_DiscardUnknown() { + xxx_messageInfo_Info_IPs.DiscardUnknown(m) +} + +var xxx_messageInfo_Info_IPs proto.InternalMessageInfo + +func (m *Info_IPs) GetIp() []string { + if m != nil { + return m.Ip + } + return nil +} + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_f0518b37b4e7594b, []int{13} +} +func (m *Empty) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return m.Size() +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Search)(nil), "payload.v1.Search") + proto.RegisterType((*Search_Request)(nil), "payload.v1.Search.Request") + proto.RegisterType((*Search_MultiRequest)(nil), "payload.v1.Search.MultiRequest") + proto.RegisterType((*Search_IDRequest)(nil), "payload.v1.Search.IDRequest") + proto.RegisterType((*Search_MultiIDRequest)(nil), "payload.v1.Search.MultiIDRequest") + proto.RegisterType((*Search_ObjectRequest)(nil), "payload.v1.Search.ObjectRequest") + proto.RegisterType((*Search_Config)(nil), "payload.v1.Search.Config") + proto.RegisterType((*Search_Response)(nil), "payload.v1.Search.Response") + proto.RegisterType((*Search_Responses)(nil), "payload.v1.Search.Responses") + proto.RegisterType((*Filter)(nil), "payload.v1.Filter") + proto.RegisterType((*Filter_Target)(nil), "payload.v1.Filter.Target") + proto.RegisterType((*Filter_Config)(nil), "payload.v1.Filter.Config") + proto.RegisterType((*Insert)(nil), "payload.v1.Insert") + proto.RegisterType((*Insert_Request)(nil), "payload.v1.Insert.Request") + proto.RegisterType((*Insert_MultiRequest)(nil), "payload.v1.Insert.MultiRequest") + proto.RegisterType((*Insert_Config)(nil), "payload.v1.Insert.Config") + proto.RegisterType((*Update)(nil), "payload.v1.Update") + proto.RegisterType((*Update_Request)(nil), "payload.v1.Update.Request") + proto.RegisterType((*Update_MultiRequest)(nil), "payload.v1.Update.MultiRequest") + proto.RegisterType((*Update_Config)(nil), "payload.v1.Update.Config") + proto.RegisterType((*Upsert)(nil), "payload.v1.Upsert") + 
proto.RegisterType((*Upsert_Request)(nil), "payload.v1.Upsert.Request") + proto.RegisterType((*Upsert_MultiRequest)(nil), "payload.v1.Upsert.MultiRequest") + proto.RegisterType((*Upsert_Config)(nil), "payload.v1.Upsert.Config") + proto.RegisterType((*Remove)(nil), "payload.v1.Remove") + proto.RegisterType((*Remove_Request)(nil), "payload.v1.Remove.Request") + proto.RegisterType((*Remove_MultiRequest)(nil), "payload.v1.Remove.MultiRequest") + proto.RegisterType((*Remove_Config)(nil), "payload.v1.Remove.Config") + proto.RegisterType((*Meta)(nil), "payload.v1.Meta") + proto.RegisterType((*Meta_Key)(nil), "payload.v1.Meta.Key") + proto.RegisterType((*Meta_Keys)(nil), "payload.v1.Meta.Keys") + proto.RegisterType((*Meta_Val)(nil), "payload.v1.Meta.Val") + proto.RegisterType((*Meta_Vals)(nil), "payload.v1.Meta.Vals") + proto.RegisterType((*Meta_KeyVal)(nil), "payload.v1.Meta.KeyVal") + proto.RegisterType((*Meta_KeyVals)(nil), "payload.v1.Meta.KeyVals") + proto.RegisterType((*Object)(nil), "payload.v1.Object") + proto.RegisterType((*Object_Distance)(nil), "payload.v1.Object.Distance") + proto.RegisterType((*Object_ID)(nil), "payload.v1.Object.ID") + proto.RegisterType((*Object_IDs)(nil), "payload.v1.Object.IDs") + proto.RegisterType((*Object_Vector)(nil), "payload.v1.Object.Vector") + proto.RegisterType((*Object_Vectors)(nil), "payload.v1.Object.Vectors") + proto.RegisterType((*Object_Blob)(nil), "payload.v1.Object.Blob") + proto.RegisterType((*Object_Location)(nil), "payload.v1.Object.Location") + proto.RegisterType((*Object_Locations)(nil), "payload.v1.Object.Locations") + proto.RegisterType((*Control)(nil), "payload.v1.Control") + proto.RegisterType((*Control_CreateIndexRequest)(nil), "payload.v1.Control.CreateIndexRequest") + proto.RegisterType((*Replication)(nil), "payload.v1.Replication") + proto.RegisterType((*Replication_Recovery)(nil), "payload.v1.Replication.Recovery") + proto.RegisterType((*Replication_Rebalance)(nil), "payload.v1.Replication.Rebalance") + proto.RegisterType((*Replication_Agents)(nil), "payload.v1.Replication.Agents") + proto.RegisterType((*Discoverer)(nil), "payload.v1.Discoverer") + proto.RegisterType((*Discoverer_Request)(nil), "payload.v1.Discoverer.Request") + proto.RegisterType((*Backup)(nil), "payload.v1.Backup") + proto.RegisterType((*Backup_GetVector)(nil), "payload.v1.Backup.GetVector") + proto.RegisterType((*Backup_GetVector_Request)(nil), "payload.v1.Backup.GetVector.Request") + proto.RegisterType((*Backup_GetVector_Owner)(nil), "payload.v1.Backup.GetVector.Owner") + proto.RegisterType((*Backup_Locations)(nil), "payload.v1.Backup.Locations") + proto.RegisterType((*Backup_Locations_Request)(nil), "payload.v1.Backup.Locations.Request") + proto.RegisterType((*Backup_Remove)(nil), "payload.v1.Backup.Remove") + proto.RegisterType((*Backup_Remove_Request)(nil), "payload.v1.Backup.Remove.Request") + proto.RegisterType((*Backup_Remove_RequestMulti)(nil), "payload.v1.Backup.Remove.RequestMulti") + proto.RegisterType((*Backup_IP)(nil), "payload.v1.Backup.IP") + proto.RegisterType((*Backup_IP_Register)(nil), "payload.v1.Backup.IP.Register") + proto.RegisterType((*Backup_IP_Register_Request)(nil), "payload.v1.Backup.IP.Register.Request") + proto.RegisterType((*Backup_IP_Remove)(nil), "payload.v1.Backup.IP.Remove") + proto.RegisterType((*Backup_IP_Remove_Request)(nil), "payload.v1.Backup.IP.Remove.Request") + proto.RegisterType((*Backup_Vector)(nil), "payload.v1.Backup.Vector") + proto.RegisterType((*Backup_Vectors)(nil), "payload.v1.Backup.Vectors") + 
proto.RegisterType((*Backup_Compressed)(nil), "payload.v1.Backup.Compressed") + proto.RegisterType((*Backup_Compressed_Vector)(nil), "payload.v1.Backup.Compressed.Vector") + proto.RegisterType((*Backup_Compressed_Vectors)(nil), "payload.v1.Backup.Compressed.Vectors") + proto.RegisterType((*Info)(nil), "payload.v1.Info") + proto.RegisterType((*Info_Index)(nil), "payload.v1.Info.Index") + proto.RegisterType((*Info_Index_Count)(nil), "payload.v1.Info.Index.Count") + proto.RegisterType((*Info_Index_UUID)(nil), "payload.v1.Info.Index.UUID") + proto.RegisterType((*Info_Index_UUID_Committed)(nil), "payload.v1.Info.Index.UUID.Committed") + proto.RegisterType((*Info_Index_UUID_Uncommitted)(nil), "payload.v1.Info.Index.UUID.Uncommitted") + proto.RegisterType((*Info_Pod)(nil), "payload.v1.Info.Pod") + proto.RegisterType((*Info_Node)(nil), "payload.v1.Info.Node") + proto.RegisterType((*Info_CPU)(nil), "payload.v1.Info.CPU") + proto.RegisterType((*Info_Memory)(nil), "payload.v1.Info.Memory") + proto.RegisterType((*Info_Pods)(nil), "payload.v1.Info.Pods") + proto.RegisterType((*Info_Nodes)(nil), "payload.v1.Info.Nodes") + proto.RegisterType((*Info_IPs)(nil), "payload.v1.Info.IPs") + proto.RegisterType((*Empty)(nil), "payload.v1.Empty") +} + +func init() { + proto.RegisterFile("apis/proto/v1/payload/payload.proto", fileDescriptor_f0518b37b4e7594b) +} + +var fileDescriptor_f0518b37b4e7594b = []byte{ + // 1705 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x93, 0x1b, 0x47, + 0x15, 0x67, 0x46, 0xd2, 0x48, 0x7a, 0xda, 0x35, 0xce, 0x94, 0xe3, 0x55, 0xda, 0xb1, 0x11, 0x4a, + 0x9c, 0xda, 0x18, 0x22, 0xe1, 0x35, 0x0e, 0x10, 0xaa, 0x4c, 0x59, 0x92, 0x4d, 0x29, 0x8b, 0x37, + 0xaa, 0x31, 0xda, 0xa2, 0x52, 0x45, 0x94, 0xde, 0x99, 0x5e, 0x6d, 0xb3, 0xa3, 0xe9, 0x61, 0x7a, + 0x24, 0xaf, 0x72, 0xe7, 0x0b, 0x70, 0x82, 0x13, 0xdf, 0x21, 0xdf, 0x80, 0x82, 0x43, 0x8e, 0x1c, + 0xf8, 0x00, 0x94, 0x4f, 0xdc, 0xb8, 0x71, 0xc8, 0x89, 0xea, 0x7f, 0x33, 0xa3, 0x95, 0x96, 0x2c, + 0x59, 0x2a, 0xe5, 0x93, 0xe6, 0xbd, 0xfe, 0xbd, 0xbf, 0xfd, 0xde, 0xeb, 0x6e, 0xc1, 0x5b, 0x38, + 0xa6, 0xbc, 0x1b, 0x27, 0x2c, 0x65, 0xdd, 0xc5, 0xfd, 0x6e, 0x8c, 0x97, 0x21, 0xc3, 0x81, 0xf9, + 0xed, 0xc8, 0x05, 0x17, 0x0c, 0xb9, 0xb8, 0x8f, 0x1e, 0x4f, 0x69, 0x7a, 0x32, 0x3f, 0xea, 0xf8, + 0x6c, 0xd6, 0x25, 0xd1, 0x82, 0x2d, 0xe3, 0x84, 0x9d, 0x2d, 0x95, 0x06, 0xff, 0xbd, 0x29, 0x89, + 0xde, 0x5b, 0xe0, 0x90, 0x06, 0x38, 0x25, 0xdd, 0xb5, 0x0f, 0xa5, 0xae, 0xfd, 0x07, 0x07, 0x9c, + 0xe7, 0x04, 0x27, 0xfe, 0x09, 0xfa, 0x04, 0xaa, 0x1e, 0xf9, 0xed, 0x9c, 0xf0, 0xd4, 0x6d, 0x81, + 0xb3, 0x20, 0x7e, 0xca, 0x92, 0xa6, 0xd5, 0x2a, 0xed, 0xda, 0xbd, 0xda, 0x97, 0xbd, 0xca, 0xef, + 0x2d, 0xbb, 0x66, 0x7b, 0x9a, 0xef, 0xde, 0x07, 0xc7, 0x67, 0xd1, 0x31, 0x9d, 0x36, 0xed, 0x96, + 0xb5, 0xdb, 0xd8, 0x7b, 0xa3, 0x93, 0xfb, 0xd5, 0x51, 0x0a, 0x3b, 0x7d, 0x09, 0xf0, 0x34, 0x10, + 0x3d, 0x85, 0xad, 0x67, 0xf3, 0x30, 0xa5, 0xc6, 0xc8, 0xfb, 0x50, 0x4b, 0xd4, 0x27, 0x97, 0x66, + 0x1a, 0x7b, 0x68, 0x83, 0x12, 0x8d, 0xf6, 0x32, 0x2c, 0x3a, 0x80, 0xfa, 0x70, 0x60, 0x94, 0x5c, + 0x03, 0x9b, 0x06, 0x4d, 0xab, 0x65, 0xed, 0xd6, 0x3d, 0x9b, 0x06, 0x5f, 0xc7, 0xaf, 0x0f, 0xe1, + 0x9a, 0xf4, 0x2b, 0x57, 0xfa, 0xe3, 0x35, 0xcf, 0xde, 0xdc, 0xa0, 0x26, 0xc3, 0x17, 0x7c, 0xfb, + 0x18, 0xb6, 0x3f, 0x3a, 0xfa, 0x0d, 0xf1, 0x53, 0xa3, 0xea, 0x26, 0x38, 0x4c, 0x32, 0xa4, 0x8f, + 0x5b, 0x9e, 0xa6, 0xbe, 0x8e, 0x9f, 0x7f, 0xb1, 0xc0, 0x51, 0x2c, 0xf7, 0x36, 0x80, 0x36, 0x39, + 0xc9, 0xa2, 0xaf, 0x6b, 0xce, 0x30, 
0x70, 0xdf, 0x80, 0x52, 0x34, 0x9f, 0x49, 0xcd, 0xdb, 0xbd, + 0xea, 0x97, 0xbd, 0xf2, 0x3d, 0x7b, 0xd7, 0xf2, 0x04, 0x4f, 0xf8, 0x93, 0xe0, 0x80, 0xce, 0x79, + 0xb3, 0xd4, 0xb2, 0x76, 0x6d, 0x4f, 0x53, 0x6e, 0x13, 0xaa, 0x24, 0xe6, 0x34, 0x64, 0x51, 0xb3, + 0x2c, 0x17, 0x0c, 0x29, 0x56, 0x52, 0x3a, 0x23, 0x6c, 0x9e, 0x36, 0x2b, 0x2d, 0x6b, 0xb7, 0xe4, + 0x19, 0xd2, 0x7d, 0x00, 0xd5, 0x63, 0x1a, 0xa6, 0x24, 0xe1, 0x4d, 0x67, 0x3d, 0x88, 0xa7, 0x72, + 0xc9, 0x04, 0x61, 0x90, 0xe8, 0x53, 0xa8, 0x79, 0x84, 0xc7, 0x2c, 0xe2, 0xe4, 0xab, 0xc2, 0x78, + 0x08, 0xd5, 0x84, 0xf0, 0x79, 0x98, 0xf2, 0xa6, 0x2d, 0x77, 0xe1, 0x56, 0x51, 0xbf, 0xca, 0x73, + 0x67, 0x40, 0x79, 0x8a, 0x23, 0x9f, 0x78, 0x06, 0x8b, 0x9e, 0x42, 0xdd, 0x58, 0xe0, 0xee, 0x4f, + 0xa0, 0x9e, 0x18, 0x42, 0xef, 0xe5, 0xad, 0x8d, 0x55, 0xa6, 0x30, 0x5e, 0x8e, 0x6e, 0x7f, 0x02, + 0x8e, 0x8a, 0x01, 0xfd, 0x00, 0x9c, 0x5f, 0xe2, 0x64, 0x4a, 0x52, 0xd7, 0x85, 0xf2, 0x09, 0xe3, + 0xa9, 0xf6, 0x55, 0x7e, 0x0b, 0x5e, 0xcc, 0x92, 0x54, 0xa5, 0xdb, 0x93, 0xdf, 0xa8, 0x9d, 0x6d, + 0x95, 0x48, 0x9f, 0x94, 0x55, 0xe6, 0xeb, 0x9e, 0x21, 0xdb, 0x7f, 0xb6, 0xc1, 0x19, 0x46, 0x9c, + 0x24, 0x29, 0x5a, 0xe6, 0xad, 0xf7, 0xd3, 0x42, 0xeb, 0xad, 0xe5, 0x54, 0xc7, 0x7c, 0x28, 0x01, + 0xff, 0x6b, 0x57, 0x2a, 0x5b, 0x57, 0xec, 0x4a, 0xad, 0x64, 0xbd, 0xf2, 0xd3, 0x2c, 0xe2, 0x87, + 0xb0, 0xc3, 0x4f, 0x69, 0x3c, 0xe1, 0x69, 0x42, 0xfd, 0x74, 0x42, 0xce, 0x28, 0x4f, 0x27, 0xfe, + 0x09, 0xf1, 0x4f, 0x65, 0x48, 0x35, 0xef, 0x86, 0x58, 0x7e, 0x2e, 0x57, 0x9f, 0x88, 0xc5, 0xbe, + 0x58, 0x2b, 0x56, 0x93, 0x7d, 0xd9, 0x6a, 0x92, 0x39, 0x1c, 0xc7, 0x62, 0x9e, 0x7d, 0x33, 0x39, + 0x54, 0xb6, 0xae, 0x98, 0x43, 0xad, 0xe4, 0xd5, 0xca, 0xe1, 0x37, 0x57, 0x87, 0xca, 0xd6, 0x95, + 0x73, 0xf8, 0x4a, 0xd5, 0xe1, 0xbf, 0x2d, 0x70, 0x3c, 0x32, 0x63, 0x0b, 0x82, 0xfc, 0x3c, 0x87, + 0x77, 0xb3, 0xc3, 0xa9, 0xb1, 0xf7, 0xfa, 0x86, 0xfc, 0x0d, 0x07, 0x5f, 0x7d, 0x66, 0x29, 0xad, + 0x57, 0xcc, 0x96, 0x56, 0xb2, 0x9e, 0xad, 0x9f, 0x5d, 0x31, 0x5b, 0xed, 0xbf, 0x5b, 0x50, 0x7e, + 0x46, 0x52, 0x8c, 0x76, 0xa0, 0xb4, 0x4f, 0x96, 0xee, 0x75, 0x28, 0x9d, 0x92, 0xa5, 0x9e, 0x8f, + 0xe2, 0x13, 0x21, 0x28, 0xef, 0x93, 0x25, 0x17, 0x63, 0xf2, 0x94, 0x2c, 0xcd, 0x14, 0x94, 0xdf, + 0x42, 0xe8, 0x10, 0x87, 0x42, 0x68, 0x81, 0x43, 0x23, 0xb4, 0xc0, 0xa1, 0x10, 0x3a, 0xc4, 0xa1, + 0x14, 0x5a, 0xe0, 0x30, 0x13, 0x12, 0xdf, 0xe8, 0xfb, 0xe0, 0xec, 0x93, 0xa5, 0x96, 0x5b, 0x35, + 0x66, 0x34, 0xd9, 0xb9, 0xa6, 0x1f, 0x42, 0x55, 0xa1, 0xb9, 0xfb, 0x2e, 0x94, 0x4e, 0x17, 0x26, + 0x3f, 0x3b, 0xc5, 0xfc, 0x88, 0x08, 0x3a, 0x0a, 0xe6, 0x09, 0x4c, 0xfb, 0xaf, 0x25, 0x70, 0xd4, + 0x26, 0xa1, 0xf7, 0xa1, 0x66, 0xce, 0x98, 0xb5, 0xdb, 0x06, 0x82, 0x5a, 0xa0, 0xd7, 0xa4, 0x4d, + 0xdb, 0xcb, 0x68, 0x74, 0x1b, 0xec, 0xe1, 0xc0, 0xdd, 0xc9, 0x25, 0xe4, 0x49, 0x9c, 0xd8, 0xd7, + 0x2d, 0x21, 0x2a, 0x42, 0x1f, 0x0e, 0xb8, 0x70, 0x98, 0x06, 0x26, 0x3e, 0xf1, 0x89, 0xfa, 0xe0, + 0xa8, 0xbe, 0xba, 0x50, 0xb6, 0x70, 0x3d, 0xb3, 0x37, 0x5f, 0xcf, 0xd0, 0x23, 0xa8, 0x2a, 0x25, + 0x5c, 0xd4, 0xb3, 0x62, 0x9a, 0xc8, 0x2f, 0xee, 0x64, 0xcf, 0x20, 0xd1, 0x8f, 0xa0, 0xdc, 0x0b, + 0xd9, 0xd1, 0xc5, 0x2e, 0xe4, 0xf7, 0x1a, 0xbb, 0x78, 0xaf, 0x41, 0x03, 0xa8, 0xfd, 0x82, 0xf9, + 0x38, 0xa5, 0x2c, 0x12, 0x9b, 0x17, 0xe1, 0x19, 0x31, 0x87, 0xa5, 0xf8, 0x16, 0xbc, 0xf9, 0x9c, + 0x06, 0x7a, 0x87, 0xe4, 0xb7, 0xcc, 0x41, 0x2c, 0x2e, 0x24, 0x2a, 0x07, 0xb1, 0x3c, 0xc2, 0x8d, + 0x16, 0x79, 0x84, 0x87, 0x86, 0xd8, 0x74, 0x84, 0xeb, 0x10, 0x8c, 0x80, 0x97, 0xa3, 0xdb, 0x4f, + 0xa0, 0xda, 0x67, 0x51, 0x9a, 0xb0, 0x10, 0x7d, 0x00, 0x6e, 
0x3f, 0x21, 0x38, 0x25, 0xc3, 0x28, + 0x20, 0x67, 0xa6, 0x6f, 0xde, 0x86, 0x7a, 0xcc, 0x58, 0x38, 0xe1, 0xf4, 0x33, 0xe5, 0x67, 0x76, + 0x5f, 0xfa, 0x96, 0x57, 0x13, 0x2b, 0xcf, 0xe9, 0x67, 0xa4, 0xfd, 0x47, 0x1b, 0x1a, 0x1e, 0x89, + 0x43, 0xaa, 0xf4, 0xa2, 0xfb, 0xe2, 0x0e, 0xe3, 0xb3, 0x05, 0x49, 0x96, 0xee, 0x5d, 0xb8, 0x16, + 0x90, 0x90, 0xa4, 0x24, 0x98, 0xe0, 0x29, 0x89, 0xb2, 0x63, 0x7e, 0x5b, 0x73, 0x1f, 0x4b, 0x26, + 0xc2, 0xe2, 0x52, 0x72, 0x84, 0x43, 0x59, 0x46, 0xf7, 0xe0, 0xb5, 0x13, 0x3a, 0x3d, 0x99, 0xcc, + 0x39, 0x9e, 0x92, 0x55, 0xb1, 0x6f, 0x8b, 0x85, 0xb1, 0xe0, 0x2b, 0x41, 0x77, 0x17, 0xae, 0x87, + 0xec, 0xc5, 0x2a, 0xd4, 0x96, 0xd0, 0x6b, 0x21, 0x7b, 0x51, 0x40, 0x8a, 0xc9, 0xa7, 0x65, 0x6e, + 0x82, 0xb3, 0xa2, 0x54, 0x53, 0xc2, 0xd7, 0x44, 0x4e, 0x82, 0x60, 0x55, 0xd3, 0xb6, 0xe6, 0x6a, + 0xf1, 0xef, 0xc1, 0x6b, 0x89, 0x89, 0x36, 0x9a, 0x2a, 0xa8, 0xde, 0x9d, 0xeb, 0x85, 0x05, 0x89, + 0x6e, 0x1f, 0x03, 0x0c, 0x28, 0x97, 0xc9, 0x20, 0x09, 0xfa, 0x55, 0x3e, 0xfc, 0x6e, 0x15, 0x77, + 0x3f, 0x2f, 0x1e, 0x55, 0x06, 0x6f, 0x42, 0x5d, 0xfc, 0xf2, 0x18, 0xeb, 0xce, 0xa9, 0x7b, 0x39, + 0x43, 0x16, 0x0e, 0x0b, 0x88, 0xbc, 0xa2, 0x8a, 0xc2, 0x61, 0x01, 0x69, 0x7f, 0x5e, 0x01, 0xa7, + 0x87, 0xfd, 0xd3, 0x79, 0x8c, 0xc6, 0x50, 0xff, 0x39, 0x49, 0x55, 0xc9, 0xa2, 0x77, 0x56, 0x2c, + 0xca, 0xda, 0x3a, 0x6f, 0x51, 0x30, 0x51, 0x0b, 0x2a, 0x1f, 0xbd, 0x88, 0x88, 0xea, 0xaa, 0x78, + 0xbd, 0xa4, 0x63, 0xf4, 0xa0, 0x50, 0x74, 0x97, 0x56, 0xfb, 0x69, 0x36, 0xf7, 0x2f, 0x2b, 0xd1, + 0x81, 0x2d, 0x8d, 0x93, 0x13, 0xdc, 0xbd, 0x03, 0x15, 0xc1, 0xd7, 0x7b, 0x95, 0xf5, 0xb2, 0xe5, + 0x29, 0x36, 0xfa, 0x9d, 0x05, 0xf6, 0x70, 0x84, 0x0e, 0x44, 0xcd, 0x4d, 0x29, 0x17, 0xf7, 0xd1, + 0xde, 0xe5, 0x4c, 0xb9, 0x48, 0x35, 0x96, 0x7d, 0x4e, 0xb1, 0x6c, 0xb1, 0x6e, 0xe6, 0xf8, 0xdd, + 0x5c, 0x9b, 0x16, 0xb0, 0x36, 0x09, 0x8c, 0xb2, 0xb9, 0xe4, 0x16, 0x6d, 0x6a, 0x53, 0xf9, 0x48, + 0x2a, 0x5d, 0xf0, 0x62, 0xd4, 0x5d, 0x5e, 0xce, 0xbb, 0xfc, 0xd2, 0x43, 0x4a, 0xed, 0xfb, 0xda, + 0x90, 0xfa, 0x93, 0x05, 0xd0, 0x67, 0xb3, 0x38, 0x21, 0x9c, 0x93, 0x00, 0x3d, 0xfd, 0xaf, 0x0e, + 0xde, 0x2c, 0x38, 0x28, 0x07, 0xd6, 0x85, 0x6e, 0x0d, 0x73, 0xb7, 0x1e, 0x9d, 0x77, 0xeb, 0xed, + 0x0d, 0x6e, 0xe5, 0x2e, 0x9c, 0xf7, 0xb0, 0xfd, 0x45, 0x15, 0xca, 0xc3, 0xe8, 0x98, 0xa1, 0xcf, + 0x2d, 0xa8, 0xc8, 0xc1, 0x83, 0x7e, 0x0d, 0x95, 0x3e, 0x9b, 0x47, 0xf2, 0x65, 0xc8, 0x53, 0x96, + 0x10, 0xe5, 0xe6, 0xb6, 0xa7, 0x29, 0xb7, 0x05, 0x8d, 0x79, 0xe4, 0xb3, 0xd9, 0x8c, 0xa6, 0x29, + 0x09, 0xf4, 0xab, 0xa2, 0xc8, 0x12, 0xa7, 0x0e, 0x15, 0xba, 0x68, 0x34, 0x95, 0xc1, 0xd4, 0xbc, + 0x8c, 0x46, 0x1f, 0x42, 0x79, 0x3c, 0x1e, 0x0e, 0xd0, 0x77, 0xa0, 0xde, 0xcf, 0x04, 0x36, 0xe4, + 0x03, 0x7d, 0x17, 0x1a, 0xe3, 0x82, 0xce, 0x4d, 0x90, 0x7f, 0x59, 0x50, 0x1a, 0x31, 0xf1, 0x9c, + 0xac, 0xe1, 0x38, 0x9e, 0x14, 0x66, 0x79, 0x15, 0xc7, 0xf1, 0x81, 0x1e, 0xe7, 0x92, 0x6d, 0x17, + 0x46, 0xfc, 0x4a, 0x6f, 0x97, 0xce, 0xf7, 0xb6, 0x6a, 0xbf, 0x72, 0xa1, 0x5c, 0xcf, 0x64, 0xfb, + 0xb9, 0xef, 0x40, 0xc9, 0x8f, 0xe7, 0xf2, 0x8d, 0xd9, 0xd8, 0xbb, 0xb1, 0xfa, 0xe6, 0x38, 0x66, + 0x9d, 0xfe, 0x68, 0xec, 0x09, 0x80, 0xdb, 0x05, 0x67, 0x46, 0x66, 0x2c, 0x59, 0xea, 0x47, 0xe7, + 0xce, 0x1a, 0xf4, 0x99, 0x5c, 0xf6, 0x34, 0xcc, 0x7d, 0x57, 0x4f, 0x93, 0xea, 0xfa, 0x3d, 0x4c, + 0xc2, 0x0f, 0x58, 0x40, 0xd4, 0x90, 0x41, 0xff, 0xb4, 0xa0, 0x2c, 0xc8, 0x8d, 0x47, 0xd7, 0x5b, + 0xb0, 0x4d, 0xa3, 0x94, 0x24, 0x11, 0x0e, 0x27, 0x38, 0x08, 0x12, 0x1d, 0xf4, 0x96, 0x61, 0x3e, + 0x0e, 0x82, 0x44, 0x80, 0xc8, 0x59, 0x11, 0xa4, 0x12, 0xb0, 0x65, 0x98, 0x12, 0xa4, 
0x43, 0x2d, + 0x5f, 0x3e, 0xd4, 0xca, 0xa5, 0x43, 0x1d, 0xb1, 0xc0, 0x3c, 0xc7, 0xd7, 0x43, 0x15, 0x8b, 0x9e, + 0x84, 0xa0, 0x7d, 0x28, 0xf5, 0x47, 0x63, 0xf7, 0x06, 0x54, 0x42, 0x3a, 0xa3, 0xea, 0x45, 0x6b, + 0x79, 0x8a, 0x10, 0x8f, 0x56, 0x7d, 0x45, 0x94, 0x41, 0x5a, 0x9e, 0x21, 0x05, 0x5e, 0x1e, 0x45, + 0x32, 0x2e, 0x31, 0xa3, 0x04, 0x81, 0x0e, 0xc0, 0x51, 0x9e, 0xfc, 0x9f, 0xf4, 0x7d, 0xa0, 0xe2, + 0x70, 0xf7, 0xc4, 0xd3, 0x3a, 0x30, 0xcd, 0x77, 0x63, 0x53, 0x3c, 0x85, 0x31, 0x25, 0xb1, 0xe8, + 0x11, 0x54, 0xc4, 0x16, 0x72, 0xf7, 0x21, 0x54, 0xc4, 0xa6, 0x1a, 0xe9, 0xcd, 0x1b, 0x5f, 0x9c, + 0xb7, 0x12, 0x8d, 0x5e, 0x87, 0xd2, 0x70, 0xc4, 0xe5, 0x55, 0x2f, 0xd6, 0xe7, 0xa7, 0x4d, 0xe3, + 0x76, 0x15, 0x2a, 0x4f, 0x66, 0x71, 0xba, 0xec, 0x1d, 0x7f, 0xf1, 0xf2, 0x8e, 0xf5, 0xb7, 0x97, + 0x77, 0xac, 0x7f, 0xbc, 0xbc, 0x63, 0xc1, 0x6d, 0x96, 0x4c, 0x3b, 0x8b, 0x00, 0x63, 0xde, 0x59, + 0xe0, 0x30, 0xe8, 0xe0, 0x98, 0x0a, 0xfd, 0xda, 0x54, 0xaf, 0x71, 0x88, 0xc3, 0x60, 0xa4, 0x88, + 0x91, 0xf5, 0xf1, 0xbd, 0xc2, 0x1f, 0x76, 0x52, 0xa8, 0x2b, 0x84, 0xba, 0xf2, 0x7f, 0xbf, 0x69, + 0x12, 0xfb, 0x85, 0xbf, 0xfd, 0x8e, 0x1c, 0xf9, 0x07, 0xdd, 0x83, 0xff, 0x04, 0x00, 0x00, 0xff, + 0xff, 0xdf, 0xa0, 0xb1, 0x70, 0x16, 0x14, 0x00, 0x00, +} + +func (m *Search) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Search) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Search) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Search_Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Search_Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Search_Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Vector) > 0 { + for iNdEx := len(m.Vector) - 1; iNdEx >= 0; iNdEx-- { + f2 := math.Float32bits(float32(m.Vector[iNdEx])) + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(f2)) + } + i = encodeVarintPayload(dAtA, i, uint64(len(m.Vector)*4)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Search_MultiRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Search_MultiRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Search_MultiRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Search_IDRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Search_IDRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Search_IDRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Search_MultiIDRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Search_MultiIDRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Search_MultiIDRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Search_ObjectRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Search_ObjectRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Search_ObjectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Object) > 0 { + i -= len(m.Object) + copy(dAtA[i:], m.Object) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Object))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Search_Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Search_Config) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() 
+ return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Search_Config) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Filters != nil { + { + size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Timeout != 0 { + i = encodeVarintPayload(dAtA, i, uint64(m.Timeout)) + i-- + dAtA[i] = 0x28 + } + if m.Epsilon != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Epsilon)))) + i-- + dAtA[i] = 0x25 + } + if m.Radius != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Radius)))) + i-- + dAtA[i] = 0x1d + } + if m.Num != 0 { + i = encodeVarintPayload(dAtA, i, uint64(m.Num)) + i-- + dAtA[i] = 0x10 + } + if len(m.RequestId) > 0 { + i -= len(m.RequestId) + copy(dAtA[i:], m.RequestId) + i = encodeVarintPayload(dAtA, i, uint64(len(m.RequestId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Search_Response) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Search_Response) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Search_Response) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.RequestId) > 0 { + i -= len(m.RequestId) + copy(dAtA[i:], m.RequestId) + i = encodeVarintPayload(dAtA, i, uint64(len(m.RequestId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Search_Responses) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Search_Responses) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Search_Responses) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Responses) > 0 { + for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Filter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Filter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Filter) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Filter_Target) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Filter_Target) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Filter_Target) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Port != 0 { + i = encodeVarintPayload(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x10 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Filter_Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Filter_Config) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Filter_Config) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Targets) > 0 { + for iNdEx := len(m.Targets) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Targets[iNdEx]) + copy(dAtA[i:], m.Targets[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Targets[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Insert) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Insert) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Insert) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Insert_Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Insert_Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Insert_Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Vector != nil { + { + size, err := m.Vector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + 
return len(dAtA) - i, nil +} + +func (m *Insert_MultiRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Insert_MultiRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Insert_MultiRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Insert_Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Insert_Config) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Insert_Config) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Filters != nil { + { + size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.SkipStrictExistCheck { + i-- + if m.SkipStrictExistCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Update) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Update) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Update) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Update_Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Update_Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Update_Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Vector != nil { + { + size, err := m.Vector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*Update_MultiRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Update_MultiRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Update_MultiRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Update_Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Update_Config) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Update_Config) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Filters != nil { + { + size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.SkipStrictExistCheck { + i-- + if m.SkipStrictExistCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Upsert) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Upsert) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Upsert) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Upsert_Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Upsert_Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Upsert_Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Vector != nil { + { + size, err := m.Vector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Upsert_MultiRequest) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Upsert_MultiRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Upsert_MultiRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Upsert_Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Upsert_Config) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Upsert_Config) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Filters != nil { + { + size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.SkipStrictExistCheck { + i-- + if m.SkipStrictExistCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Remove) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Remove) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Remove) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Remove_Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Remove_Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Remove_Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Id != nil { + { + size, err := m.Id.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Remove_MultiRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Remove_MultiRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Remove_MultiRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Remove_Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Remove_Config) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Remove_Config) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.SkipStrictExistCheck { + i-- + if m.SkipStrictExistCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Meta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Meta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Meta_Key) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta_Key) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Meta_Key) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Meta_Keys) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta_Keys) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Meta_Keys) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Keys) > 0 { + for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { + i -= 
len(m.Keys[iNdEx]) + copy(dAtA[i:], m.Keys[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Keys[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Meta_Val) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta_Val) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Meta_Val) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Val) > 0 { + i -= len(m.Val) + copy(dAtA[i:], m.Val) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Val))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Meta_Vals) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta_Vals) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Meta_Vals) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Vals) > 0 { + for iNdEx := len(m.Vals) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Vals[iNdEx]) + copy(dAtA[i:], m.Vals[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Vals[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Meta_KeyVal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta_KeyVal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Meta_KeyVal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Val) > 0 { + i -= len(m.Val) + copy(dAtA[i:], m.Val) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Val))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Meta_KeyVals) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta_KeyVals) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Meta_KeyVals) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Kvs) > 0 { + for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Object) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Object) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Object) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Object_Distance) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Object_Distance) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Object_Distance) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Distance != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Distance)))) + i-- + dAtA[i] = 0x15 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Object_ID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Object_ID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Object_ID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Object_IDs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Object_IDs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Object_IDs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ids) > 0 { + for iNdEx := len(m.Ids) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Ids[iNdEx]) + copy(dAtA[i:], m.Ids[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Ids[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Object_Vector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Object_Vector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Object_Vector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Vector) > 0 { + for iNdEx := len(m.Vector) - 1; iNdEx >= 0; iNdEx-- { + f17 := math.Float32bits(float32(m.Vector[iNdEx])) + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(f17)) + } + i = encodeVarintPayload(dAtA, i, uint64(len(m.Vector)*4)) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Object_Vectors) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Object_Vectors) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Object_Vectors) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Vectors) > 0 { + for iNdEx := len(m.Vectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Vectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Object_Blob) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Object_Blob) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Object_Blob) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Object) > 0 { + i -= len(m.Object) + copy(dAtA[i:], m.Object) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Object))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Object_Location) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Object_Location) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Object_Location) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ips) > 0 { + for iNdEx := len(m.Ips) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Ips[iNdEx]) + copy(dAtA[i:], m.Ips[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Ips[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Object_Locations) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Object_Locations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Object_Locations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Locations) > 0 { + for iNdEx := len(m.Locations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Locations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Control) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Control) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Control) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Control_CreateIndexRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Control_CreateIndexRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Control_CreateIndexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.PoolSize != 0 { + i = encodeVarintPayload(dAtA, i, uint64(m.PoolSize)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Replication) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Replication) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Replication) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Replication_Recovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Replication_Recovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Replication_Recovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.DeletedAgents) > 0 { + for iNdEx := len(m.DeletedAgents) - 1; iNdEx >= 0; iNdEx-- { + i -= 
len(m.DeletedAgents[iNdEx]) + copy(dAtA[i:], m.DeletedAgents[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.DeletedAgents[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Replication_Rebalance) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Replication_Rebalance) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Replication_Rebalance) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.LowUsageAgents) > 0 { + for iNdEx := len(m.LowUsageAgents) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LowUsageAgents[iNdEx]) + copy(dAtA[i:], m.LowUsageAgents[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.LowUsageAgents[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.HighUsageAgents) > 0 { + for iNdEx := len(m.HighUsageAgents) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.HighUsageAgents[iNdEx]) + copy(dAtA[i:], m.HighUsageAgents[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.HighUsageAgents[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Replication_Agents) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Replication_Agents) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Replication_Agents) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ReplicatingAgent) > 0 { + for iNdEx := len(m.ReplicatingAgent) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ReplicatingAgent[iNdEx]) + copy(dAtA[i:], m.ReplicatingAgent[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.ReplicatingAgent[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.RemovedAgents) > 0 { + for iNdEx := len(m.RemovedAgents) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RemovedAgents[iNdEx]) + copy(dAtA[i:], m.RemovedAgents[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.RemovedAgents[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Agents) > 0 { + for iNdEx := len(m.Agents) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Agents[iNdEx]) + copy(dAtA[i:], m.Agents[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Agents[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Discoverer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Discoverer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Discoverer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Discoverer_Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + 
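// Size() computes the exact wire length of the message, so the buffer allocated above is
// filled completely: MarshalToSizedBuffer writes the fields from the end of dAtA toward the
// front and returns the number of bytes written (len(dAtA) - i), which Marshal slices off as dAtA[:n].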
n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Discoverer_Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Discoverer_Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Node) > 0 { + i -= len(m.Node) + copy(dAtA[i:], m.Node) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Node))) + i-- + dAtA[i] = 0x1a + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Backup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Backup_GetVector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_GetVector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_GetVector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Backup_GetVector_Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_GetVector_Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_GetVector_Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Backup_GetVector_Owner) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_GetVector_Owner) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_GetVector_Owner) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= 
len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ip) > 0 { + i -= len(m.Ip) + copy(dAtA[i:], m.Ip) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Ip))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Backup_Locations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_Locations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_Locations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Backup_Locations_Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_Locations_Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_Locations_Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Backup_Remove) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_Remove) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_Remove) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Backup_Remove_Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_Remove_Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_Remove_Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Backup_Remove_RequestMulti) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_Remove_RequestMulti) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_Remove_RequestMulti) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i 
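// i starts at the end of the buffer and moves toward the front as fields are appended in
// reverse order; the blank `_ = i` / `_ = l` assignments only keep the generated code
// compiling for messages that end up writing no fields at all.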
+ var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Uuids) > 0 { + for iNdEx := len(m.Uuids) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Uuids[iNdEx]) + copy(dAtA[i:], m.Uuids[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Uuids[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Backup_IP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_IP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_IP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Backup_IP_Register) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_IP_Register) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_IP_Register) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Backup_IP_Register_Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_IP_Register_Request) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_IP_Register_Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ips) > 0 { + for iNdEx := len(m.Ips) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Ips[iNdEx]) + copy(dAtA[i:], m.Ips[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Ips[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Backup_IP_Remove) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_IP_Remove) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_IP_Remove) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Backup_IP_Remove_Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_IP_Remove_Request) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_IP_Remove_Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ips) > 0 { + for iNdEx := len(m.Ips) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Ips[iNdEx]) + copy(dAtA[i:], m.Ips[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Ips[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Backup_Vector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_Vector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_Vector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ips) > 0 { + for iNdEx := len(m.Ips) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Ips[iNdEx]) + copy(dAtA[i:], m.Ips[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Ips[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Vector) > 0 { + for iNdEx := len(m.Vector) - 1; iNdEx >= 0; iNdEx-- { + f18 := math.Float32bits(float32(m.Vector[iNdEx])) + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(f18)) + } + i = encodeVarintPayload(dAtA, i, uint64(len(m.Vector)*4)) + i-- + dAtA[i] = 0x1a + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Backup_Vectors) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_Vectors) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_Vectors) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Vectors) > 0 { + for iNdEx := len(m.Vectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Vectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Backup_Compressed) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_Compressed) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_Compressed) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Backup_Compressed_Vector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_Compressed_Vector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_Compressed_Vector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ips) > 0 { + for iNdEx := len(m.Ips) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Ips[iNdEx]) + copy(dAtA[i:], m.Ips[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Ips[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Vector) > 0 { + i -= len(m.Vector) + copy(dAtA[i:], m.Vector) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Vector))) + i-- + dAtA[i] = 0x1a + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Backup_Compressed_Vectors) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Backup_Compressed_Vectors) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Backup_Compressed_Vectors) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Vectors) > 0 { + for iNdEx := len(m.Vectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Vectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Info) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Info_Index) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info_Index) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info_Index) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Info_Index_Count) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info_Index_Count) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*Info_Index_Count) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Indexing { + i-- + if m.Indexing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Uncommitted != 0 { + i = encodeVarintPayload(dAtA, i, uint64(m.Uncommitted)) + i-- + dAtA[i] = 0x10 + } + if m.Stored != 0 { + i = encodeVarintPayload(dAtA, i, uint64(m.Stored)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Info_Index_UUID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info_Index_UUID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info_Index_UUID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Info_Index_UUID_Committed) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info_Index_UUID_Committed) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info_Index_UUID_Committed) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Info_Index_UUID_Uncommitted) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info_Index_UUID_Uncommitted) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info_Index_UUID_Uncommitted) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Info_Pod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info_Pod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info_Pod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Node != nil { + { + size, err := m.Node.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + 
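// 0x3a is the wire tag for field 7 with wire type 2 (length-delimited): the Node submessage
// bytes and their varint-encoded length were written just above, and the tag byte is
// prepended here.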
dAtA[i] = 0x3a + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Cpu != nil { + { + size, err := m.Cpu.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Ip) > 0 { + i -= len(m.Ip) + copy(dAtA[i:], m.Ip) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Ip))) + i-- + dAtA[i] = 0x22 + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.AppName) > 0 { + i -= len(m.AppName) + copy(dAtA[i:], m.AppName) + i = encodeVarintPayload(dAtA, i, uint64(len(m.AppName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Info_Node) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info_Node) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info_Node) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Cpu != nil { + { + size, err := m.Cpu.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.ExternalAddr) > 0 { + i -= len(m.ExternalAddr) + copy(dAtA[i:], m.ExternalAddr) + i = encodeVarintPayload(dAtA, i, uint64(len(m.ExternalAddr))) + i-- + dAtA[i] = 0x1a + } + if len(m.InternalAddr) > 0 { + i -= len(m.InternalAddr) + copy(dAtA[i:], m.InternalAddr) + i = encodeVarintPayload(dAtA, i, uint64(len(m.InternalAddr))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Info_CPU) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info_CPU) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info_CPU) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Usage != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Usage)))) + i-- + dAtA[i] = 0x19 + } + 
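// Each single byte written before a fixed64 value is a protobuf wire tag,
// (field_number << 3) | wire_type, with wire type 1 denoting a 64-bit field: 0x19, 0x11 and
// 0x09 mark fields 3, 2 and 1 (Usage, Request, Limit), each stored as the little-endian bit
// pattern of a float64 via math.Float64bits.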
if m.Request != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Request)))) + i-- + dAtA[i] = 0x11 + } + if m.Limit != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Limit)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Info_Memory) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info_Memory) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info_Memory) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Usage != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Usage)))) + i-- + dAtA[i] = 0x19 + } + if m.Request != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Request)))) + i-- + dAtA[i] = 0x11 + } + if m.Limit != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Limit)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Info_Pods) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info_Pods) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info_Pods) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Pods) > 0 { + for iNdEx := len(m.Pods) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Pods[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Info_Nodes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info_Nodes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info_Nodes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Nodes) > 0 { + for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Nodes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPayload(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Info_IPs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info_IPs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info_IPs) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ip) > 0 { + for iNdEx := len(m.Ip) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Ip[iNdEx]) + copy(dAtA[i:], m.Ip[iNdEx]) + i = encodeVarintPayload(dAtA, i, uint64(len(m.Ip[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Empty) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func encodeVarintPayload(dAtA []byte, offset int, v uint64) int { + offset -= sovPayload(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Search) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Search_Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Vector) > 0 { + n += 1 + sovPayload(uint64(len(m.Vector)*4)) + len(m.Vector)*4 + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Search_MultiRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Search_IDRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Search_MultiIDRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Search_ObjectRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Object) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Search_Config) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RequestId) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.Num != 0 { + n += 1 + sovPayload(uint64(m.Num)) + } + if m.Radius != 0 { + n += 5 + } + if m.Epsilon != 0 { + n += 5 + } + if m.Timeout != 0 { + n += 1 + sovPayload(uint64(m.Timeout)) + } + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n 
+= len(m.XXX_unrecognized) + } + return n +} + +func (m *Search_Response) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RequestId) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Search_Responses) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Responses) > 0 { + for _, e := range m.Responses { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Filter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Filter_Target) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.Port != 0 { + n += 1 + sovPayload(uint64(m.Port)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Filter_Config) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Targets) > 0 { + for _, s := range m.Targets { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Insert) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Insert_Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Vector != nil { + l = m.Vector.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Insert_MultiRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Insert_Config) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SkipStrictExistCheck { + n += 2 + } + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Update) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Update_Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Vector != nil { + l = m.Vector.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Update_MultiRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Update_Config) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
m.SkipStrictExistCheck { + n += 2 + } + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Upsert) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Upsert_Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Vector != nil { + l = m.Vector.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Upsert_MultiRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Upsert_Config) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SkipStrictExistCheck { + n += 2 + } + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Remove) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Remove_Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != nil { + l = m.Id.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Remove_MultiRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Remove_Config) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SkipStrictExistCheck { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Meta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Meta_Key) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Meta_Keys) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Keys) > 0 { + for _, s := range m.Keys { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Meta_Val) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Val) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Meta_Vals) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Vals) > 0 { + for _, s := range m.Vals { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } + return n +} + +func (m *Meta_KeyVal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + l = len(m.Val) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Meta_KeyVals) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Kvs) > 0 { + for _, e := range m.Kvs { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Object) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Object_Distance) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.Distance != 0 { + n += 5 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Object_ID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Object_IDs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ids) > 0 { + for _, s := range m.Ids { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Object_Vector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if len(m.Vector) > 0 { + n += 1 + sovPayload(uint64(len(m.Vector)*4)) + len(m.Vector)*4 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Object_Vectors) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Vectors) > 0 { + for _, e := range m.Vectors { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Object_Blob) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + l = len(m.Object) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Object_Location) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if len(m.Ips) > 0 { + for _, s := range m.Ips { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Object_Locations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Locations) > 0 { + for _, e := range m.Locations { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Control) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Control_CreateIndexRequest) Size() (n int) { + if m == 
nil { + return 0 + } + var l int + _ = l + if m.PoolSize != 0 { + n += 1 + sovPayload(uint64(m.PoolSize)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Replication) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Replication_Recovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DeletedAgents) > 0 { + for _, s := range m.DeletedAgents { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Replication_Rebalance) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.HighUsageAgents) > 0 { + for _, s := range m.HighUsageAgents { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if len(m.LowUsageAgents) > 0 { + for _, s := range m.LowUsageAgents { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Replication_Agents) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Agents) > 0 { + for _, s := range m.Agents { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if len(m.RemovedAgents) > 0 { + for _, s := range m.RemovedAgents { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if len(m.ReplicatingAgent) > 0 { + for _, s := range m.ReplicatingAgent { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Discoverer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Discoverer_Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + l = len(m.Node) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_GetVector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_GetVector_Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_GetVector_Owner) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ip) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_Locations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_Locations_Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + 
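+// NOTE: every Size method in this file follows the same sizing rule — a
+// length-delimited field contributes one tag byte plus the varint width of its
+// length (sovPayload) plus the payload bytes, a fixed32 field adds a flat 5
+// bytes, a bool adds 2, and any bytes held in XXX_unrecognized are counted
+// verbatim so MarshalToSizedBuffer can allocate an exactly sized buffer.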
return n +} + +func (m *Backup_Remove) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_Remove_Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_Remove_RequestMulti) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Uuids) > 0 { + for _, s := range m.Uuids { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_IP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_IP_Register) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_IP_Register_Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if len(m.Ips) > 0 { + for _, s := range m.Ips { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_IP_Remove) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_IP_Remove_Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ips) > 0 { + for _, s := range m.Ips { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_Vector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if len(m.Vector) > 0 { + n += 1 + sovPayload(uint64(len(m.Vector)*4)) + len(m.Vector)*4 + } + if len(m.Ips) > 0 { + for _, s := range m.Ips { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_Vectors) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Vectors) > 0 { + for _, e := range m.Vectors { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_Compressed) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_Compressed_Vector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + l = len(m.Vector) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if len(m.Ips) > 0 { + for _, s := range m.Ips { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Backup_Compressed_Vectors) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Vectors) > 0 { + for _, e := range m.Vectors { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if 
m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Info) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Info_Index) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Info_Index_Count) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Stored != 0 { + n += 1 + sovPayload(uint64(m.Stored)) + } + if m.Uncommitted != 0 { + n += 1 + sovPayload(uint64(m.Uncommitted)) + } + if m.Indexing { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Info_Index_UUID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Info_Index_UUID_Committed) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Info_Index_UUID_Uncommitted) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Info_Pod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AppName) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + l = len(m.Ip) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.Cpu != nil { + l = m.Cpu.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.Memory != nil { + l = m.Memory.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Info_Node) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + l = len(m.InternalAddr) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + l = len(m.ExternalAddr) + if l > 0 { + n += 1 + l + sovPayload(uint64(l)) + } + if m.Cpu != nil { + l = m.Cpu.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.Memory != nil { + l = m.Memory.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovPayload(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Info_CPU) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Limit != 0 { + n += 9 + } + if m.Request != 0 { + n += 9 + } + if m.Usage != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Info_Memory) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Limit != 0 { + n += 9 + } + if m.Request != 0 { + n += 9 + } + if m.Usage != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Info_Pods) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Pods) > 0 { + for _, e := range m.Pods { + l = e.Size() + n += 
1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Info_Nodes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + l = e.Size() + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Info_IPs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ip) > 0 { + for _, s := range m.Ip { + l = len(s) + n += 1 + l + sovPayload(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Empty) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovPayload(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPayload(x uint64) (n int) { + return sovPayload(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Search) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Search: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Search: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
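+// NOTE: unrecognized field numbers fall into this default branch: skipPayload
+// measures the full tag+value span, the raw bytes are preserved in
+// XXX_unrecognized so they round-trip through Marshal unchanged, and the read
+// cursor is then advanced past them below.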
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Search_Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 5 { + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + v2 := float32(math.Float32frombits(v)) + m.Vector = append(m.Vector, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 4 + if elementCount != 0 && len(m.Vector) == 0 { + m.Vector = make([]float32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + v2 := float32(math.Float32frombits(v)) + m.Vector = append(m.Vector, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Vector", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Search_Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Search_MultiRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MultiRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MultiRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, &Search_Request{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Search_IDRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IDRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IDRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Search_Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Search_MultiIDRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MultiIDRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MultiIDRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, &Search_IDRequest{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Search_ObjectRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Object = append(m.Object[:0], dAtA[iNdEx:postIndex]...) 
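+// NOTE: the length-delimited bytes field is copied out of dAtA (reusing the
+// existing capacity of m.Object via the [:0] re-slice) rather than aliased,
+// and the nil check below normalizes an empty value to a non-nil empty slice.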
+ if m.Object == nil { + m.Object = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Search_Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Search_Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Num", wireType) + } + m.Num = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Num |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Radius", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Radius = float32(math.Float32frombits(v)) + case 4: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Epsilon", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Epsilon 
= float32(math.Float32frombits(v)) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + m.Timeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &Filter_Config{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Search_Response) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Response: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = 
append(m.Results, &Object_Distance{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Search_Responses) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Responses: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Responses: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Responses = append(m.Responses, &Search_Response{}) + if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Filter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Filter_Target) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Target: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Target: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Filter_Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Targets", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Targets = append(m.Targets, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Insert) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Insert: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Insert: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Insert_Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vector == nil { + m.Vector = &Object_Vector{} + } + if err := m.Vector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Insert_Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Insert_MultiRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MultiRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MultiRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, &Insert_Request{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Insert_Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipStrictExistCheck", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipStrictExistCheck = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &Filter_Config{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Update) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Update: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Update: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Update_Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vector == nil { + m.Vector = &Object_Vector{} + } + if err := m.Vector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Update_Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Update_MultiRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MultiRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MultiRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, &Update_Request{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Update_Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipStrictExistCheck", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipStrictExistCheck = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &Filter_Config{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Upsert) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Upsert: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Upsert: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Upsert_Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vector == nil { + m.Vector = &Object_Vector{} + } + if err := m.Vector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Upsert_Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Upsert_MultiRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MultiRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MultiRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, &Upsert_Request{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Upsert_Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipStrictExistCheck", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipStrictExistCheck = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &Filter_Config{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Remove) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Remove: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Remove: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Remove_Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Id == nil { + m.Id = &Object_ID{} + } + if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Remove_Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Remove_MultiRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MultiRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MultiRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, &Remove_Request{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Remove_Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipStrictExistCheck", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipStrictExistCheck = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Meta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Meta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Meta_Key) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Key: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Key: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Meta_Keys) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Keys: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Keys: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keys = append(m.Keys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Meta_Val) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Val: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Val: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Val = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Meta_Vals) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vals: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vals: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vals", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vals = append(m.Vals, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Meta_KeyVal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyVal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyVal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Val = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Meta_KeyVals) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyVals: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyVals: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kvs = append(m.Kvs, &Meta_KeyVal{}) + if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Object) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Object: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Object: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Object_Distance) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Distance: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Distance: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Distance", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Distance = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Object_ID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Object_IDs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IDs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IDs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ids", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ids = append(m.Ids, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Object_Vector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType == 5 { + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + v2 := float32(math.Float32frombits(v)) + m.Vector = append(m.Vector, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 4 + if elementCount != 0 && len(m.Vector) == 0 { + m.Vector = make([]float32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + v2 := float32(math.Float32frombits(v)) + m.Vector = append(m.Vector, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Vector", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Object_Vectors) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vectors: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vectors: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vectors = append(m.Vectors, &Object_Vector{}) + if err := m.Vectors[len(m.Vectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Object_Blob) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Blob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Blob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Object = append(m.Object[:0], dAtA[iNdEx:postIndex]...) + if m.Object == nil { + m.Object = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Object_Location) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Location: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Location: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ips", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ips = append(m.Ips, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Object_Locations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Locations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Locations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Locations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Locations = append(m.Locations, &Object_Location{}) + if err := m.Locations[len(m.Locations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Control) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Control: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Control: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Control_CreateIndexRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateIndexRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateIndexRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolSize", wireType) + } + m.PoolSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolSize |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Replication) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Replication: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Replication: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Replication_Recovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Recovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Recovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletedAgents", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeletedAgents = append(m.DeletedAgents, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Replication_Rebalance) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rebalance: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rebalance: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HighUsageAgents", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HighUsageAgents = append(m.HighUsageAgents, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LowUsageAgents", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LowUsageAgents = append(m.LowUsageAgents, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Replication_Agents) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Agents: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Agents: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Agents", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Agents = append(m.Agents, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RemovedAgents", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RemovedAgents = append(m.RemovedAgents, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReplicatingAgent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReplicatingAgent = append(m.ReplicatingAgent, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Discoverer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Discoverer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Discoverer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Discoverer_Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Node = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Backup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Backup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_GetVector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetVector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetVector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Backup_GetVector_Request) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowPayload
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Request: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowPayload
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthPayload
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthPayload
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Uuid = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipPayload(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthPayload
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthPayload
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_GetVector_Owner) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Owner: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Owner: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ip = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_Locations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Locations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Locations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_Locations_Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_Remove) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Remove: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Remove: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_Remove_Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_Remove_RequestMulti) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestMulti: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestMulti: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuids", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuids = append(m.Uuids, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_IP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_IP_Register) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Register: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Register: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_IP_Register_Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ips", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ips = append(m.Ips, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_IP_Remove) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Remove: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Remove: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_IP_Remove_Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ips", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ips = append(m.Ips, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_Vector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType == 5 { + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + v2 := float32(math.Float32frombits(v)) + m.Vector = append(m.Vector, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 4 + if elementCount != 0 && len(m.Vector) == 0 { + m.Vector = make([]float32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + v2 := float32(math.Float32frombits(v)) + m.Vector = append(m.Vector, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Vector", wireType) + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ips", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ips = append(m.Ips, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_Vectors) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vectors: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vectors: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vectors = append(m.Vectors, &Backup_Vector{}) + if err := m.Vectors[len(m.Vectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_Compressed) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Compressed: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Compressed: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_Compressed_Vector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vector", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vector = append(m.Vector[:0], dAtA[iNdEx:postIndex]...) + if m.Vector == nil { + m.Vector = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ips", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ips = append(m.Ips, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Backup_Compressed_Vectors) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vectors: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vectors: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vectors = append(m.Vectors, &Backup_Compressed_Vector{}) + if err := m.Vectors[len(m.Vectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Info) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Info: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Info_Index) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Index: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Index: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Info_Index_Count) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Count: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Count: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stored", wireType) + } + m.Stored = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Stored |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uncommitted", wireType) + } + m.Uncommitted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uncommitted |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Indexing", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Indexing = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Info_Index_UUID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UUID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UUID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Info_Index_UUID_Committed) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Committed: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Committed: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Info_Index_UUID_Uncommitted) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Uncommitted: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Uncommitted: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Info_Pod) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ip = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Cpu", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cpu == nil { + m.Cpu = &Info_CPU{} + } + if err := m.Cpu.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory = &Info_Memory{} + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &Info_Node{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Info_Node) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Node: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InternalAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InternalAddr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalAddr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cpu", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cpu == nil { + m.Cpu = &Info_CPU{} + } + if err := m.Cpu.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory = &Info_Memory{} + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &Info_Pods{} + } + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Info_CPU) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPU: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPU: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Limit = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Request = float64(math.Float64frombits(v)) + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Usage = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Info_Memory) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Memory: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Memory: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Limit = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Request = float64(math.Float64frombits(v)) + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Usage = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Info_Pods) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pods: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Pods: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pods = append(m.Pods, &Info_Pod{}) + if err := m.Pods[len(m.Pods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Info_Nodes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Nodes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Nodes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, &Info_Node{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Info_IPs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPayload + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPayload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ip = append(m.Ip, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Empty) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPayload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPayload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPayload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPayload(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPayload + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPayload + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPayload + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPayload + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPayload + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPayload + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPayload = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPayload = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPayload = fmt.Errorf("proto: unexpected end of group") +) diff --git a/apis/grpc/v1/vald/filter.pb.go b/apis/grpc/v1/vald/filter.pb.go new file mode 100644 index 0000000000..f4aa58a1bc --- /dev/null +++ b/apis/grpc/v1/vald/filter.pb.go @@ -0,0 +1,640 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package vald + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
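The hand-written Unmarshal and skipPayload routines in payload.pb.go above decode the raw protobuf wire format for the payload messages. In normal use they are not driven field by field; a round trip goes through gogo's proto package or the generated methods directly. A minimal sketch, assuming the new apis/grpc/v1/payload import path introduced in this diff and made-up IP values:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/vdaas/vald/apis/grpc/v1/payload"
)

func main() {
	// Encode an Info_IPs message; the Ip field layout matches the decoder above.
	in := &payload.Info_IPs{Ip: []string{"10.0.0.1", "10.0.0.2"}}
	raw, err := proto.Marshal(in)
	if err != nil {
		panic(err)
	}

	// Decode it again with the generated Unmarshal shown in this file.
	out := new(payload.Info_IPs)
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Ip) // [10.0.0.1 10.0.0.2]
}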
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("apis/proto/v1/vald/filter.proto", fileDescriptor_a46f8d8eee988c86) } + +var fileDescriptor_a46f8d8eee988c86 = []byte{ + // 376 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xcf, 0x4a, 0xeb, 0x40, + 0x14, 0xc6, 0x9b, 0xbb, 0x68, 0x61, 0x28, 0x17, 0x8c, 0x0b, 0x31, 0x96, 0x2a, 0x75, 0xa1, 0xb8, + 0x98, 0xb1, 0xba, 0x73, 0xd9, 0x85, 0x20, 0x54, 0x2c, 0x2d, 0x0a, 0xba, 0x29, 0x27, 0xc9, 0x98, + 0x8e, 0xa4, 0x99, 0x71, 0x66, 0x12, 0x70, 0xeb, 0x2b, 0xf8, 0x52, 0x2e, 0x0b, 0xbe, 0x80, 0x14, + 0x1f, 0x44, 0x32, 0xd3, 0x40, 0x2b, 0x55, 0xba, 0x98, 0x55, 0xc2, 0xf9, 0xf3, 0x3b, 0x87, 0x8f, + 0x33, 0x1f, 0xda, 0x07, 0xc1, 0x14, 0x11, 0x92, 0x6b, 0x4e, 0x8a, 0x2e, 0x29, 0x20, 0x8d, 0xc9, + 0x23, 0x4b, 0x35, 0x95, 0xd8, 0x04, 0xfd, 0x46, 0x19, 0xc2, 0x45, 0x37, 0x38, 0x5c, 0xad, 0x14, + 0xf0, 0x92, 0x72, 0x88, 0xab, 0xaf, 0xad, 0x0e, 0x5a, 0x09, 0xe7, 0x49, 0x4a, 0x09, 0x08, 0x46, + 0x20, 0xcb, 0xb8, 0x06, 0xcd, 0x78, 0xa6, 0x6c, 0xf6, 0x6c, 0xd6, 0x40, 0xf5, 0x4b, 0x03, 0xf7, + 0x27, 0xa8, 0x39, 0xa2, 0x20, 0xa3, 0xc9, 0x4d, 0xf8, 0x44, 0x23, 0xed, 0x1f, 0xe0, 0x0a, 0x54, + 0x74, 0xb1, 0xcd, 0x60, 0x9b, 0x1a, 0xd2, 0xe7, 0x9c, 0x2a, 0x1d, 0xec, 0xad, 0xa9, 0x18, 0x52, + 0x25, 0x78, 0xa6, 0x68, 0x67, 0xf7, 0xf5, 0xe3, 0xeb, 0xed, 0xdf, 0x76, 0xe7, 0x3f, 0x51, 0x26, + 0x43, 0xb8, 0xe9, 0xbd, 0xf0, 0x4e, 0xfc, 0x7b, 0xe4, 0x8f, 0xb4, 0xa4, 0x30, 0x75, 0x39, 0xaf, + 0x76, 0xec, 0x9d, 0x7a, 0xfe, 0x18, 0x35, 0xaf, 0x32, 0x45, 0xa5, 0x5e, 0x40, 0x77, 0x96, 0x5b, + 0x6c, 0x0c, 0xf7, 0x52, 0x1e, 0xae, 0xb2, 0x16, 0x89, 0x3e, 0x8f, 0x8c, 0x38, 0x4b, 0xbb, 0x33, + 0x03, 0x5b, 0xda, 0x7d, 0x50, 0xed, 0xee, 0x60, 0x8c, 0x5d, 0xb9, 0x8f, 0xb6, 0xae, 0xf3, 0x54, + 0xb3, 0xcd, 0x80, 0xad, 0x3f, 0x80, 0xaa, 0x53, 0x2b, 0x05, 0xb8, 0x15, 0x31, 0x68, 0xea, 0x48, + 0x80, 0xdc, 0xc0, 0xd6, 0x09, 0xe0, 0x60, 0xcc, 0xaa, 0x00, 0x9b, 0x01, 0x37, 0x12, 0xc0, 0xe1, + 0x05, 0xe4, 0xe2, 0xb7, 0x0b, 0x70, 0x30, 0xe6, 0xa7, 0x00, 0x0e, 0x2e, 0xa0, 0x37, 0x7e, 0x9f, + 0xb7, 0xbd, 0xd9, 0xbc, 0xed, 0x7d, 0xce, 0xdb, 0x1e, 0x0a, 0xb8, 0x4c, 0x70, 0x11, 0x03, 0x28, + 0x6c, 0x6c, 0x03, 0x04, 0x2b, 0xfb, 0xca, 0xff, 0x1e, 0xba, 0x83, 0x34, 0xb6, 0xaf, 0x7f, 0xe0, + 0x3d, 0x1c, 0x25, 0x4c, 0x4f, 0xf2, 0x10, 0x47, 0x7c, 0x4a, 0x4c, 0x83, 0xb5, 0x1e, 0xe3, 0x31, + 0x89, 0x14, 0x51, 0x65, 0x46, 0x61, 0xdd, 0x58, 0xc7, 0xf9, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x1b, 0x54, 0x15, 0xbe, 0xa9, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// FilterClient is the client API for Filter service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type FilterClient interface { + SearchObject(ctx context.Context, in *payload.Search_ObjectRequest, opts ...grpc.CallOption) (*payload.Search_Response, error) + StreamSearchObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamSearchObjectClient, error) + InsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamInsertObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamInsertObjectClient, error) + MultiInsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Locations, error) + UpdateObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamUpdateObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamUpdateObjectClient, error) + MultiUpdateObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Locations, error) + UpsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamUpsertObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamUpsertObjectClient, error) + MultiUpsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Locations, error) +} + +type filterClient struct { + cc *grpc.ClientConn +} + +func NewFilterClient(cc *grpc.ClientConn) FilterClient { + return &filterClient{cc} +} + +func (c *filterClient) SearchObject(ctx context.Context, in *payload.Search_ObjectRequest, opts ...grpc.CallOption) (*payload.Search_Response, error) { + out := new(payload.Search_Response) + err := c.cc.Invoke(ctx, "/vald.v1.Filter/SearchObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *filterClient) StreamSearchObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamSearchObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Filter_serviceDesc.Streams[0], "/vald.v1.Filter/StreamSearchObject", opts...) + if err != nil { + return nil, err + } + x := &filterStreamSearchObjectClient{stream} + return x, nil +} + +type Filter_StreamSearchObjectClient interface { + Send(*payload.Search_ObjectRequest) error + Recv() (*payload.Search_Response, error) + grpc.ClientStream +} + +type filterStreamSearchObjectClient struct { + grpc.ClientStream +} + +func (x *filterStreamSearchObjectClient) Send(m *payload.Search_ObjectRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *filterStreamSearchObjectClient) Recv() (*payload.Search_Response, error) { + m := new(payload.Search_Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *filterClient) InsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Filter/InsertObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *filterClient) StreamInsertObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamInsertObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Filter_serviceDesc.Streams[1], "/vald.v1.Filter/StreamInsertObject", opts...) 
+ if err != nil { + return nil, err + } + x := &filterStreamInsertObjectClient{stream} + return x, nil +} + +type Filter_StreamInsertObjectClient interface { + Send(*payload.Object_Blob) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type filterStreamInsertObjectClient struct { + grpc.ClientStream +} + +func (x *filterStreamInsertObjectClient) Send(m *payload.Object_Blob) error { + return x.ClientStream.SendMsg(m) +} + +func (x *filterStreamInsertObjectClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *filterClient) MultiInsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/vald.v1.Filter/MultiInsertObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *filterClient) UpdateObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Filter/UpdateObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *filterClient) StreamUpdateObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamUpdateObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Filter_serviceDesc.Streams[2], "/vald.v1.Filter/StreamUpdateObject", opts...) + if err != nil { + return nil, err + } + x := &filterStreamUpdateObjectClient{stream} + return x, nil +} + +type Filter_StreamUpdateObjectClient interface { + Send(*payload.Object_Blob) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type filterStreamUpdateObjectClient struct { + grpc.ClientStream +} + +func (x *filterStreamUpdateObjectClient) Send(m *payload.Object_Blob) error { + return x.ClientStream.SendMsg(m) +} + +func (x *filterStreamUpdateObjectClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *filterClient) MultiUpdateObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/vald.v1.Filter/MultiUpdateObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *filterClient) UpsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Filter/UpsertObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *filterClient) StreamUpsertObject(ctx context.Context, opts ...grpc.CallOption) (Filter_StreamUpsertObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Filter_serviceDesc.Streams[3], "/vald.v1.Filter/StreamUpsertObject", opts...) 
+ if err != nil { + return nil, err + } + x := &filterStreamUpsertObjectClient{stream} + return x, nil +} + +type Filter_StreamUpsertObjectClient interface { + Send(*payload.Object_Blob) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type filterStreamUpsertObjectClient struct { + grpc.ClientStream +} + +func (x *filterStreamUpsertObjectClient) Send(m *payload.Object_Blob) error { + return x.ClientStream.SendMsg(m) +} + +func (x *filterStreamUpsertObjectClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *filterClient) MultiUpsertObject(ctx context.Context, in *payload.Object_Blob, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/vald.v1.Filter/MultiUpsertObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// FilterServer is the server API for Filter service. +type FilterServer interface { + SearchObject(context.Context, *payload.Search_ObjectRequest) (*payload.Search_Response, error) + StreamSearchObject(Filter_StreamSearchObjectServer) error + InsertObject(context.Context, *payload.Object_Blob) (*payload.Object_Location, error) + StreamInsertObject(Filter_StreamInsertObjectServer) error + MultiInsertObject(context.Context, *payload.Object_Blob) (*payload.Object_Locations, error) + UpdateObject(context.Context, *payload.Object_Blob) (*payload.Object_Location, error) + StreamUpdateObject(Filter_StreamUpdateObjectServer) error + MultiUpdateObject(context.Context, *payload.Object_Blob) (*payload.Object_Locations, error) + UpsertObject(context.Context, *payload.Object_Blob) (*payload.Object_Location, error) + StreamUpsertObject(Filter_StreamUpsertObjectServer) error + MultiUpsertObject(context.Context, *payload.Object_Blob) (*payload.Object_Locations, error) +} + +// UnimplementedFilterServer can be embedded to have forward compatible implementations. 
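The embedding pattern described in the comment above works the same way as in standard grpc-go: a concrete server embeds UnimplementedFilterServer so that any Filter RPC it does not override returns codes.Unimplemented instead of breaking compilation when new methods are added to the service. A minimal sketch, in which myFilter, its insert logic, and the listen address are hypothetical:

package main

import (
	"context"
	"log"
	"net"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
	"github.com/vdaas/vald/apis/grpc/v1/vald"
	"google.golang.org/grpc"
)

// myFilter is a hypothetical implementation: embedding UnimplementedFilterServer
// satisfies the full FilterServer interface, so only the RPCs that are actually
// supported need to be overridden.
type myFilter struct {
	vald.UnimplementedFilterServer
}

func (f *myFilter) InsertObject(ctx context.Context, in *payload.Object_Blob) (*payload.Object_Location, error) {
	// ... hypothetical ingress-filter logic ...
	return &payload.Object_Location{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":8081") // port is illustrative
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	vald.RegisterFilterServer(s, &myFilter{})
	log.Fatal(s.Serve(lis))
}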
+type UnimplementedFilterServer struct { +} + +func (*UnimplementedFilterServer) SearchObject(ctx context.Context, req *payload.Search_ObjectRequest) (*payload.Search_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method SearchObject not implemented") +} +func (*UnimplementedFilterServer) StreamSearchObject(srv Filter_StreamSearchObjectServer) error { + return status.Errorf(codes.Unimplemented, "method StreamSearchObject not implemented") +} +func (*UnimplementedFilterServer) InsertObject(ctx context.Context, req *payload.Object_Blob) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method InsertObject not implemented") +} +func (*UnimplementedFilterServer) StreamInsertObject(srv Filter_StreamInsertObjectServer) error { + return status.Errorf(codes.Unimplemented, "method StreamInsertObject not implemented") +} +func (*UnimplementedFilterServer) MultiInsertObject(ctx context.Context, req *payload.Object_Blob) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiInsertObject not implemented") +} +func (*UnimplementedFilterServer) UpdateObject(ctx context.Context, req *payload.Object_Blob) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateObject not implemented") +} +func (*UnimplementedFilterServer) StreamUpdateObject(srv Filter_StreamUpdateObjectServer) error { + return status.Errorf(codes.Unimplemented, "method StreamUpdateObject not implemented") +} +func (*UnimplementedFilterServer) MultiUpdateObject(ctx context.Context, req *payload.Object_Blob) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiUpdateObject not implemented") +} +func (*UnimplementedFilterServer) UpsertObject(ctx context.Context, req *payload.Object_Blob) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpsertObject not implemented") +} +func (*UnimplementedFilterServer) StreamUpsertObject(srv Filter_StreamUpsertObjectServer) error { + return status.Errorf(codes.Unimplemented, "method StreamUpsertObject not implemented") +} +func (*UnimplementedFilterServer) MultiUpsertObject(ctx context.Context, req *payload.Object_Blob) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiUpsertObject not implemented") +} + +func RegisterFilterServer(s *grpc.Server, srv FilterServer) { + s.RegisterService(&_Filter_serviceDesc, srv) +} + +func _Filter_SearchObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Search_ObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).SearchObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Filter/SearchObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).SearchObject(ctx, req.(*payload.Search_ObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Filter_StreamSearchObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FilterServer).StreamSearchObject(&filterStreamSearchObjectServer{stream}) +} + +type Filter_StreamSearchObjectServer interface { + Send(*payload.Search_Response) error + Recv() (*payload.Search_ObjectRequest, error) + grpc.ServerStream +} + +type 
filterStreamSearchObjectServer struct { + grpc.ServerStream +} + +func (x *filterStreamSearchObjectServer) Send(m *payload.Search_Response) error { + return x.ServerStream.SendMsg(m) +} + +func (x *filterStreamSearchObjectServer) Recv() (*payload.Search_ObjectRequest, error) { + m := new(payload.Search_ObjectRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Filter_InsertObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Blob) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).InsertObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Filter/InsertObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).InsertObject(ctx, req.(*payload.Object_Blob)) + } + return interceptor(ctx, in, info, handler) +} + +func _Filter_StreamInsertObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FilterServer).StreamInsertObject(&filterStreamInsertObjectServer{stream}) +} + +type Filter_StreamInsertObjectServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Object_Blob, error) + grpc.ServerStream +} + +type filterStreamInsertObjectServer struct { + grpc.ServerStream +} + +func (x *filterStreamInsertObjectServer) Send(m *payload.Object_Location) error { + return x.ServerStream.SendMsg(m) +} + +func (x *filterStreamInsertObjectServer) Recv() (*payload.Object_Blob, error) { + m := new(payload.Object_Blob) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Filter_MultiInsertObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Blob) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).MultiInsertObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Filter/MultiInsertObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).MultiInsertObject(ctx, req.(*payload.Object_Blob)) + } + return interceptor(ctx, in, info, handler) +} + +func _Filter_UpdateObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Blob) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).UpdateObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Filter/UpdateObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).UpdateObject(ctx, req.(*payload.Object_Blob)) + } + return interceptor(ctx, in, info, handler) +} + +func _Filter_StreamUpdateObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FilterServer).StreamUpdateObject(&filterStreamUpdateObjectServer{stream}) +} + +type Filter_StreamUpdateObjectServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Object_Blob, error) + grpc.ServerStream +} + +type filterStreamUpdateObjectServer struct { + grpc.ServerStream +} + +func (x *filterStreamUpdateObjectServer) Send(m 
*payload.Object_Location) error { + return x.ServerStream.SendMsg(m) +} + +func (x *filterStreamUpdateObjectServer) Recv() (*payload.Object_Blob, error) { + m := new(payload.Object_Blob) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Filter_MultiUpdateObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Blob) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).MultiUpdateObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Filter/MultiUpdateObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).MultiUpdateObject(ctx, req.(*payload.Object_Blob)) + } + return interceptor(ctx, in, info, handler) +} + +func _Filter_UpsertObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Blob) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).UpsertObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Filter/UpsertObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).UpsertObject(ctx, req.(*payload.Object_Blob)) + } + return interceptor(ctx, in, info, handler) +} + +func _Filter_StreamUpsertObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FilterServer).StreamUpsertObject(&filterStreamUpsertObjectServer{stream}) +} + +type Filter_StreamUpsertObjectServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Object_Blob, error) + grpc.ServerStream +} + +type filterStreamUpsertObjectServer struct { + grpc.ServerStream +} + +func (x *filterStreamUpsertObjectServer) Send(m *payload.Object_Location) error { + return x.ServerStream.SendMsg(m) +} + +func (x *filterStreamUpsertObjectServer) Recv() (*payload.Object_Blob, error) { + m := new(payload.Object_Blob) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Filter_MultiUpsertObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_Blob) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FilterServer).MultiUpsertObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Filter/MultiUpsertObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FilterServer).MultiUpsertObject(ctx, req.(*payload.Object_Blob)) + } + return interceptor(ctx, in, info, handler) +} + +var _Filter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "vald.v1.Filter", + HandlerType: (*FilterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SearchObject", + Handler: _Filter_SearchObject_Handler, + }, + { + MethodName: "InsertObject", + Handler: _Filter_InsertObject_Handler, + }, + { + MethodName: "MultiInsertObject", + Handler: _Filter_MultiInsertObject_Handler, + }, + { + MethodName: "UpdateObject", + Handler: _Filter_UpdateObject_Handler, + }, + { + MethodName: "MultiUpdateObject", + Handler: _Filter_MultiUpdateObject_Handler, + }, + { + 
MethodName: "UpsertObject", + Handler: _Filter_UpsertObject_Handler, + }, + { + MethodName: "MultiUpsertObject", + Handler: _Filter_MultiUpsertObject_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamSearchObject", + Handler: _Filter_StreamSearchObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamInsertObject", + Handler: _Filter_StreamInsertObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamUpdateObject", + Handler: _Filter_StreamUpdateObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamUpsertObject", + Handler: _Filter_StreamUpsertObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "apis/proto/v1/vald/filter.proto", +} diff --git a/apis/grpc/v1/vald/insert.pb.go b/apis/grpc/v1/vald/insert.pb.go new file mode 100644 index 0000000000..c2cacec61c --- /dev/null +++ b/apis/grpc/v1/vald/insert.pb.go @@ -0,0 +1,250 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package vald + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("apis/proto/v1/vald/insert.proto", fileDescriptor_7c50984be03265a6) } + +var fileDescriptor_7c50984be03265a6 = []byte{ + // 282 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0x2c, 0xc8, 0x2c, + 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x33, 0xd4, 0x2f, 0x4b, 0xcc, 0x49, 0xd1, 0xcf, + 0xcc, 0x2b, 0x4e, 0x2d, 0x2a, 0xd1, 0x03, 0x0b, 0x0a, 0xb1, 0x83, 0x84, 0xf4, 0xca, 0x0c, 0xa5, + 0x94, 0x51, 0x55, 0x16, 0x24, 0x56, 0xe6, 0xe4, 0x27, 0xa6, 0xc0, 0x68, 0x88, 0x6a, 0x29, 0x99, + 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xfd, 0xc4, 0x82, 0x4c, 0xfd, 0xc4, 0xbc, 0xbc, 0xfc, 0x92, + 0xc4, 0x92, 0xcc, 0xfc, 0xbc, 0x62, 0x88, 0xac, 0xd1, 0x3f, 0x46, 0x2e, 0x36, 0x4f, 0xb0, 0xe1, + 0x42, 0xa1, 0x70, 0x96, 0x94, 0x1e, 0xcc, 0x88, 0x32, 0x43, 0x3d, 0x88, 0x98, 0x5e, 0x50, 0x6a, + 0x61, 0x69, 0x6a, 0x71, 0x89, 0x94, 0x34, 0xb2, 0x9c, 0x7f, 0x52, 0x56, 0x6a, 0x72, 0x89, 0x9e, + 0x4f, 0x7e, 0x32, 0xd8, 0x50, 0x25, 0xa1, 0xa6, 0xcb, 0x4f, 0x26, 0x33, 0xf1, 0x28, 0xb1, 0x43, + 0x1d, 0x6c, 0xc5, 0xa8, 0x25, 0xe4, 0xcb, 0xc5, 0x13, 0x5c, 0x52, 0x94, 0x9a, 0x98, 0x4b, 0xa9, + 0xe1, 0x0c, 0x1a, 0x8c, 0x06, 0x8c, 0x42, 0x7e, 0x5c, 0xdc, 0xbe, 0xa5, 0x39, 0x25, 0x99, 0x50, + 0xd3, 0xe4, 0xb1, 0x98, 0x06, 0x96, 0x87, 0x19, 0x29, 0x83, 0xc7, 0xc8, 0x62, 0x25, 0x06, 0xa7, + 0xf8, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x91, 0x4b, 0x2a, + 0xbf, 0x28, 0x5d, 0xaf, 0x2c, 0x25, 0x31, 0xb1, 0x58, 0x0f, 0x1c, 0xc8, 0x89, 0x05, 0x99, 0x20, + 0x7d, 0x20, 0xb6, 0x13, 0x57, 0x58, 0x62, 0x4e, 0x0a, 0xc4, 0x8a, 0x00, 0xc6, 0x28, 0xf5, 0xf4, + 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0x06, 0x48, 0x44, 0x81, 0x63, + 0x24, 0xbd, 0xa8, 0x20, 0x19, 0x16, 0x75, 0x49, 0x6c, 0xe0, 0x80, 0x36, 0x06, 0x04, 0x00, 0x00, + 0xff, 0xff, 0x37, 0xe5, 0xee, 0x88, 0xd7, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// InsertClient is the client API for Insert service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type InsertClient interface { + Insert(ctx context.Context, in *payload.Insert_Request, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamInsert(ctx context.Context, opts ...grpc.CallOption) (Insert_StreamInsertClient, error) + MultiInsert(ctx context.Context, in *payload.Insert_MultiRequest, opts ...grpc.CallOption) (*payload.Object_Locations, error) +} + +type insertClient struct { + cc *grpc.ClientConn +} + +func NewInsertClient(cc *grpc.ClientConn) InsertClient { + return &insertClient{cc} +} + +func (c *insertClient) Insert(ctx context.Context, in *payload.Insert_Request, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Insert/Insert", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *insertClient) StreamInsert(ctx context.Context, opts ...grpc.CallOption) (Insert_StreamInsertClient, error) { + stream, err := c.cc.NewStream(ctx, &_Insert_serviceDesc.Streams[0], "/vald.v1.Insert/StreamInsert", opts...) + if err != nil { + return nil, err + } + x := &insertStreamInsertClient{stream} + return x, nil +} + +type Insert_StreamInsertClient interface { + Send(*payload.Insert_Request) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type insertStreamInsertClient struct { + grpc.ClientStream +} + +func (x *insertStreamInsertClient) Send(m *payload.Insert_Request) error { + return x.ClientStream.SendMsg(m) +} + +func (x *insertStreamInsertClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *insertClient) MultiInsert(ctx context.Context, in *payload.Insert_MultiRequest, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/vald.v1.Insert/MultiInsert", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// InsertServer is the server API for Insert service. +type InsertServer interface { + Insert(context.Context, *payload.Insert_Request) (*payload.Object_Location, error) + StreamInsert(Insert_StreamInsertServer) error + MultiInsert(context.Context, *payload.Insert_MultiRequest) (*payload.Object_Locations, error) +} + +// UnimplementedInsertServer can be embedded to have forward compatible implementations. +type UnimplementedInsertServer struct { +} + +func (*UnimplementedInsertServer) Insert(ctx context.Context, req *payload.Insert_Request) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method Insert not implemented") +} +func (*UnimplementedInsertServer) StreamInsert(srv Insert_StreamInsertServer) error { + return status.Errorf(codes.Unimplemented, "method StreamInsert not implemented") +} +func (*UnimplementedInsertServer) MultiInsert(ctx context.Context, req *payload.Insert_MultiRequest) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiInsert not implemented") +} + +func RegisterInsertServer(s *grpc.Server, srv InsertServer) { + s.RegisterService(&_Insert_serviceDesc, srv) +} + +func _Insert_Insert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Insert_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InsertServer).Insert(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Insert/Insert", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InsertServer).Insert(ctx, req.(*payload.Insert_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Insert_StreamInsert_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(InsertServer).StreamInsert(&insertStreamInsertServer{stream}) +} + +type Insert_StreamInsertServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Insert_Request, error) + grpc.ServerStream +} + +type insertStreamInsertServer struct { + grpc.ServerStream +} + +func (x *insertStreamInsertServer) Send(m *payload.Object_Location) error { + return 
x.ServerStream.SendMsg(m) +} + +func (x *insertStreamInsertServer) Recv() (*payload.Insert_Request, error) { + m := new(payload.Insert_Request) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Insert_MultiInsert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Insert_MultiRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InsertServer).MultiInsert(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Insert/MultiInsert", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InsertServer).MultiInsert(ctx, req.(*payload.Insert_MultiRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Insert_serviceDesc = grpc.ServiceDesc{ + ServiceName: "vald.v1.Insert", + HandlerType: (*InsertServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Insert", + Handler: _Insert_Insert_Handler, + }, + { + MethodName: "MultiInsert", + Handler: _Insert_MultiInsert_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamInsert", + Handler: _Insert_StreamInsert_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "apis/proto/v1/vald/insert.proto", +} diff --git a/apis/grpc/v1/vald/object.pb.go b/apis/grpc/v1/vald/object.pb.go new file mode 100644 index 0000000000..daaa447054 --- /dev/null +++ b/apis/grpc/v1/vald/object.pb.go @@ -0,0 +1,250 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package vald + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
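For the bidirectional StreamInsert RPC generated above, the client sends Insert_Request messages and reads back one Object_Location per request. A minimal sketch, assuming the requests are built elsewhere and the batch is small enough to send fully before draining responses (for large batches the send and receive sides would run concurrently):

package example

import (
	"context"
	"io"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
	"github.com/vdaas/vald/apis/grpc/v1/vald"
	"google.golang.org/grpc"
)

// streamInsert pushes a small batch of pre-built requests over
// /vald.v1.Insert/StreamInsert and drains the returned locations.
func streamInsert(ctx context.Context, conn *grpc.ClientConn, reqs []*payload.Insert_Request) error {
	stream, err := vald.NewInsertClient(conn).StreamInsert(ctx)
	if err != nil {
		return err
	}
	for _, req := range reqs {
		if err := stream.Send(req); err != nil {
			return err
		}
	}
	// Close the send direction, then read locations until the server finishes.
	if err := stream.CloseSend(); err != nil {
		return err
	}
	for {
		if _, err := stream.Recv(); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
	}
}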
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("apis/proto/v1/vald/object.proto", fileDescriptor_f3068a4c11e32302) } + +var fileDescriptor_f3068a4c11e32302 = []byte{ + // 276 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0x2c, 0xc8, 0x2c, + 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x33, 0xd4, 0x2f, 0x4b, 0xcc, 0x49, 0xd1, 0xcf, + 0x4f, 0xca, 0x4a, 0x4d, 0x2e, 0xd1, 0x03, 0x0b, 0x0a, 0xb1, 0x83, 0x84, 0xf4, 0xca, 0x0c, 0xa5, + 0x94, 0x51, 0x55, 0x16, 0x24, 0x56, 0xe6, 0xe4, 0x27, 0xa6, 0xc0, 0x68, 0x88, 0x6a, 0x29, 0x99, + 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xfd, 0xc4, 0x82, 0x4c, 0xfd, 0xc4, 0xbc, 0xbc, 0xfc, 0x92, + 0xc4, 0x92, 0xcc, 0xfc, 0xbc, 0x62, 0x88, 0xac, 0xd1, 0x37, 0x46, 0x2e, 0x36, 0x7f, 0xb0, 0xe1, + 0x42, 0x3e, 0x5c, 0x6c, 0xae, 0x15, 0x99, 0xc5, 0x25, 0xc5, 0x42, 0xa2, 0x7a, 0x30, 0x23, 0xca, + 0x0c, 0xf5, 0x20, 0xb2, 0x7a, 0x9e, 0x2e, 0x52, 0xd8, 0x85, 0x95, 0x44, 0x9a, 0x2e, 0x3f, 0x99, + 0xcc, 0xc4, 0x27, 0xc4, 0xa3, 0x9f, 0x0a, 0xd6, 0xae, 0x5f, 0x9d, 0x99, 0x52, 0x2b, 0x14, 0xcc, + 0xc5, 0xe9, 0x9e, 0x5a, 0x02, 0x35, 0x1a, 0x87, 0x81, 0x92, 0x58, 0x84, 0xc3, 0x52, 0x93, 0x4b, + 0xf2, 0x8b, 0x90, 0x0c, 0x85, 0x78, 0x1d, 0x62, 0xa8, 0x27, 0x17, 0x7f, 0x70, 0x49, 0x51, 0x6a, + 0x62, 0x2e, 0x25, 0x46, 0x33, 0x68, 0x30, 0x1a, 0x30, 0x3a, 0xc5, 0x9f, 0x78, 0x24, 0xc7, 0x78, + 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x5c, 0x52, 0xf9, 0x45, 0xe9, 0x7a, 0x65, 0x29, + 0x89, 0x89, 0xc5, 0x7a, 0xe0, 0xc0, 0x4d, 0x2c, 0xc8, 0x04, 0x69, 0x04, 0xb1, 0x9d, 0xb8, 0xc2, + 0x12, 0x73, 0x52, 0x20, 0x46, 0x04, 0x30, 0x46, 0xa9, 0xa7, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, + 0x25, 0xe7, 0xe7, 0xea, 0x83, 0x35, 0x40, 0x22, 0x08, 0x1c, 0x13, 0xe9, 0x45, 0x05, 0xc9, 0xb0, + 0x28, 0x4b, 0x62, 0x03, 0x07, 0xb0, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xe5, 0x83, 0x50, 0xc6, + 0xcf, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ObjectClient is the client API for Object service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ObjectClient interface { + Exists(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_ID, error) + GetObject(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Vector, error) + StreamGetObject(ctx context.Context, opts ...grpc.CallOption) (Object_StreamGetObjectClient, error) +} + +type objectClient struct { + cc *grpc.ClientConn +} + +func NewObjectClient(cc *grpc.ClientConn) ObjectClient { + return &objectClient{cc} +} + +func (c *objectClient) Exists(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_ID, error) { + out := new(payload.Object_ID) + err := c.cc.Invoke(ctx, "/vald.v1.Object/Exists", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectClient) GetObject(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Vector, error) { + out := new(payload.Object_Vector) + err := c.cc.Invoke(ctx, "/vald.v1.Object/GetObject", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectClient) StreamGetObject(ctx context.Context, opts ...grpc.CallOption) (Object_StreamGetObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Object_serviceDesc.Streams[0], "/vald.v1.Object/StreamGetObject", opts...) + if err != nil { + return nil, err + } + x := &objectStreamGetObjectClient{stream} + return x, nil +} + +type Object_StreamGetObjectClient interface { + Send(*payload.Object_ID) error + Recv() (*payload.Object_Vector, error) + grpc.ClientStream +} + +type objectStreamGetObjectClient struct { + grpc.ClientStream +} + +func (x *objectStreamGetObjectClient) Send(m *payload.Object_ID) error { + return x.ClientStream.SendMsg(m) +} + +func (x *objectStreamGetObjectClient) Recv() (*payload.Object_Vector, error) { + m := new(payload.Object_Vector) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ObjectServer is the server API for Object service. +type ObjectServer interface { + Exists(context.Context, *payload.Object_ID) (*payload.Object_ID, error) + GetObject(context.Context, *payload.Object_ID) (*payload.Object_Vector, error) + StreamGetObject(Object_StreamGetObjectServer) error +} + +// UnimplementedObjectServer can be embedded to have forward compatible implementations. +type UnimplementedObjectServer struct { +} + +func (*UnimplementedObjectServer) Exists(ctx context.Context, req *payload.Object_ID) (*payload.Object_ID, error) { + return nil, status.Errorf(codes.Unimplemented, "method Exists not implemented") +} +func (*UnimplementedObjectServer) GetObject(ctx context.Context, req *payload.Object_ID) (*payload.Object_Vector, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented") +} +func (*UnimplementedObjectServer) StreamGetObject(srv Object_StreamGetObjectServer) error { + return status.Errorf(codes.Unimplemented, "method StreamGetObject not implemented") +} + +func RegisterObjectServer(s *grpc.Server, srv ObjectServer) { + s.RegisterService(&_Object_serviceDesc, srv) +} + +func _Object_Exists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_ID) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectServer).Exists(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Object/Exists", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectServer).Exists(ctx, req.(*payload.Object_ID)) + } + return interceptor(ctx, in, info, handler) +} + +func _Object_GetObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Object_ID) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectServer).GetObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Object/GetObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectServer).GetObject(ctx, req.(*payload.Object_ID)) + } + return interceptor(ctx, in, info, handler) +} + +func _Object_StreamGetObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ObjectServer).StreamGetObject(&objectStreamGetObjectServer{stream}) +} + +type Object_StreamGetObjectServer interface { + 
Send(*payload.Object_Vector) error + Recv() (*payload.Object_ID, error) + grpc.ServerStream +} + +type objectStreamGetObjectServer struct { + grpc.ServerStream +} + +func (x *objectStreamGetObjectServer) Send(m *payload.Object_Vector) error { + return x.ServerStream.SendMsg(m) +} + +func (x *objectStreamGetObjectServer) Recv() (*payload.Object_ID, error) { + m := new(payload.Object_ID) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Object_serviceDesc = grpc.ServiceDesc{ + ServiceName: "vald.v1.Object", + HandlerType: (*ObjectServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Exists", + Handler: _Object_Exists_Handler, + }, + { + MethodName: "GetObject", + Handler: _Object_GetObject_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamGetObject", + Handler: _Object_StreamGetObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "apis/proto/v1/vald/object.proto", +} diff --git a/apis/grpc/v1/vald/remove.pb.go b/apis/grpc/v1/vald/remove.pb.go new file mode 100644 index 0000000000..6f059e05f8 --- /dev/null +++ b/apis/grpc/v1/vald/remove.pb.go @@ -0,0 +1,249 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package vald + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("apis/proto/v1/vald/remove.proto", fileDescriptor_5b638f34e0c25c81) } + +var fileDescriptor_5b638f34e0c25c81 = []byte{ + // 263 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0xc1, 0x4a, 0xc4, 0x30, + 0x10, 0x86, 0x37, 0x97, 0x15, 0xa2, 0xa7, 0x1e, 0xeb, 0xd2, 0x85, 0xf5, 0xa0, 0xa7, 0xc4, 0xea, + 0x1b, 0xac, 0x57, 0x57, 0x65, 0x05, 0x0f, 0x5e, 0x64, 0xda, 0x86, 0x18, 0x49, 0x3b, 0x31, 0x49, + 0x03, 0xbe, 0xa1, 0x47, 0x1f, 0x41, 0x7a, 0xf6, 0x21, 0xa4, 0xc9, 0x56, 0x14, 0xc4, 0x8b, 0xa7, + 0x0c, 0xff, 0x3f, 0xf3, 0x05, 0xfe, 0x9f, 0x2e, 0xc1, 0x28, 0xc7, 0x8d, 0x45, 0x8f, 0x3c, 0x94, + 0x3c, 0x80, 0x6e, 0xb8, 0x15, 0x2d, 0x06, 0xc1, 0xa2, 0x98, 0xed, 0x8d, 0x12, 0x0b, 0x65, 0x7e, + 0xf4, 0x73, 0xd3, 0xc0, 0x8b, 0x46, 0x68, 0xa6, 0x37, 0x6d, 0xe7, 0x0b, 0x89, 0x28, 0xb5, 0xe0, + 0x60, 0x14, 0x87, 0xae, 0x43, 0x0f, 0x5e, 0x61, 0xe7, 0x92, 0x7b, 0xf6, 0x41, 0xe8, 0x7c, 0x1b, + 0xe1, 0xd9, 0xc5, 0xd7, 0x94, 0xb3, 0x09, 0x11, 0x4a, 0x96, 0x34, 0xb6, 0x15, 0xcf, 0xbd, 0x70, + 0x3e, 0x3f, 0xfc, 0xee, 0x5d, 0x57, 0x4f, 0xa2, 0xf6, 0xec, 0x12, 0xeb, 0x08, 0x5d, 0xcd, 0xb2, + 0x0d, 0x3d, 0xb8, 0xf5, 0x56, 0x40, 0xfb, 0x6f, 0xd4, 0x09, 0x39, 0x25, 0xd9, 0x15, 0xdd, 0xdf, + 0xf4, 0xda, 0xab, 0x1d, 0x6d, 0xf9, 0x0b, 0x6d, 0xe7, 0x27, 0xe4, 0xe2, 0x0f, 0xa4, 0x5b, 0xcd, + 0xd6, 0x0f, 0xaf, 0x43, 0x41, 0xde, 0x86, 0x82, 0xbc, 0x0f, 0x05, 0xa1, 0x39, 0x5a, 0xc9, 0x42, + 0x03, 0xe0, 0x58, 0x8c, 0x14, 0x8c, 0x1a, 0xef, 0xc6, 0x79, 0x4d, 0xef, 0x40, 0x37, 0xe9, 0x8b, + 0x1b, 0x72, 0x7f, 0x2c, 0x95, 0x7f, 0xec, 0x2b, 0x56, 0x63, 0xcb, 0xe3, 0x41, 0xaa, 0x25, 0xe6, + 0x2f, 0xad, 0xa9, 0xa7, 0xa2, 0xaa, 0x79, 0x8c, 0xf5, 0xfc, 0x33, 0x00, 0x00, 0xff, 0xff, 0x91, + 0xfa, 0x18, 0x91, 0xc5, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// RemoveClient is the client API for Remove service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type RemoveClient interface { + Remove(ctx context.Context, in *payload.Remove_Request, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamRemove(ctx context.Context, opts ...grpc.CallOption) (Remove_StreamRemoveClient, error) + MultiRemove(ctx context.Context, in *payload.Remove_MultiRequest, opts ...grpc.CallOption) (*payload.Object_Locations, error) +} + +type removeClient struct { + cc *grpc.ClientConn +} + +func NewRemoveClient(cc *grpc.ClientConn) RemoveClient { + return &removeClient{cc} +} + +func (c *removeClient) Remove(ctx context.Context, in *payload.Remove_Request, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Remove/Remove", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *removeClient) StreamRemove(ctx context.Context, opts ...grpc.CallOption) (Remove_StreamRemoveClient, error) { + stream, err := c.cc.NewStream(ctx, &_Remove_serviceDesc.Streams[0], "/vald.v1.Remove/StreamRemove", opts...) 
+ if err != nil { + return nil, err + } + x := &removeStreamRemoveClient{stream} + return x, nil +} + +type Remove_StreamRemoveClient interface { + Send(*payload.Remove_Request) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type removeStreamRemoveClient struct { + grpc.ClientStream +} + +func (x *removeStreamRemoveClient) Send(m *payload.Remove_Request) error { + return x.ClientStream.SendMsg(m) +} + +func (x *removeStreamRemoveClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *removeClient) MultiRemove(ctx context.Context, in *payload.Remove_MultiRequest, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/vald.v1.Remove/MultiRemove", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RemoveServer is the server API for Remove service. +type RemoveServer interface { + Remove(context.Context, *payload.Remove_Request) (*payload.Object_Location, error) + StreamRemove(Remove_StreamRemoveServer) error + MultiRemove(context.Context, *payload.Remove_MultiRequest) (*payload.Object_Locations, error) +} + +// UnimplementedRemoveServer can be embedded to have forward compatible implementations. +type UnimplementedRemoveServer struct { +} + +func (*UnimplementedRemoveServer) Remove(ctx context.Context, req *payload.Remove_Request) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method Remove not implemented") +} +func (*UnimplementedRemoveServer) StreamRemove(srv Remove_StreamRemoveServer) error { + return status.Errorf(codes.Unimplemented, "method StreamRemove not implemented") +} +func (*UnimplementedRemoveServer) MultiRemove(ctx context.Context, req *payload.Remove_MultiRequest) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiRemove not implemented") +} + +func RegisterRemoveServer(s *grpc.Server, srv RemoveServer) { + s.RegisterService(&_Remove_serviceDesc, srv) +} + +func _Remove_Remove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Remove_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RemoveServer).Remove(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Remove/Remove", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RemoveServer).Remove(ctx, req.(*payload.Remove_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Remove_StreamRemove_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RemoveServer).StreamRemove(&removeStreamRemoveServer{stream}) +} + +type Remove_StreamRemoveServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Remove_Request, error) + grpc.ServerStream +} + +type removeStreamRemoveServer struct { + grpc.ServerStream +} + +func (x *removeStreamRemoveServer) Send(m *payload.Object_Location) error { + return x.ServerStream.SendMsg(m) +} + +func (x *removeStreamRemoveServer) Recv() (*payload.Remove_Request, error) { + m := new(payload.Remove_Request) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Remove_MultiRemove_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Remove_MultiRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RemoveServer).MultiRemove(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Remove/MultiRemove", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RemoveServer).MultiRemove(ctx, req.(*payload.Remove_MultiRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Remove_serviceDesc = grpc.ServiceDesc{ + ServiceName: "vald.v1.Remove", + HandlerType: (*RemoveServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Remove", + Handler: _Remove_Remove_Handler, + }, + { + MethodName: "MultiRemove", + Handler: _Remove_MultiRemove_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamRemove", + Handler: _Remove_StreamRemove_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "apis/proto/v1/vald/remove.proto", +} diff --git a/apis/grpc/v1/vald/search.pb.go b/apis/grpc/v1/vald/search.pb.go new file mode 100644 index 0000000000..1903cea18a --- /dev/null +++ b/apis/grpc/v1/vald/search.pb.go @@ -0,0 +1,393 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package vald + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("apis/proto/v1/vald/search.proto", fileDescriptor_f8168beed818734d) } + +var fileDescriptor_f8168beed818734d = []byte{ + // 334 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0x3f, 0x4b, 0x3b, 0x31, + 0x18, 0xc7, 0x9b, 0x1f, 0x3f, 0x5a, 0x88, 0x85, 0x4a, 0xc0, 0xe5, 0x2c, 0x2d, 0x9e, 0x83, 0xe2, + 0x90, 0x58, 0xdd, 0x1c, 0x4b, 0x97, 0x0e, 0x15, 0xb1, 0xea, 0xe0, 0xa0, 0x3c, 0xed, 0x85, 0x6b, + 0xe0, 0x7a, 0x89, 0x97, 0xf4, 0xa0, 0xab, 0x9b, 0xb3, 0x6f, 0xca, 0x51, 0xf0, 0x0d, 0xc8, 0xe1, + 0x0b, 0x91, 0x4b, 0xee, 0xb4, 0xc2, 0xf9, 0x07, 0x3a, 0x25, 0x3c, 0xcf, 0xf3, 0xfd, 0xf0, 0x19, + 0xbe, 0xb8, 0x0b, 0x4a, 0x68, 0xa6, 0x12, 0x69, 0x24, 0x4b, 0x7b, 0x2c, 0x85, 0x28, 0x60, 0x9a, + 0x43, 0x32, 0x9d, 0x51, 0x3b, 0x24, 0x8d, 0x7c, 0x44, 0xd3, 0x9e, 0xb7, 0xfb, 0xf5, 0x52, 0xc1, + 0x32, 0x92, 0x10, 0x94, 0xaf, 0xbb, 0xf6, 0xda, 0xa1, 0x94, 0x61, 0xc4, 0x19, 0x28, 0xc1, 0x20, + 0x8e, 0xa5, 0x01, 0x23, 0x64, 0xac, 0xdd, 0xf6, 0xe8, 0xe1, 0x3f, 0xae, 0x8f, 0x2d, 0x9c, 0x5c, + 0x7e, 0xfc, 0x3c, 0x5a, 0x22, 0xd2, 0x1e, 0x75, 0x33, 0x7a, 0xce, 0xef, 0x16, 0x5c, 0x1b, 0x6f, + 0xbb, 0x72, 0xa7, 0x95, 0x8c, 0x35, 0xf7, 0xc9, 0xfd, 0xcb, 0xdb, 0xe3, 0xbf, 0xa6, 0xdf, 0x28, + 0x84, 0x4f, 0xd0, 0x01, 0xb9, 0xc1, 0xd8, 0x9d, 0xf5, 0x97, 0xc3, 0x01, 0x69, 0x57, 0xc4, 0x87, + 0x83, 0x3f, 0xc1, 0xb7, 0x2c, 0xbc, 0xe5, 0xe3, 0x02, 0xce, 0x44, 0x90, 0xf3, 0x47, 0xb8, 0x39, + 0x36, 0x09, 0x87, 0xf9, 0xba, 0xf2, 0xb5, 0x7d, 0x74, 0x88, 0xc8, 0x18, 0x6f, 0xae, 0xe2, 0xd6, + 0x95, 0x76, 0xd0, 0x53, 0xbc, 0x31, 0x5a, 0x44, 0x46, 0x14, 0x8a, 0xdd, 0x8a, 0x84, 0xdd, 0x97, + 0xc8, 0xf6, 0x0f, 0x48, 0xed, 0xd7, 0xc8, 0x05, 0x6e, 0xad, 0xf0, 0xac, 0xe3, 0xce, 0x77, 0xcc, + 0x4f, 0xd1, 0x5f, 0xa8, 0xfd, 0xdb, 0xa7, 0xac, 0x83, 0x9e, 0xb3, 0x0e, 0x7a, 0xcd, 0x3a, 0x08, + 0x7b, 0x32, 0x09, 0x69, 0x1a, 0x00, 0x68, 0x6a, 0xfb, 0x06, 0x4a, 0xe4, 0xb9, 0xfc, 0xdf, 0xc7, + 0x57, 0x10, 0x05, 0x8e, 0x70, 0x86, 0xae, 0xf7, 0x42, 0x61, 0x66, 0x8b, 0x09, 0x9d, 0xca, 0x39, + 0xb3, 0x01, 0xd7, 0x59, 0x5b, 0xce, 0x30, 0x51, 0xd3, 0xb2, 0xc5, 0x93, 0xba, 0xed, 0xdc, 0xf1, + 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0f, 0x51, 0xed, 0xb5, 0xe2, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// SearchClient is the client API for Search service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type SearchClient interface { + Search(ctx context.Context, in *payload.Search_Request, opts ...grpc.CallOption) (*payload.Search_Response, error) + SearchByID(ctx context.Context, in *payload.Search_IDRequest, opts ...grpc.CallOption) (*payload.Search_Response, error) + StreamSearch(ctx context.Context, opts ...grpc.CallOption) (Search_StreamSearchClient, error) + StreamSearchByID(ctx context.Context, opts ...grpc.CallOption) (Search_StreamSearchByIDClient, error) + MultiSearch(ctx context.Context, in *payload.Search_MultiRequest, opts ...grpc.CallOption) (*payload.Search_Responses, error) + MultiSearchByID(ctx context.Context, in *payload.Search_MultiIDRequest, opts ...grpc.CallOption) (*payload.Search_Responses, error) +} + +type searchClient struct { + cc *grpc.ClientConn +} + +func NewSearchClient(cc *grpc.ClientConn) SearchClient { + return &searchClient{cc} +} + +func (c *searchClient) Search(ctx context.Context, in *payload.Search_Request, opts ...grpc.CallOption) (*payload.Search_Response, error) { + out := new(payload.Search_Response) + err := c.cc.Invoke(ctx, "/vald.v1.Search/Search", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *searchClient) SearchByID(ctx context.Context, in *payload.Search_IDRequest, opts ...grpc.CallOption) (*payload.Search_Response, error) { + out := new(payload.Search_Response) + err := c.cc.Invoke(ctx, "/vald.v1.Search/SearchByID", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *searchClient) StreamSearch(ctx context.Context, opts ...grpc.CallOption) (Search_StreamSearchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Search_serviceDesc.Streams[0], "/vald.v1.Search/StreamSearch", opts...) + if err != nil { + return nil, err + } + x := &searchStreamSearchClient{stream} + return x, nil +} + +type Search_StreamSearchClient interface { + Send(*payload.Search_Request) error + Recv() (*payload.Search_Response, error) + grpc.ClientStream +} + +type searchStreamSearchClient struct { + grpc.ClientStream +} + +func (x *searchStreamSearchClient) Send(m *payload.Search_Request) error { + return x.ClientStream.SendMsg(m) +} + +func (x *searchStreamSearchClient) Recv() (*payload.Search_Response, error) { + m := new(payload.Search_Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *searchClient) StreamSearchByID(ctx context.Context, opts ...grpc.CallOption) (Search_StreamSearchByIDClient, error) { + stream, err := c.cc.NewStream(ctx, &_Search_serviceDesc.Streams[1], "/vald.v1.Search/StreamSearchByID", opts...) 
+ if err != nil { + return nil, err + } + x := &searchStreamSearchByIDClient{stream} + return x, nil +} + +type Search_StreamSearchByIDClient interface { + Send(*payload.Search_IDRequest) error + Recv() (*payload.Search_Response, error) + grpc.ClientStream +} + +type searchStreamSearchByIDClient struct { + grpc.ClientStream +} + +func (x *searchStreamSearchByIDClient) Send(m *payload.Search_IDRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *searchStreamSearchByIDClient) Recv() (*payload.Search_Response, error) { + m := new(payload.Search_Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *searchClient) MultiSearch(ctx context.Context, in *payload.Search_MultiRequest, opts ...grpc.CallOption) (*payload.Search_Responses, error) { + out := new(payload.Search_Responses) + err := c.cc.Invoke(ctx, "/vald.v1.Search/MultiSearch", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *searchClient) MultiSearchByID(ctx context.Context, in *payload.Search_MultiIDRequest, opts ...grpc.CallOption) (*payload.Search_Responses, error) { + out := new(payload.Search_Responses) + err := c.cc.Invoke(ctx, "/vald.v1.Search/MultiSearchByID", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SearchServer is the server API for Search service. +type SearchServer interface { + Search(context.Context, *payload.Search_Request) (*payload.Search_Response, error) + SearchByID(context.Context, *payload.Search_IDRequest) (*payload.Search_Response, error) + StreamSearch(Search_StreamSearchServer) error + StreamSearchByID(Search_StreamSearchByIDServer) error + MultiSearch(context.Context, *payload.Search_MultiRequest) (*payload.Search_Responses, error) + MultiSearchByID(context.Context, *payload.Search_MultiIDRequest) (*payload.Search_Responses, error) +} + +// UnimplementedSearchServer can be embedded to have forward compatible implementations. 
+type UnimplementedSearchServer struct { +} + +func (*UnimplementedSearchServer) Search(ctx context.Context, req *payload.Search_Request) (*payload.Search_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") +} +func (*UnimplementedSearchServer) SearchByID(ctx context.Context, req *payload.Search_IDRequest) (*payload.Search_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method SearchByID not implemented") +} +func (*UnimplementedSearchServer) StreamSearch(srv Search_StreamSearchServer) error { + return status.Errorf(codes.Unimplemented, "method StreamSearch not implemented") +} +func (*UnimplementedSearchServer) StreamSearchByID(srv Search_StreamSearchByIDServer) error { + return status.Errorf(codes.Unimplemented, "method StreamSearchByID not implemented") +} +func (*UnimplementedSearchServer) MultiSearch(ctx context.Context, req *payload.Search_MultiRequest) (*payload.Search_Responses, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiSearch not implemented") +} +func (*UnimplementedSearchServer) MultiSearchByID(ctx context.Context, req *payload.Search_MultiIDRequest) (*payload.Search_Responses, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiSearchByID not implemented") +} + +func RegisterSearchServer(s *grpc.Server, srv SearchServer) { + s.RegisterService(&_Search_serviceDesc, srv) +} + +func _Search_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Search_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SearchServer).Search(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Search/Search", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SearchServer).Search(ctx, req.(*payload.Search_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Search_SearchByID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Search_IDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SearchServer).SearchByID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Search/SearchByID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SearchServer).SearchByID(ctx, req.(*payload.Search_IDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Search_StreamSearch_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SearchServer).StreamSearch(&searchStreamSearchServer{stream}) +} + +type Search_StreamSearchServer interface { + Send(*payload.Search_Response) error + Recv() (*payload.Search_Request, error) + grpc.ServerStream +} + +type searchStreamSearchServer struct { + grpc.ServerStream +} + +func (x *searchStreamSearchServer) Send(m *payload.Search_Response) error { + return x.ServerStream.SendMsg(m) +} + +func (x *searchStreamSearchServer) Recv() (*payload.Search_Request, error) { + m := new(payload.Search_Request) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Search_StreamSearchByID_Handler(srv interface{}, stream grpc.ServerStream) error { + return 
srv.(SearchServer).StreamSearchByID(&searchStreamSearchByIDServer{stream}) +} + +type Search_StreamSearchByIDServer interface { + Send(*payload.Search_Response) error + Recv() (*payload.Search_IDRequest, error) + grpc.ServerStream +} + +type searchStreamSearchByIDServer struct { + grpc.ServerStream +} + +func (x *searchStreamSearchByIDServer) Send(m *payload.Search_Response) error { + return x.ServerStream.SendMsg(m) +} + +func (x *searchStreamSearchByIDServer) Recv() (*payload.Search_IDRequest, error) { + m := new(payload.Search_IDRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Search_MultiSearch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Search_MultiRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SearchServer).MultiSearch(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Search/MultiSearch", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SearchServer).MultiSearch(ctx, req.(*payload.Search_MultiRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Search_MultiSearchByID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Search_MultiIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SearchServer).MultiSearchByID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Search/MultiSearchByID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SearchServer).MultiSearchByID(ctx, req.(*payload.Search_MultiIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Search_serviceDesc = grpc.ServiceDesc{ + ServiceName: "vald.v1.Search", + HandlerType: (*SearchServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Search", + Handler: _Search_Search_Handler, + }, + { + MethodName: "SearchByID", + Handler: _Search_SearchByID_Handler, + }, + { + MethodName: "MultiSearch", + Handler: _Search_MultiSearch_Handler, + }, + { + MethodName: "MultiSearchByID", + Handler: _Search_MultiSearchByID_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamSearch", + Handler: _Search_StreamSearch_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamSearchByID", + Handler: _Search_StreamSearchByID_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "apis/proto/v1/vald/search.proto", +} diff --git a/apis/grpc/v1/vald/update.pb.go b/apis/grpc/v1/vald/update.pb.go new file mode 100644 index 0000000000..dee73a2181 --- /dev/null +++ b/apis/grpc/v1/vald/update.pb.go @@ -0,0 +1,250 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package vald + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("apis/proto/v1/vald/update.proto", fileDescriptor_a564bbf4b2600403) } + +var fileDescriptor_a564bbf4b2600403 = []byte{ + // 282 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0x2c, 0xc8, 0x2c, + 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x33, 0xd4, 0x2f, 0x4b, 0xcc, 0x49, 0xd1, 0x2f, + 0x2d, 0x48, 0x49, 0x2c, 0x49, 0xd5, 0x03, 0x0b, 0x0a, 0xb1, 0x83, 0x84, 0xf4, 0xca, 0x0c, 0xa5, + 0x94, 0x51, 0x55, 0x16, 0x24, 0x56, 0xe6, 0xe4, 0x27, 0xa6, 0xc0, 0x68, 0x88, 0x6a, 0x29, 0x99, + 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xfd, 0xc4, 0x82, 0x4c, 0xfd, 0xc4, 0xbc, 0xbc, 0xfc, 0x92, + 0xc4, 0x92, 0xcc, 0xfc, 0xbc, 0x62, 0x88, 0xac, 0xd1, 0x3f, 0x46, 0x2e, 0xb6, 0x50, 0xb0, 0xe1, + 0x42, 0xa1, 0x70, 0x96, 0x94, 0x1e, 0xcc, 0x88, 0x32, 0x43, 0x3d, 0x88, 0x98, 0x5e, 0x50, 0x6a, + 0x61, 0x69, 0x6a, 0x71, 0x89, 0x94, 0x34, 0xb2, 0x9c, 0x7f, 0x52, 0x56, 0x6a, 0x72, 0x89, 0x9e, + 0x4f, 0x7e, 0x32, 0xd8, 0x50, 0x25, 0xa1, 0xa6, 0xcb, 0x4f, 0x26, 0x33, 0xf1, 0x28, 0xb1, 0x43, + 0x1d, 0x6c, 0xc5, 0xa8, 0x25, 0xe4, 0xcb, 0xc5, 0x13, 0x5c, 0x52, 0x94, 0x9a, 0x98, 0x4b, 0xa9, + 0xe1, 0x0c, 0x1a, 0x8c, 0x06, 0x8c, 0x42, 0x7e, 0x5c, 0xdc, 0xbe, 0xa5, 0x39, 0x25, 0x99, 0x50, + 0xd3, 0xe4, 0xb1, 0x98, 0x06, 0x96, 0x87, 0x19, 0x29, 0x83, 0xc7, 0xc8, 0x62, 0x25, 0x06, 0xa7, + 0xf8, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x91, 0x4b, 0x2a, + 0xbf, 0x28, 0x5d, 0xaf, 0x2c, 0x25, 0x31, 0xb1, 0x58, 0x0f, 0x1c, 0xc8, 0x89, 0x05, 0x99, 0x20, + 0x7d, 0x20, 0xb6, 0x13, 0x57, 0x58, 0x62, 0x4e, 0x0a, 0xc4, 0x8a, 0x00, 0xc6, 0x28, 0xf5, 0xf4, + 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0x06, 0x48, 0x44, 0x81, 0x63, + 0x24, 0xbd, 0xa8, 0x20, 0x19, 0x16, 0x75, 0x49, 0x6c, 0xe0, 0x80, 0x36, 0x06, 0x04, 0x00, 0x00, + 0xff, 0xff, 0x1e, 0x05, 0x61, 0x3d, 0xd7, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// UpdateClient is the client API for Update service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type UpdateClient interface { + Update(ctx context.Context, in *payload.Update_Request, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamUpdate(ctx context.Context, opts ...grpc.CallOption) (Update_StreamUpdateClient, error) + MultiUpdate(ctx context.Context, in *payload.Update_MultiRequest, opts ...grpc.CallOption) (*payload.Object_Locations, error) +} + +type updateClient struct { + cc *grpc.ClientConn +} + +func NewUpdateClient(cc *grpc.ClientConn) UpdateClient { + return &updateClient{cc} +} + +func (c *updateClient) Update(ctx context.Context, in *payload.Update_Request, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Update/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *updateClient) StreamUpdate(ctx context.Context, opts ...grpc.CallOption) (Update_StreamUpdateClient, error) { + stream, err := c.cc.NewStream(ctx, &_Update_serviceDesc.Streams[0], "/vald.v1.Update/StreamUpdate", opts...) + if err != nil { + return nil, err + } + x := &updateStreamUpdateClient{stream} + return x, nil +} + +type Update_StreamUpdateClient interface { + Send(*payload.Update_Request) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type updateStreamUpdateClient struct { + grpc.ClientStream +} + +func (x *updateStreamUpdateClient) Send(m *payload.Update_Request) error { + return x.ClientStream.SendMsg(m) +} + +func (x *updateStreamUpdateClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *updateClient) MultiUpdate(ctx context.Context, in *payload.Update_MultiRequest, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/vald.v1.Update/MultiUpdate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UpdateServer is the server API for Update service. +type UpdateServer interface { + Update(context.Context, *payload.Update_Request) (*payload.Object_Location, error) + StreamUpdate(Update_StreamUpdateServer) error + MultiUpdate(context.Context, *payload.Update_MultiRequest) (*payload.Object_Locations, error) +} + +// UnimplementedUpdateServer can be embedded to have forward compatible implementations. 
+type UnimplementedUpdateServer struct { +} + +func (*UnimplementedUpdateServer) Update(ctx context.Context, req *payload.Update_Request) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (*UnimplementedUpdateServer) StreamUpdate(srv Update_StreamUpdateServer) error { + return status.Errorf(codes.Unimplemented, "method StreamUpdate not implemented") +} +func (*UnimplementedUpdateServer) MultiUpdate(ctx context.Context, req *payload.Update_MultiRequest) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiUpdate not implemented") +} + +func RegisterUpdateServer(s *grpc.Server, srv UpdateServer) { + s.RegisterService(&_Update_serviceDesc, srv) +} + +func _Update_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Update_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UpdateServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Update/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UpdateServer).Update(ctx, req.(*payload.Update_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Update_StreamUpdate_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(UpdateServer).StreamUpdate(&updateStreamUpdateServer{stream}) +} + +type Update_StreamUpdateServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Update_Request, error) + grpc.ServerStream +} + +type updateStreamUpdateServer struct { + grpc.ServerStream +} + +func (x *updateStreamUpdateServer) Send(m *payload.Object_Location) error { + return x.ServerStream.SendMsg(m) +} + +func (x *updateStreamUpdateServer) Recv() (*payload.Update_Request, error) { + m := new(payload.Update_Request) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Update_MultiUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Update_MultiRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UpdateServer).MultiUpdate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Update/MultiUpdate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UpdateServer).MultiUpdate(ctx, req.(*payload.Update_MultiRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Update_serviceDesc = grpc.ServiceDesc{ + ServiceName: "vald.v1.Update", + HandlerType: (*UpdateServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Update", + Handler: _Update_Update_Handler, + }, + { + MethodName: "MultiUpdate", + Handler: _Update_MultiUpdate_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamUpdate", + Handler: _Update_StreamUpdate_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "apis/proto/v1/vald/update.proto", +} diff --git a/apis/grpc/v1/vald/upsert.pb.go b/apis/grpc/v1/vald/upsert.pb.go new file mode 100644 index 0000000000..ee4601004c --- /dev/null +++ b/apis/grpc/v1/vald/upsert.pb.go @@ -0,0 +1,250 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed 
under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package vald + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + payload "github.com/vdaas/vald/apis/grpc/v1/payload" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("apis/proto/v1/vald/upsert.proto", fileDescriptor_792e000853e2404f) } + +var fileDescriptor_792e000853e2404f = []byte{ + // 282 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0x2c, 0xc8, 0x2c, + 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x33, 0xd4, 0x2f, 0x4b, 0xcc, 0x49, 0xd1, 0x2f, + 0x2d, 0x28, 0x4e, 0x2d, 0x2a, 0xd1, 0x03, 0x0b, 0x0a, 0xb1, 0x83, 0x84, 0xf4, 0xca, 0x0c, 0xa5, + 0x94, 0x51, 0x55, 0x16, 0x24, 0x56, 0xe6, 0xe4, 0x27, 0xa6, 0xc0, 0x68, 0x88, 0x6a, 0x29, 0x99, + 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xfd, 0xc4, 0x82, 0x4c, 0xfd, 0xc4, 0xbc, 0xbc, 0xfc, 0x92, + 0xc4, 0x92, 0xcc, 0xfc, 0xbc, 0x62, 0x88, 0xac, 0xd1, 0x3f, 0x46, 0x2e, 0xb6, 0x50, 0xb0, 0xe1, + 0x42, 0xa1, 0x70, 0x96, 0x94, 0x1e, 0xcc, 0x88, 0x32, 0x43, 0x3d, 0x88, 0x98, 0x5e, 0x50, 0x6a, + 0x61, 0x69, 0x6a, 0x71, 0x89, 0x94, 0x34, 0xb2, 0x9c, 0x7f, 0x52, 0x56, 0x6a, 0x72, 0x89, 0x9e, + 0x4f, 0x7e, 0x32, 0xd8, 0x50, 0x25, 0xa1, 0xa6, 0xcb, 0x4f, 0x26, 0x33, 0xf1, 0x28, 0xb1, 0x43, + 0x1d, 0x6c, 0xc5, 0xa8, 0x25, 0xe4, 0xcb, 0xc5, 0x13, 0x5c, 0x52, 0x94, 0x9a, 0x98, 0x4b, 0xa9, + 0xe1, 0x0c, 0x1a, 0x8c, 0x06, 0x8c, 0x42, 0x7e, 0x5c, 0xdc, 0xbe, 0xa5, 0x39, 0x25, 0x99, 0x50, + 0xd3, 0xe4, 0xb1, 0x98, 0x06, 0x96, 0x87, 0x19, 0x29, 0x83, 0xc7, 0xc8, 0x62, 0x25, 0x06, 0xa7, + 0xf8, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x91, 0x4b, 0x2a, + 0xbf, 0x28, 0x5d, 0xaf, 0x2c, 0x25, 0x31, 0xb1, 0x58, 0x0f, 0x1c, 0xc8, 0x89, 0x05, 0x99, 0x20, + 0x7d, 0x20, 0xb6, 0x13, 0x57, 0x58, 0x62, 0x4e, 0x0a, 0xc4, 0x8a, 0x00, 0xc6, 0x28, 0xf5, 0xf4, + 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0x06, 0x48, 0x44, 0x81, 0x63, + 0x24, 0xbd, 0xa8, 0x20, 0x19, 0x16, 0x75, 0x49, 0x6c, 0xe0, 0x80, 0x36, 0x06, 0x04, 0x00, 0x00, + 0xff, 0xff, 0x61, 0x18, 0x87, 0xdc, 0xd7, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// UpsertClient is the client API for Upsert service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type UpsertClient interface { + Upsert(ctx context.Context, in *payload.Upsert_Request, opts ...grpc.CallOption) (*payload.Object_Location, error) + StreamUpsert(ctx context.Context, opts ...grpc.CallOption) (Upsert_StreamUpsertClient, error) + MultiUpsert(ctx context.Context, in *payload.Upsert_MultiRequest, opts ...grpc.CallOption) (*payload.Object_Locations, error) +} + +type upsertClient struct { + cc *grpc.ClientConn +} + +func NewUpsertClient(cc *grpc.ClientConn) UpsertClient { + return &upsertClient{cc} +} + +func (c *upsertClient) Upsert(ctx context.Context, in *payload.Upsert_Request, opts ...grpc.CallOption) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Upsert/Upsert", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *upsertClient) StreamUpsert(ctx context.Context, opts ...grpc.CallOption) (Upsert_StreamUpsertClient, error) { + stream, err := c.cc.NewStream(ctx, &_Upsert_serviceDesc.Streams[0], "/vald.v1.Upsert/StreamUpsert", opts...) + if err != nil { + return nil, err + } + x := &upsertStreamUpsertClient{stream} + return x, nil +} + +type Upsert_StreamUpsertClient interface { + Send(*payload.Upsert_Request) error + Recv() (*payload.Object_Location, error) + grpc.ClientStream +} + +type upsertStreamUpsertClient struct { + grpc.ClientStream +} + +func (x *upsertStreamUpsertClient) Send(m *payload.Upsert_Request) error { + return x.ClientStream.SendMsg(m) +} + +func (x *upsertStreamUpsertClient) Recv() (*payload.Object_Location, error) { + m := new(payload.Object_Location) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *upsertClient) MultiUpsert(ctx context.Context, in *payload.Upsert_MultiRequest, opts ...grpc.CallOption) (*payload.Object_Locations, error) { + out := new(payload.Object_Locations) + err := c.cc.Invoke(ctx, "/vald.v1.Upsert/MultiUpsert", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UpsertServer is the server API for Upsert service. +type UpsertServer interface { + Upsert(context.Context, *payload.Upsert_Request) (*payload.Object_Location, error) + StreamUpsert(Upsert_StreamUpsertServer) error + MultiUpsert(context.Context, *payload.Upsert_MultiRequest) (*payload.Object_Locations, error) +} + +// UnimplementedUpsertServer can be embedded to have forward compatible implementations. 
+type UnimplementedUpsertServer struct { +} + +func (*UnimplementedUpsertServer) Upsert(ctx context.Context, req *payload.Upsert_Request) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method Upsert not implemented") +} +func (*UnimplementedUpsertServer) StreamUpsert(srv Upsert_StreamUpsertServer) error { + return status.Errorf(codes.Unimplemented, "method StreamUpsert not implemented") +} +func (*UnimplementedUpsertServer) MultiUpsert(ctx context.Context, req *payload.Upsert_MultiRequest) (*payload.Object_Locations, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiUpsert not implemented") +} + +func RegisterUpsertServer(s *grpc.Server, srv UpsertServer) { + s.RegisterService(&_Upsert_serviceDesc, srv) +} + +func _Upsert_Upsert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Upsert_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UpsertServer).Upsert(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Upsert/Upsert", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UpsertServer).Upsert(ctx, req.(*payload.Upsert_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Upsert_StreamUpsert_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(UpsertServer).StreamUpsert(&upsertStreamUpsertServer{stream}) +} + +type Upsert_StreamUpsertServer interface { + Send(*payload.Object_Location) error + Recv() (*payload.Upsert_Request, error) + grpc.ServerStream +} + +type upsertStreamUpsertServer struct { + grpc.ServerStream +} + +func (x *upsertStreamUpsertServer) Send(m *payload.Object_Location) error { + return x.ServerStream.SendMsg(m) +} + +func (x *upsertStreamUpsertServer) Recv() (*payload.Upsert_Request, error) { + m := new(payload.Upsert_Request) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Upsert_MultiUpsert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(payload.Upsert_MultiRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UpsertServer).MultiUpsert(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Upsert/MultiUpsert", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UpsertServer).MultiUpsert(ctx, req.(*payload.Upsert_MultiRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Upsert_serviceDesc = grpc.ServiceDesc{ + ServiceName: "vald.v1.Upsert", + HandlerType: (*UpsertServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Upsert", + Handler: _Upsert_Upsert_Handler, + }, + { + MethodName: "MultiUpsert", + Handler: _Upsert_MultiUpsert_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamUpsert", + Handler: _Upsert_StreamUpsert_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "apis/proto/v1/vald/upsert.proto", +} diff --git a/apis/grpc/v1/vald/vald.go b/apis/grpc/v1/vald/vald.go new file mode 100644 index 0000000000..d442da43d1 --- /dev/null +++ b/apis/grpc/v1/vald/vald.go @@ -0,0 +1,84 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package vald provides vald server interface +package vald + +import ( + grpc "google.golang.org/grpc" +) + +type Server interface { + InsertServer + UpdateServer + UpsertServer + SearchServer + RemoveServer + ObjectServer +} + +type ServerWithFilter interface { + Server + FilterServer +} + +type Client interface { + InsertClient + UpdateClient + UpsertClient + SearchClient + RemoveClient + ObjectClient +} + +type ClientWithFilter interface { + Client + FilterClient +} + +type client struct { + InsertClient + UpdateClient + UpsertClient + SearchClient + RemoveClient + ObjectClient +} + +func RegisterValdServer(s *grpc.Server, srv Server) { + RegisterInsertServer(s, srv) + RegisterUpdateServer(s, srv) + RegisterUpsertServer(s, srv) + RegisterSearchServer(s, srv) + RegisterRemoveServer(s, srv) + RegisterObjectServer(s, srv) +} + +func RegisterValdServerWithFilter(s *grpc.Server, srv ServerWithFilter) { + RegisterValdServer(s, srv) + RegisterFilterServer(s, srv) +} + +func NewValdClient(conn *grpc.ClientConn) Client { + return &client{ + NewInsertClient(conn), + NewUpdateClient(conn), + NewUpsertClient(conn), + NewSearchClient(conn), + NewRemoveClient(conn), + NewObjectClient(conn), + } +} diff --git a/apis/proto/agent/core/agent.proto b/apis/proto/agent/core/agent.proto deleted file mode 100644 index d5a834bc70..0000000000 --- a/apis/proto/agent/core/agent.proto +++ /dev/null @@ -1,96 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package core; - -option go_package = "github.com/vdaas/vald/apis/grpc/agent/core"; -option java_multiple_files = true; -option java_package = "org.vdaas.vald.agent.core"; -option java_outer_classname = "ValdAgent"; - -import "payload.proto"; -import "google/api/annotations.proto"; - -service Agent { - rpc Exists(payload.Object.ID) returns (payload.Object.ID) { - option (google.api.http).get = "/exists/{id}"; - } - - rpc Search(payload.Search.Request) returns (payload.Search.Response) { - option (google.api.http) = { - post : "/search" - body : "*" - }; - } - rpc SearchByID(payload.Search.IDRequest) returns (payload.Search.Response) { - option (google.api.http) = { - post : "/search/id" - body : "*" - }; - } - rpc StreamSearch(stream payload.Search.Request) - returns (stream payload.Search.Response) {} - rpc StreamSearchByID(stream payload.Search.IDRequest) - returns (stream payload.Search.Response) {} - - rpc Insert(payload.Object.Vector) returns (payload.Empty) { - option (google.api.http) = { - post : "/insert" - body : "*" - }; - } - rpc StreamInsert(stream payload.Object.Vector) - returns (stream payload.Empty) {} - rpc MultiInsert(payload.Object.Vectors) returns (payload.Empty) {} - - rpc Update(payload.Object.Vector) returns (payload.Empty) { - option (google.api.http) = { - post : "/update" - body : "*" - }; - } - rpc StreamUpdate(stream payload.Object.Vector) - returns (stream payload.Empty) {} - rpc MultiUpdate(payload.Object.Vectors) returns (payload.Empty) {} - - rpc Remove(payload.Object.ID) returns (payload.Empty) { - option (google.api.http).delete = "/remove/{id}"; - } - rpc StreamRemove(stream payload.Object.ID) returns (stream payload.Empty) {} - rpc MultiRemove(payload.Object.IDs) returns (payload.Empty) {} - - rpc GetObject(payload.Object.ID) returns (payload.Object.Vector) { - option (google.api.http).get = "/object/{id}"; - } - rpc StreamGetObject(stream payload.Object.ID) - returns (stream payload.Object.Vector) {} - - rpc CreateIndex(payload.Control.CreateIndexRequest) returns (payload.Empty) { - option (google.api.http).get = "/index/create"; - } - rpc SaveIndex(payload.Empty) returns (payload.Empty) { - option (google.api.http).get = "/index/save"; - } - rpc CreateAndSaveIndex(payload.Control.CreateIndexRequest) - returns (payload.Empty) { - option (google.api.http).get = "/index/createandsave"; - } - rpc IndexInfo(payload.Empty) returns (payload.Info.Index.Count) { - option (google.api.http).get = "/index/info"; - } -} diff --git a/apis/proto/filter/egress/egress_filter.proto b/apis/proto/filter/egress/egress_filter.proto index c99d6ac05e..0754e1ff54 100644 --- a/apis/proto/filter/egress/egress_filter.proto +++ b/apis/proto/filter/egress/egress_filter.proto @@ -23,8 +23,7 @@ option java_multiple_files = true; option java_package = "org.vdaas.vald.filter.egress"; option java_outer_classname = "ValdEgressFilter"; -import "payload.proto"; -import "google/api/annotations.proto"; +import "apis/proto/payload/payload.proto"; service EgressFilter { rpc Filter(payload.Search.Response) returns (payload.Search.Response) {} diff --git a/apis/proto/gateway/filter/filter.proto b/apis/proto/gateway/filter/filter.proto new file mode 100644 index 0000000000..ab4d9ae00d --- /dev/null +++ b/apis/proto/gateway/filter/filter.proto @@ -0,0 +1,72 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package filter; + +option go_package = "github.com/vdaas/vald/apis/grpc/gateway/filter"; +option java_multiple_files = true; +option java_package = "org.vdaas.vald.gateway.filter"; +option java_outer_classname = "ValdFilterGateway"; + +import "apis/proto/payload/payload.proto"; +import "google/api/annotations.proto"; + + +service Filter { + rpc SearchObject(payload.Search.ObjectRequest) returns (payload.Search.Response) { + option (google.api.http) = { + post : "/search/object" + body : "*" + }; + } + rpc StreamSearchObject(stream payload.Search.ObjectRequest) + returns (stream payload.Search.Response) {} + + rpc InsertObject(payload.Object.Blob) returns (payload.Object.Location) { + option (google.api.http) = { + post : "/insert/object" + body : "*" + }; + + } + rpc StreamInsertObject(stream payload.Object.Blob) + returns (stream payload.Object.Location) {} + rpc MultiInsertObject(payload.Object.Blob) returns (payload.Object.Locations) {} + + rpc UpdateObject(payload.Object.Blob) returns (payload.Object.Location) { + option (google.api.http) = { + post : "/update/object" + body : "*" + }; + + } + rpc StreamUpdateObject(stream payload.Object.Blob) + returns (stream payload.Object.Location) {} + rpc MultiUpdateObject(payload.Object.Blob) returns (payload.Object.Locations) {} + + rpc UpsertObject(payload.Object.Blob) returns (payload.Object.Location) { + option (google.api.http) = { + post : "/upsert/object" + body : "*" + }; + + } + rpc StreamUpsertObject(stream payload.Object.Blob) + returns (stream payload.Object.Location) {} + rpc MultiUpsertObject(payload.Object.Blob) returns (payload.Object.Locations) {} +} diff --git a/apis/proto/gateway/vald/vald.proto b/apis/proto/gateway/vald/vald.proto index dbf926bd67..05ff1e94e5 100644 --- a/apis/proto/gateway/vald/vald.proto +++ b/apis/proto/gateway/vald/vald.proto @@ -23,7 +23,7 @@ option java_multiple_files = true; option java_package = "org.vdaas.vald.gateway.vald"; option java_outer_classname = "ValdApi"; -import "payload.proto"; +import "apis/proto/payload/payload.proto"; import "google/api/annotations.proto"; service Vald { @@ -48,45 +48,45 @@ service Vald { rpc StreamSearchByID(stream payload.Search.IDRequest) returns (stream payload.Search.Response) {} - rpc Insert(payload.Object.Vector) returns (payload.Empty) { + rpc Insert(payload.Object.Vector) returns (payload.Object.Location) { option (google.api.http) = { post : "/insert" body : "*" }; } rpc StreamInsert(stream payload.Object.Vector) - returns (stream payload.Empty) {} - rpc MultiInsert(payload.Object.Vectors) returns (payload.Empty) {} + returns (stream payload.Object.Location) {} + rpc MultiInsert(payload.Object.Vectors) returns (payload.Object.Locations) {} - rpc Update(payload.Object.Vector) returns (payload.Empty) { + rpc Update(payload.Object.Vector) returns (payload.Object.Location) { option (google.api.http) = { post : "/update" body : "*" }; } rpc StreamUpdate(stream payload.Object.Vector) - returns (stream payload.Empty) {} - rpc MultiUpdate(payload.Object.Vectors) returns (payload.Empty) {} + returns (stream 
payload.Object.Location) {} + rpc MultiUpdate(payload.Object.Vectors) returns (payload.Object.Locations) {} - rpc Upsert(payload.Object.Vector) returns (payload.Empty) { + rpc Upsert(payload.Object.Vector) returns (payload.Object.Location) { option (google.api.http) = { post : "/upsert" body : "*" }; } rpc StreamUpsert(stream payload.Object.Vector) - returns (stream payload.Empty) {} - rpc MultiUpsert(payload.Object.Vectors) returns (payload.Empty) {} + returns (stream payload.Object.Location) {} + rpc MultiUpsert(payload.Object.Vectors) returns (payload.Object.Locations) {} - rpc Remove(payload.Object.ID) returns (payload.Empty) { + rpc Remove(payload.Object.ID) returns (payload.Object.Location) { option (google.api.http).delete = "/remove/{id}"; } - rpc StreamRemove(stream payload.Object.ID) returns (stream payload.Empty) {} - rpc MultiRemove(payload.Object.IDs) returns (payload.Empty) {} + rpc StreamRemove(stream payload.Object.ID) returns (stream payload.Object.Location) {} + rpc MultiRemove(payload.Object.IDs) returns (payload.Object.Locations) {} - rpc GetObject(payload.Object.ID) returns (payload.Backup.MetaVector) { + rpc GetObject(payload.Object.ID) returns (payload.Object.Vector) { option (google.api.http).get = "/object/{id}"; } rpc StreamGetObject(stream payload.Object.ID) - returns (stream payload.Backup.MetaVector) {} + returns (stream payload.Object.Vector) {} } diff --git a/apis/proto/payload/payload.proto b/apis/proto/payload/payload.proto index 37838684cb..a9f0f0bd85 100644 --- a/apis/proto/payload/payload.proto +++ b/apis/proto/payload/payload.proto @@ -23,30 +23,46 @@ option java_multiple_files = true; option java_package = "org.vdaas.vald.payload"; option java_outer_classname = "ValdPayload"; -import "validate/validate.proto"; -/* import "github.com/envoyproxy/protoc-gen-validate/validate/validate.proto"; - */ message Search { message Request { repeated float vector = 1 [ (validate.rules).repeated .min_items = 2 ]; Config config = 2; } + message MultiRequest { + repeated Request requests = 1; + } message IDRequest { string id = 1; Config config = 2; } + message MultiIDRequest { + repeated IDRequest requests = 1; + } + + message ObjectRequest { + bytes object = 1; + Config config = 2; + } message Config { - uint32 num = 1 [ (validate.rules).uint32.gte = 1 ]; - float radius = 2; - float epsilon = 3; - int64 timeout = 4; + string request_id = 1; + uint32 num = 2 [ (validate.rules).uint32.gte = 1 ]; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; } - message Response { repeated Object.Distance results = 1; } + message Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Responses { + repeated Response responses = 1; + } } message Meta { @@ -75,6 +91,21 @@ message Object { repeated float vector = 2 [ (validate.rules).repeated .min_items = 2 ]; } message Vectors { repeated Vector vectors = 1; } + + message Blob { + string id = 1 [ (validate.rules).string.min_len = 1 ]; + bytes object = 2; + } + + message Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + message Locations { + repeated Location locations = 1; + } } message Control { @@ -141,7 +172,6 @@ message Backup { message MetaVector { string uuid = 1; - string meta = 2; repeated float vector = 3 [ (validate.rules).repeated .min_items = 2 ]; repeated string ips = 4; } @@ -150,7 +180,6 @@ message Backup { message Compressed { message MetaVector { string uuid = 1; - string meta = 2; bytes vector = 3; repeated string ips = 4; } diff --git 
a/apis/proto/v1/agent/core/agent.proto b/apis/proto/v1/agent/core/agent.proto new file mode 100644 index 0000000000..a20b20fe90 --- /dev/null +++ b/apis/proto/v1/agent/core/agent.proto @@ -0,0 +1,44 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package core.v1; + +option go_package = "github.com/vdaas/vald/apis/grpc/v1/agent/core"; +option java_multiple_files = true; +option java_package = "org.vdaas.vald.api.v1.agent.core"; +option java_outer_classname = "ValdAgent"; + +import "apis/proto/v1/payload/payload.proto"; +import "google/api/annotations.proto"; + + +service Agent { + rpc CreateIndex(payload.v1.Control.CreateIndexRequest) returns (payload.v1.Empty) { + option (google.api.http).get = "/index/create"; + } + rpc SaveIndex(payload.v1.Empty) returns (payload.v1.Empty) { + option (google.api.http).get = "/index/save"; + } + rpc CreateAndSaveIndex(payload.v1.Control.CreateIndexRequest) + returns (payload.v1.Empty) { + option (google.api.http).get = "/index/createandsave"; + } + rpc IndexInfo(payload.v1.Empty) returns (payload.v1.Info.Index.Count) { + option (google.api.http).get = "/index/info"; + } +} diff --git a/apis/proto/agent/sidecar/sidecar.proto b/apis/proto/v1/agent/sidecar/sidecar.proto similarity index 79% rename from apis/proto/agent/sidecar/sidecar.proto rename to apis/proto/v1/agent/sidecar/sidecar.proto index 8f65d76a36..884494e326 100644 --- a/apis/proto/agent/sidecar/sidecar.proto +++ b/apis/proto/v1/agent/sidecar/sidecar.proto @@ -16,15 +16,13 @@ syntax = "proto3"; -package sidecar; +package sidecar.v1; -option go_package = "github.com/vdaas/vald/apis/grpc/agent/sidecar"; +option go_package = "github.com/vdaas/vald/apis/grpc/v1/agent/sidecar"; option java_multiple_files = true; -option java_package = "org.vdaas.vald.agent.sidecar"; +option java_package = "org.vdaas.vald.api.v1.agent.sidecar"; option java_outer_classname = "ValdAgentSidecar"; -import "payload.proto"; -import "google/api/annotations.proto"; service Sidecar { } diff --git a/apis/proto/discoverer/discoverer.proto b/apis/proto/v1/discoverer/discoverer.proto similarity index 70% rename from apis/proto/discoverer/discoverer.proto rename to apis/proto/v1/discoverer/discoverer.proto index b23fe92565..d1f4cce119 100644 --- a/apis/proto/discoverer/discoverer.proto +++ b/apis/proto/v1/discoverer/discoverer.proto @@ -16,24 +16,26 @@ syntax = "proto3"; -package discoverer; +package discoverer.v1; -option go_package = "github.com/vdaas/vald/apis/grpc/discoverer"; +option go_package = "github.com/vdaas/vald/apis/grpc/v1/discoverer"; option java_multiple_files = true; -option java_package = "org.vdaas.vald.discoverer"; -option java_outer_classname = "Discoverer"; +option java_package = "org.vdaas.vald.api.v1.discoverer"; +option java_outer_classname = "ValdDiscoverer"; -import "payload.proto"; +import "apis/proto/v1/payload/payload.proto"; import "google/api/annotations.proto"; + service Discoverer { - 
rpc Pods(payload.Discoverer.Request) returns (payload.Info.Pods) { + + rpc Pods(payload.v1.Discoverer.Request) returns (payload.v1.Info.Pods) { option (google.api.http) = { post : "/discover/pods" body : "*" }; } - rpc Nodes(payload.Discoverer.Request) returns (payload.Info.Nodes) { + rpc Nodes(payload.v1.Discoverer.Request) returns (payload.v1.Info.Nodes) { option (google.api.http) = { post : "/discover/nodes" body : "*" diff --git a/apis/proto/errors/errors.proto b/apis/proto/v1/errors/errors.proto similarity index 79% rename from apis/proto/errors/errors.proto rename to apis/proto/v1/errors/errors.proto index 39ddfd686f..26037cf067 100644 --- a/apis/proto/errors/errors.proto +++ b/apis/proto/v1/errors/errors.proto @@ -16,18 +16,13 @@ syntax = "proto3"; -package errors; +package errors.v1; -option go_package = "github.com/vdaas/vald/apis/grpc/errors"; +option go_package = "github.com/vdaas/vald/apis/grpc/v1/errors"; option java_multiple_files = true; -option java_package = "org.vdaas.vald.errors"; +option java_package = "org.vdaas.vald.api.v1.errors"; option java_outer_classname = "ValdErrors"; -import "validate/validate.proto"; -/* -import "github.com/envoyproxy/protoc-gen-validate/validate/validate.proto"; - */ - message Errors { message RPC { string type = 1; diff --git a/apis/proto/filter/ingress/ingress_filter.proto b/apis/proto/v1/filter/egress/egress_filter.proto similarity index 60% rename from apis/proto/filter/ingress/ingress_filter.proto rename to apis/proto/v1/filter/egress/egress_filter.proto index 8d93e7c48c..763aef7ea7 100644 --- a/apis/proto/filter/ingress/ingress_filter.proto +++ b/apis/proto/v1/filter/egress/egress_filter.proto @@ -16,14 +16,20 @@ syntax = "proto3"; -package ingress_filter; +package filter.egress.v1; -option go_package = "github.com/vdaas/vald/apis/grpc/filter/ingress"; +option go_package = "github.com/vdaas/vald/apis/grpc/v1/filter/egress"; option java_multiple_files = true; -option java_package = "org.vdaas.vald.filter.ingress"; -option java_outer_classname = "ValdIngressFilter"; +option java_package = "org.vdaas.vald.api.v1.filter.egress"; +option java_outer_classname = "ValdEgressFilter"; -import "payload.proto"; +import "apis/proto/v1/payload/payload.proto"; import "google/api/annotations.proto"; -service IngressFilter {} + +service EgressFilter { + + rpc Filter(payload.v1.Object.Distance) returns (payload.v1.Object.Distance) {} + rpc StreamFilter(stream payload.v1.Object.Distance) + returns (stream payload.v1.Object.Distance) {} +} diff --git a/apis/proto/v1/filter/ingress/ingress_filter.proto b/apis/proto/v1/filter/ingress/ingress_filter.proto new file mode 100644 index 0000000000..724146393a --- /dev/null +++ b/apis/proto/v1/filter/ingress/ingress_filter.proto @@ -0,0 +1,49 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
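EgressFilter is no longer an empty service: it exposes Filter and StreamFilter over payload.v1.Object.Distance, so a deployment can rescore or drop results on the way out of the gateway. A pass-through server sketch, assuming stubs generated from the go_package above with conventional protoc-gen-go/grpc-go names (RegisterEgressFilterServer, EgressFilter_StreamFilterServer); the listen port is arbitrary.

package main

import (
	"context"
	"io"
	"log"
	"net"

	egress "github.com/vdaas/vald/apis/grpc/v1/filter/egress" // go_package declared above; generated names assumed
	"github.com/vdaas/vald/apis/grpc/v1/payload"
	"google.golang.org/grpc"
)

// server is a toy egress filter that passes every result through unchanged.
// (newer protoc-gen-go-grpc output would also require embedding UnimplementedEgressFilterServer)
type server struct{}

func (s *server) Filter(ctx context.Context, d *payload.Object_Distance) (*payload.Object_Distance, error) {
	return d, nil
}

func (s *server) StreamFilter(stream egress.EgressFilter_StreamFilterServer) error {
	for {
		d, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if err := stream.Send(d); err != nil {
			return err
		}
	}
}

func main() {
	lis, err := net.Listen("tcp", ":8083") // hypothetical port
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	egress.RegisterEgressFilterServer(srv, &server{})
	log.Fatal(srv.Serve(lis))
}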
+// + +syntax = "proto3"; + +package filter.ingress.v1; + +option go_package = "github.com/vdaas/vald/apis/grpc/v1/filter/ingress"; +option java_multiple_files = true; +option java_package = "org.vdaas.vald.api.v1.filter.ingress"; +option java_outer_classname = "ValdIngressFilter"; + +import "apis/proto/v1/payload/payload.proto"; +import "google/api/annotations.proto"; + + +service IngressFilter { + rpc GenVector(payload.v1.Object.Blob) returns (payload.v1.Object.Vector) { + option (google.api.http) = { + post : "/object" + body : "*" + }; + } + rpc StreamGenVector(stream payload.v1.Object.Blob) + returns (stream payload.v1.Object.Vector) {} + + rpc FilterVector(payload.v1.Object.Vector) returns (payload.v1.Object.Vector) { + option (google.api.http) = { + post : "/vector" + body : "*" + }; + + } + rpc StreamFilterVector(stream payload.v1.Object.Vector) + returns (stream payload.v1.Object.Vector) {} +} diff --git a/apis/proto/v1/gateway/vald/vald.proto b/apis/proto/v1/gateway/vald/vald.proto new file mode 100644 index 0000000000..798bd5382f --- /dev/null +++ b/apis/proto/v1/gateway/vald/vald.proto @@ -0,0 +1,98 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package vald.v1; + +option go_package = "github.com/vdaas/vald/apis/grpc/v1/gateway/vald"; +option java_multiple_files = true; +option java_package = "org.vdaas.vald.api.v1.gateway.vald"; +option java_outer_classname = "ValdApi"; + +import "apis/proto/v1/payload/payload.proto"; +import "google/api/annotations.proto"; + + +service Vald { + + rpc Exists(payload.v1.Object.ID) returns (payload.v1.Object.ID) { + option (google.api.http).get = "/exists/{id}"; + } + + rpc Search(payload.v1.Search.Request) returns (payload.v1.Search.Response) { + option (google.api.http) = { + post : "/search" + body : "*" + }; + } + rpc SearchByID(payload.v1.Search.IDRequest) returns (payload.v1.Search.Response) { + option (google.api.http) = { + post : "/search/id" + body : "*" + }; + } + rpc StreamSearch(stream payload.v1.Search.Request) + returns (stream payload.v1.Search.Response) {} + rpc StreamSearchByID(stream payload.v1.Search.IDRequest) + returns (stream payload.v1.Search.Response) {} + + rpc Insert(payload.v1.Object.Vector) returns (payload.v1.Object.Location) { + option (google.api.http) = { + post : "/insert" + body : "*" + }; + + } + rpc StreamInsert(stream payload.v1.Object.Vector) + returns (stream payload.v1.Object.Location) {} + rpc MultiInsert(payload.v1.Object.Vectors) returns (payload.v1.Object.Locations) {} + + rpc Update(payload.v1.Object.Vector) returns (payload.v1.Object.Location) { + option (google.api.http) = { + post : "/update" + body : "*" + }; + + } + rpc StreamUpdate(stream payload.v1.Object.Vector) + returns (stream payload.v1.Object.Location) {} + rpc MultiUpdate(payload.v1.Object.Vectors) returns (payload.v1.Object.Locations) {} + + rpc Upsert(payload.v1.Object.Vector) returns (payload.v1.Object.Location) { + 
option (google.api.http) = { + post : "/upsert" + body : "*" + }; + + } + rpc StreamUpsert(stream payload.v1.Object.Vector) + returns (stream payload.v1.Object.Location) {} + rpc MultiUpsert(payload.v1.Object.Vectors) returns (payload.v1.Object.Locations) {} + + rpc Remove(payload.v1.Object.ID) returns (payload.v1.Object.Location) { + option (google.api.http).delete = "/remove/{id}"; + + } + rpc StreamRemove(stream payload.v1.Object.ID) returns (stream payload.v1.Object.Location) {} + rpc MultiRemove(payload.v1.Object.IDs) returns (payload.v1.Object.Locations) {} + + rpc GetObject(payload.v1.Object.ID) returns (payload.v1.Object.Vector) { + option (google.api.http).get = "/object/{id}"; + } + rpc StreamGetObject(stream payload.v1.Object.ID) + returns (stream payload.v1.Object.Vector) {} +} diff --git a/apis/proto/manager/backup/backup_manager.proto b/apis/proto/v1/manager/backup/backup_manager.proto similarity index 61% rename from apis/proto/manager/backup/backup_manager.proto rename to apis/proto/v1/manager/backup/backup_manager.proto index bec4c69d53..29caa852eb 100644 --- a/apis/proto/manager/backup/backup_manager.proto +++ b/apis/proto/v1/manager/backup/backup_manager.proto @@ -16,61 +16,70 @@ syntax = "proto3"; -package backup_manager; +package manager.backup.v1; -option go_package = "github.com/vdaas/vald/apis/grpc/manager/backup"; +option go_package = "github.com/vdaas/vald/apis/grpc/v1/manager/backup"; option java_multiple_files = true; -option java_package = "org.vdaas.vald.manager.backup"; +option java_package = "org.vdaas.vald.api.v1.manager.backup"; option java_outer_classname = "ValdBackupManager"; -import "payload.proto"; +import "apis/proto/v1/payload/payload.proto"; import "google/api/annotations.proto"; + service Backup { - rpc GetVector(payload.Backup.GetVector.Request) returns (payload.Backup.Compressed.MetaVector) { + + + rpc GetVector(payload.v1.Backup.GetVector.Request) returns (payload.v1.Backup.Compressed.Vector) { option (google.api.http).get = "/vector/{uuid}"; } - rpc Locations(payload.Backup.Locations.Request) returns (payload.Info.IPs) { + rpc Locations(payload.v1.Backup.Locations.Request) returns (payload.v1.Info.IPs) { option (google.api.http).get = "/locations/{uuid}"; } - rpc Register(payload.Backup.Compressed.MetaVector) returns (payload.Empty) { + rpc Register(payload.v1.Backup.Compressed.Vector) returns (payload.v1.Empty) { option (google.api.http) = { post : "/register" body : "*" }; + } - rpc RegisterMulti(payload.Backup.Compressed.MetaVectors) returns (payload.Empty) { + rpc RegisterMulti(payload.v1.Backup.Compressed.Vectors) returns (payload.v1.Empty) { option (google.api.http) = { post : "/register/multi" body : "*" }; + } - rpc Remove(payload.Backup.Remove.Request) returns (payload.Empty) { + rpc Remove(payload.v1.Backup.Remove.Request) returns (payload.v1.Empty) { option (google.api.http).delete = "/delete/{uuid}"; + } - rpc RemoveMulti(payload.Backup.Remove.RequestMulti) returns (payload.Empty) { + rpc RemoveMulti(payload.v1.Backup.Remove.RequestMulti) returns (payload.v1.Empty) { option (google.api.http) = { post : "/delete/multi" body : "*" }; + } - rpc RegisterIPs(payload.Backup.IP.Register.Request) returns (payload.Empty) { + rpc RegisterIPs(payload.v1.Backup.IP.Register.Request) returns (payload.v1.Empty) { option (google.api.http) = { post : "/ip" body : "*" }; + } - rpc RemoveIPs(payload.Backup.IP.Remove.Request) returns (payload.Empty) { + rpc RemoveIPs(payload.v1.Backup.IP.Remove.Request) returns (payload.v1.Empty) { option 
(google.api.http) = { post : "/ip/delete" body : "*" }; + } } diff --git a/apis/proto/manager/compressor/compressor.proto b/apis/proto/v1/manager/compressor/compressor.proto similarity index 59% rename from apis/proto/manager/compressor/compressor.proto rename to apis/proto/v1/manager/compressor/compressor.proto index 3b2fe595fd..7450e1fc2a 100644 --- a/apis/proto/manager/compressor/compressor.proto +++ b/apis/proto/v1/manager/compressor/compressor.proto @@ -16,65 +16,74 @@ syntax = "proto3"; -package compressor; +package manager.compressor.v1; -option go_package = "github.com/vdaas/vald/apis/grpc/manager/compressor"; +option go_package = "github.com/vdaas/vald/apis/grpc/v1/manager/compressor"; option java_multiple_files = true; -option java_package = "org.vdaas.vald.manager.compressor"; +option java_package = "org.vdaas.vald.api.v1.manager.compressor"; option java_outer_classname = "ValdCompressor"; -import "payload.proto"; +import "apis/proto/v1/payload/payload.proto"; import "google/api/annotations.proto"; + service Backup { - rpc GetVector(payload.Backup.GetVector.Request) returns (payload.Backup.MetaVector) { + + + rpc GetVector(payload.v1.Backup.GetVector.Request) returns (payload.v1.Backup.Vector) { option (google.api.http).get = "/vector/{uuid}"; } - // rpc GetVectorsByOwner(payload.Backup.GetVector.Owner) returns (stream payload.Backup.MetaVector) { + // rpc GetVectorsByOwner(payload.v1.Backup.GetVector.Owner) returns (stream payload.v1.Backup.Vector) { // option (google.api.http).get = "/vector/{uuid}"; - // } + // } - rpc Locations(payload.Backup.Locations.Request) returns (payload.Info.IPs) { + rpc Locations(payload.v1.Backup.Locations.Request) returns (payload.v1.Info.IPs) { option (google.api.http).get = "/locations/{uuid}"; } - rpc Register(payload.Backup.MetaVector) returns (payload.Empty) { + rpc Register(payload.v1.Backup.Vector) returns (payload.v1.Empty) { option (google.api.http) = { post : "/register" body : "*" }; + } - rpc RegisterMulti(payload.Backup.MetaVectors) returns (payload.Empty) { + rpc RegisterMulti(payload.v1.Backup.Vectors) returns (payload.v1.Empty) { option (google.api.http) = { post : "/register/multi" body : "*" }; + } - rpc Remove(payload.Backup.Remove.Request) returns (payload.Empty) { + rpc Remove(payload.v1.Backup.Remove.Request) returns (payload.v1.Empty) { option (google.api.http).delete = "/delete/{uuid}"; + } - rpc RemoveMulti(payload.Backup.Remove.RequestMulti) returns (payload.Empty) { + rpc RemoveMulti(payload.v1.Backup.Remove.RequestMulti) returns (payload.v1.Empty) { option (google.api.http) = { post : "/delete/multi" body : "*" }; + } - rpc RegisterIPs(payload.Backup.IP.Register.Request) returns (payload.Empty) { + rpc RegisterIPs(payload.v1.Backup.IP.Register.Request) returns (payload.v1.Empty) { option (google.api.http) = { post : "/ip" body : "*" }; + } - rpc RemoveIPs(payload.Backup.IP.Remove.Request) returns (payload.Empty) { + rpc RemoveIPs(payload.v1.Backup.IP.Remove.Request) returns (payload.v1.Empty) { option (google.api.http) = { post : "/ip/delete" body : "*" }; + } } diff --git a/apis/proto/manager/index/index_manager.proto b/apis/proto/v1/manager/index/index_manager.proto similarity index 75% rename from apis/proto/manager/index/index_manager.proto rename to apis/proto/v1/manager/index/index_manager.proto index faee763601..ce09f42132 100644 --- a/apis/proto/manager/index/index_manager.proto +++ b/apis/proto/v1/manager/index/index_manager.proto @@ -16,18 +16,20 @@ syntax = "proto3"; -package index_manager; +package 
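With Backup.MetaVector reduced to Backup.Vector (the meta field is dropped), a backup entry is just a uuid, the vector and the IPs of the agents holding it. A rough client sketch against the compressor's Register RPC, assuming the go_package above and conventional generated names; the address and values are made up.

package main

import (
	"context"
	"log"

	compressor "github.com/vdaas/vald/apis/grpc/v1/manager/compressor" // go_package above; generated names assumed
	"github.com/vdaas/vald/apis/grpc/v1/payload"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("vald-manager-compressor.default.svc.cluster.local:8081", grpc.WithInsecure()) // hypothetical address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	c := compressor.NewBackupClient(conn)

	// Register a backup entry: uuid + vector + owning IPs, no meta field anymore.
	_, err = c.Register(context.Background(), &payload.Backup_Vector{
		Uuid:   "uuid-123", // hypothetical
		Vector: []float32{0.1, 0.2, 0.3},
		Ips:    []string{"10.0.0.1"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("registered backup vector")
}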
manager.index.v1; -option go_package = "github.com/vdaas/vald/apis/grpc/manager/index"; +option go_package = "github.com/vdaas/vald/apis/grpc/v1/manager/index"; option java_multiple_files = true; -option java_package = "org.vdaas.vald.manager.index"; +option java_package = "org.vdaas.vald.api.v1.manager.index"; option java_outer_classname = "ValdIndexManager"; -import "payload.proto"; +import "apis/proto/v1/payload/payload.proto"; import "google/api/annotations.proto"; + service Index { - rpc IndexInfo(payload.Empty) returns (payload.Info.Index.Count) { + + rpc IndexInfo(payload.v1.Empty) returns (payload.v1.Info.Index.Count) { option (google.api.http).get = "/index/info"; } } diff --git a/apis/proto/manager/replication/agent/replication_manager.proto b/apis/proto/v1/manager/replication/agent/replication_manager.proto similarity index 68% rename from apis/proto/manager/replication/agent/replication_manager.proto rename to apis/proto/v1/manager/replication/agent/replication_manager.proto index ec8afc7a1f..2880f63b59 100644 --- a/apis/proto/manager/replication/agent/replication_manager.proto +++ b/apis/proto/v1/manager/replication/agent/replication_manager.proto @@ -16,24 +16,24 @@ syntax = "proto3"; -package replication_manager; +package manager.replication.agent.v1; -option go_package = "github.com/vdaas/vald/apis/grpc/manager/replication/agent"; +option go_package = "github.com/vdaas/vald/apis/grpc/v1/manager/replication/agent"; option java_multiple_files = true; -option java_package = "org.vdaas.vald.manager.replication.agent"; +option java_package = "org.vdaas.vald.api.v1.manager.replication.agent"; option java_outer_classname = "ValdReplicationManagerAgent"; -import "payload.proto"; +import "apis/proto/v1/payload/payload.proto"; import "google/api/annotations.proto"; service Replication { - rpc Recover(payload.Replication.Recovery) returns (payload.Empty) { + rpc Recover(payload.v1.Replication.Recovery) returns (payload.v1.Empty) { option (google.api.http).post = "/replication/recover"; } - rpc Rebalance(payload.Replication.Rebalance) returns (payload.Empty) { + rpc Rebalance(payload.v1.Replication.Rebalance) returns (payload.v1.Empty) { option (google.api.http).post = "/replication/rebalance"; } - rpc AgentInfo(payload.Empty) returns (payload.Replication.Agents) { + rpc AgentInfo(payload.v1.Empty) returns (payload.v1.Replication.Agents) { option (google.api.http).get = "/replication/agent/info"; } } diff --git a/apis/proto/manager/replication/controller/replication_manager.proto b/apis/proto/v1/manager/replication/controller/replication_manager.proto similarity index 72% rename from apis/proto/manager/replication/controller/replication_manager.proto rename to apis/proto/v1/manager/replication/controller/replication_manager.proto index c3c7a8443e..b1521e8ca1 100644 --- a/apis/proto/manager/replication/controller/replication_manager.proto +++ b/apis/proto/v1/manager/replication/controller/replication_manager.proto @@ -16,18 +16,19 @@ syntax = "proto3"; -package replication_manager; +package manager.replication.controller.v1; -option go_package = "github.com/vdaas/vald/apis/grpc/manager/replication/controller"; +option go_package = "github.com/vdaas/vald/apis/grpc/v1/manager/replication/controller"; option java_multiple_files = true; -option java_package = "org.vdaas.vald.manager.replication.controller"; +option java_package = "org.vdaas.vald.api.v1.manager.replication.controller"; option java_outer_classname = "ValdReplicationManagerController"; -import "payload.proto"; +import 
"apis/proto/v1/payload/payload.proto"; import "google/api/annotations.proto"; + service ReplicationController { - rpc ReplicationInfo(payload.Empty) returns (payload.Replication.Agents) { + rpc ReplicationInfo(payload.v1.Empty) returns (payload.v1.Replication.Agents) { option (google.api.http).get = "/replication/info"; } } diff --git a/apis/proto/meta/meta.proto b/apis/proto/v1/meta/meta.proto similarity index 57% rename from apis/proto/meta/meta.proto rename to apis/proto/v1/meta/meta.proto index ac958e0c7b..98129d3838 100644 --- a/apis/proto/meta/meta.proto +++ b/apis/proto/v1/meta/meta.proto @@ -16,45 +16,47 @@ syntax = "proto3"; -package meta_manager; +package meta.v1; -option go_package = "github.com/vdaas/vald/apis/grpc/meta"; +option go_package = "github.com/vdaas/vald/apis/grpc/v1/meta"; option java_multiple_files = true; -option java_package = "org.vdaas.vald.meta"; -option java_outer_classname = "meta"; +option java_package = "org.vdaas.vald.api.v1.meta"; +option java_outer_classname = "ValdMeta"; -import "payload.proto"; +import "apis/proto/v1/payload/payload.proto"; import "google/api/annotations.proto"; + service Meta { - rpc GetMeta(payload.Meta.Key) returns (payload.Meta.Val) { + + rpc GetMeta(payload.v1.Meta.Key) returns (payload.v1.Meta.Val) { option (google.api.http).post = "/meta"; } - rpc GetMetas(payload.Meta.Keys) returns (payload.Meta.Vals) { + rpc GetMetas(payload.v1.Meta.Keys) returns (payload.v1.Meta.Vals) { option (google.api.http).post = "/metas"; } - rpc GetMetaInverse(payload.Meta.Val) returns (payload.Meta.Key) { + rpc GetMetaInverse(payload.v1.Meta.Val) returns (payload.v1.Meta.Key) { option (google.api.http).post = "/inverse/meta"; } - rpc GetMetasInverse(payload.Meta.Vals) returns (payload.Meta.Keys) { + rpc GetMetasInverse(payload.v1.Meta.Vals) returns (payload.v1.Meta.Keys) { option (google.api.http).post = "/inverse/metas"; } - rpc SetMeta(payload.Meta.KeyVal) returns (payload.Empty) { + rpc SetMeta(payload.v1.Meta.KeyVal) returns (payload.v1.Empty) { option (google.api.http).post = "/meta"; } - rpc SetMetas(payload.Meta.KeyVals) returns (payload.Empty) { + rpc SetMetas(payload.v1.Meta.KeyVals) returns (payload.v1.Empty) { option (google.api.http).post = "/metas"; } - rpc DeleteMeta(payload.Meta.Key) returns (payload.Meta.Val) { + rpc DeleteMeta(payload.v1.Meta.Key) returns (payload.v1.Meta.Val) { option (google.api.http).delete = "/meta"; } - rpc DeleteMetas(payload.Meta.Keys) returns (payload.Meta.Vals) { + rpc DeleteMetas(payload.v1.Meta.Keys) returns (payload.v1.Meta.Vals) { option (google.api.http).delete = "/metas"; } - rpc DeleteMetaInverse(payload.Meta.Val) returns (payload.Meta.Key) { + rpc DeleteMetaInverse(payload.v1.Meta.Val) returns (payload.v1.Meta.Key) { option (google.api.http).delete = "/inverse/meta"; } - rpc DeleteMetasInverse(payload.Meta.Vals) returns (payload.Meta.Keys) { + rpc DeleteMetasInverse(payload.v1.Meta.Vals) returns (payload.v1.Meta.Keys) { option (google.api.http).delete = "/inverse/metas"; } } diff --git a/apis/proto/v1/payload/payload.proto b/apis/proto/v1/payload/payload.proto new file mode 100644 index 0000000000..a8471ea4f5 --- /dev/null +++ b/apis/proto/v1/payload/payload.proto @@ -0,0 +1,310 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package payload.v1; + +option go_package = "github.com/vdaas/vald/apis/grpc/v1/payload"; +option java_multiple_files = true; +option java_package = "org.vdaas.vald.api.v1.payload"; +option java_outer_classname = "ValdPayload"; + +import "github.com/envoyproxy/protoc-gen-validate/validate/validate.proto"; + +message Search { + message Request { + repeated float vector = 1 [ (validate.rules).repeated .min_items = 2 ]; + Config config = 2; + } + message MultiRequest { + repeated Request requests = 1; + } + + message IDRequest { + string id = 1; + Config config = 2; + } + message MultiIDRequest { + repeated IDRequest requests = 1; + } + + message ObjectRequest { + bytes object = 1; + Config config = 2; + } + + message Config { + string request_id = 1; + uint32 num = 2 [ (validate.rules).uint32.gte = 1 ]; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config filters = 6; + } + + message Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Responses { + repeated Response responses = 1; + } +} + +message Filter { + message Target { + string host = 1; + uint32 port = 2; + } + message Config{ + repeated string targets = 1; + } +} + +message Insert { + message Request { + Object.Vector vector = 1 [ (validate.rules).repeated .min_items = 2 ]; + Config config = 2; + } + message MultiRequest { + repeated Request requests = 1; + } + message Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + } +} + +message Update { + message Request { + Object.Vector vector = 1 [ (validate.rules).repeated .min_items = 2 ]; + Config config = 2; + } + message MultiRequest { + repeated Request requests = 1; + } + message Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + } +} + +message Upsert { + message Request { + Object.Vector vector = 1 [ (validate.rules).repeated .min_items = 2 ]; + Config config = 2; + } + message MultiRequest { + repeated Request requests = 1; + } + message Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + } +} + +message Remove { + message Request { + Object.ID id = 1; + Config config = 2; + } + message MultiRequest { + repeated Request requests = 1; + } + message Config { + bool skip_strict_exist_check = 1; + } +} + +message Meta { + message Key { string key = 1; } + message Keys { repeated string keys = 1; } + message Val { string val = 1; } + message Vals { repeated string vals = 1; } + message KeyVal { + string key = 1; + string val = 2; + } + message KeyVals { repeated KeyVal kvs = 1; } +} + +message Object { + message Distance { + string id = 1; + float distance = 2; + } + + message ID { string id = 1 [ (validate.rules).string.min_len = 1 ]; } + message IDs { repeated string ids = 1; } + + message Vector { + string id = 1 [ (validate.rules).string.min_len = 1 ]; + repeated float vector = 2 [ (validate.rules).repeated .min_items = 2 ]; + } + message Vectors { repeated Vector vectors = 1; } + + message Blob { + string id = 1 [ (validate.rules).string.min_len = 1 ]; + bytes object = 2; + } + + message Location { 
+ string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + message Locations { + repeated Location locations = 1; + } +} + +message Control { + message CreateIndexRequest { + uint32 pool_size = 1 [ (validate.rules).uint32.gte = 0 ]; + } +} + +message Replication { + message Recovery { + repeated string deleted_agents = 1; + } + + message Rebalance { + repeated string high_usage_agents = 1; + repeated string low_usage_agents = 2; + } + + message Agents { + repeated string agents = 1; + repeated string removed_agents = 2; + repeated string replicating_agent = 3; + } +} + +message Discoverer { + message Request { + string name = 1 [ (validate.rules).string.min_len = 1 ]; + string namespace = 2; + string node = 3; + } +} + +message Backup { + message GetVector { + message Request { string uuid = 1 [ (validate.rules).string.min_len = 1 ]; } + message Owner { string ip = 1 [ (validate.rules).string.min_len = 1 ]; } + } + + message Locations { + message Request { string uuid = 1 [ (validate.rules).string.min_len = 1 ]; } + } + + message Remove { + message Request { string uuid = 1 [ (validate.rules).string.min_len = 1 ]; } + message RequestMulti { + repeated string uuids = 1 [ (validate.rules).repeated .min_items = 1 ]; + } + } + + message IP { + message Register { + message Request { + string uuid = 1 [ (validate.rules).string.min_len = 1 ]; + repeated string ips = 2 [ (validate.rules).repeated .min_items = 1 ]; + } + } + message Remove { + message Request { + repeated string ips = 1 [ (validate.rules).repeated .min_items = 1 ]; + } + } + } + + message Vector { + string uuid = 1; + repeated float vector = 3 [ (validate.rules).repeated .min_items = 2 ]; + repeated string ips = 4; + } + message Vectors { repeated Vector vectors = 1; } + + message Compressed { + message Vector { + string uuid = 1; + bytes vector = 3; + repeated string ips = 4; + } + + message Vectors { repeated Vector vectors = 1; } + } +} + +message Info { + message Index { + message Count { + uint32 stored = 1; + uint32 uncommitted = 2; + bool indexing = 3; + } + message UUID { + message Committed { + string uuid = 1; + } + message Uncommitted { + string uuid = 1; + } + } + } + + message Pod { + string app_name = 1; + string name = 2; + string namespace = 3; + string ip = 4 [ (validate.rules).string.ipv4 = true ]; + CPU cpu = 5; + Memory memory = 6; + Node node = 7; + } + message Node { + string name = 1; + string internal_addr = 2; + string external_addr = 3; + CPU cpu = 4; + Memory memory = 5; + Pods Pods = 6; + } + message CPU { + double limit = 1; + double request = 2; + double usage = 3; + } + message Memory { + double limit = 1; + double request = 2; + double usage = 3; + } + message Pods { + repeated Pod pods = 1 [ (validate.rules).repeated .min_items = 1 ]; + } + message Nodes { + repeated Node nodes = 1 [ (validate.rules).repeated .min_items = 1 ]; + } + message IPs { repeated string ip = 1; } +} + +message Empty {} diff --git a/apis/proto/v1/vald/filter.proto b/apis/proto/v1/vald/filter.proto new file mode 100644 index 0000000000..95b45eade2 --- /dev/null +++ b/apis/proto/v1/vald/filter.proto @@ -0,0 +1,70 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
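The v1 payload introduces per-operation request wrappers (Insert/Update/Upsert/Remove.Request) that carry the vector or ID together with a Config, plus Filter.Config for routing through filters. A construction-only sketch of the new shapes, assuming the Go types generated from the go_package above with standard protoc-gen-go naming; the filter target is hypothetical.

package main

import (
	"fmt"

	"github.com/vdaas/vald/apis/grpc/v1/payload" // go_package above; generated names assumed
)

func main() {
	// v1 write requests wrap the vector together with a per-request Config
	// instead of sending the bare Object.Vector.
	req := &payload.Insert_Request{
		Vector: &payload.Object_Vector{
			Id:     "doc-001",
			Vector: []float32{0.1, 0.2, 0.3},
		},
		Config: &payload.Insert_Config{
			SkipStrictExistCheck: true,
			Filters: &payload.Filter_Config{
				Targets: []string{"ingress-filter.default.svc.cluster.local"}, // hypothetical filter target
			},
		},
	}

	// Search.Config carries request_id alongside the usual search knobs.
	sreq := &payload.Search_Request{
		Vector: req.GetVector().GetVector(),
		Config: &payload.Search_Config{
			RequestId: "req-1",
			Num:       10,
			Radius:    -1,
			Epsilon:   0.01,
		},
	}
	fmt.Println(req.GetVector().GetId(), sreq.GetConfig().GetRequestId())
}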
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package vald.v1; + +option go_package = "github.com/vdaas/vald/apis/grpc/v1/vald"; +option java_multiple_files = true; +option java_package = "org.vdaas.vald.api.v1.vald"; +option java_outer_classname = "ValdFilter"; + +import "apis/proto/v1/payload/payload.proto"; +import "google/api/annotations.proto"; + + +service Filter { + rpc SearchObject(payload.v1.Search.ObjectRequest) returns (payload.v1.Search.Response) { + option (google.api.http) = { + post : "/search/object" + body : "*" + }; + } + rpc StreamSearchObject(stream payload.v1.Search.ObjectRequest) + returns (stream payload.v1.Search.Response) {} + + rpc InsertObject(payload.v1.Object.Blob) returns (payload.v1.Object.Location) { + option (google.api.http) = { + post : "/insert/object" + body : "*" + }; + + } + rpc StreamInsertObject(stream payload.v1.Object.Blob) + returns (stream payload.v1.Object.Location) {} + rpc MultiInsertObject(payload.v1.Object.Blob) returns (payload.v1.Object.Locations) {} + + rpc UpdateObject(payload.v1.Object.Blob) returns (payload.v1.Object.Location) { + option (google.api.http) = { + post : "/update/object" + body : "*" + }; + } + rpc StreamUpdateObject(stream payload.v1.Object.Blob) + returns (stream payload.v1.Object.Location) {} + rpc MultiUpdateObject(payload.v1.Object.Blob) returns (payload.v1.Object.Locations) {} + + rpc UpsertObject(payload.v1.Object.Blob) returns (payload.v1.Object.Location) { + option (google.api.http) = { + post : "/upsert/object" + body : "*" + }; + } + rpc StreamUpsertObject(stream payload.v1.Object.Blob) + returns (stream payload.v1.Object.Location) {} + rpc MultiUpsertObject(payload.v1.Object.Blob) returns (payload.v1.Object.Locations) {} +} diff --git a/apis/proto/v1/vald/insert.proto b/apis/proto/v1/vald/insert.proto new file mode 100644 index 0000000000..bbbd0af669 --- /dev/null +++ b/apis/proto/v1/vald/insert.proto @@ -0,0 +1,42 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
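The vald.v1 Filter service accepts raw Object.Blob payloads, leaving vectorization to an ingress filter. A client sketch, assuming the vald.v1 stubs live under the go_package above with conventional generated names (NewFilterClient); the address and input file are made up.

package main

import (
	"context"
	"io/ioutil"
	"log"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
	vald "github.com/vdaas/vald/apis/grpc/v1/vald" // go_package above; generated names assumed
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("vald-filter-gateway.default.svc.cluster.local:8081", grpc.WithInsecure()) // hypothetical address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	c := vald.NewFilterClient(conn)

	// Send a raw object (e.g. an image); the ingress filter is expected to
	// turn it into a vector before it reaches the core.
	data, err := ioutil.ReadFile("cat.jpg") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	loc, err := c.InsertObject(context.Background(), &payload.Object_Blob{Id: "cat-001", Object: data})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("inserted at %s (%v)", loc.GetUuid(), loc.GetIps())
}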
+// + +syntax = "proto3"; + +package vald.v1; + +option go_package = "github.com/vdaas/vald/apis/grpc/v1/vald"; +option java_multiple_files = true; +option java_package = "org.vdaas.vald.api.v1.vald"; +option java_outer_classname = "ValdInsert"; + +import "apis/proto/v1/payload/payload.proto"; +import "google/api/annotations.proto"; + + +service Insert { + + rpc Insert(payload.v1.Insert.Request) returns (payload.v1.Object.Location) { + option (google.api.http) = { + post : "/insert" + body : "*" + }; + + } + rpc StreamInsert(stream payload.v1.Insert.Request) + returns (stream payload.v1.Object.Location) {} + rpc MultiInsert(payload.v1.Insert.MultiRequest) returns (payload.v1.Object.Locations) {} +} diff --git a/apis/proto/v1/vald/object.proto b/apis/proto/v1/vald/object.proto new file mode 100644 index 0000000000..67ea5c2878 --- /dev/null +++ b/apis/proto/v1/vald/object.proto @@ -0,0 +1,40 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package vald.v1; + +option go_package = "github.com/vdaas/vald/apis/grpc/v1/vald"; +option java_multiple_files = true; +option java_package = "org.vdaas.vald.api.v1.vald"; +option java_outer_classname = "ValdObject"; + +import "apis/proto/v1/payload/payload.proto"; +import "google/api/annotations.proto"; + + +service Object { + + rpc Exists(payload.v1.Object.ID) returns (payload.v1.Object.ID) { + option (google.api.http).get = "/exists/{id}"; + } + rpc GetObject(payload.v1.Object.ID) returns (payload.v1.Object.Vector) { + option (google.api.http).get = "/object/{id}"; + } + rpc StreamGetObject(stream payload.v1.Object.ID) + returns (stream payload.v1.Object.Vector) {} +} diff --git a/apis/proto/v1/vald/remove.proto b/apis/proto/v1/vald/remove.proto new file mode 100644 index 0000000000..7cac732333 --- /dev/null +++ b/apis/proto/v1/vald/remove.proto @@ -0,0 +1,35 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
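Insert, Object and the other vald.v1 services are now split per operation, so a client wires up one small client per concern. A sketch of StreamInsert (bidirectional, one Object.Location back per request) followed by an Exists lookup, under the same naming assumptions as the previous sketches; the address is hypothetical.

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
	vald "github.com/vdaas/vald/apis/grpc/v1/vald" // generated names assumed
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:8081", grpc.WithInsecure()) // hypothetical address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// StreamInsert is bidirectional: one Object.Location comes back per request sent.
	stream, err := vald.NewInsertClient(conn).StreamInsert(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		for i := 0; i < 3; i++ {
			_ = stream.Send(&payload.Insert_Request{
				Vector: &payload.Object_Vector{
					Id:     fmt.Sprintf("doc-%03d", i),
					Vector: []float32{float32(i), 0.2, 0.3},
				},
			})
		}
		_ = stream.CloseSend()
	}()
	for {
		loc, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Println("stored:", loc.GetUuid())
	}

	// Object.Exists looks a vector up by ID.
	if id, err := vald.NewObjectClient(conn).Exists(context.Background(), &payload.Object_ID{Id: "doc-000"}); err == nil {
		log.Println("exists:", id.GetId())
	}
}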
+// + +syntax = "proto3"; + +package vald.v1; + +option go_package = "github.com/vdaas/vald/apis/grpc/v1/vald"; +option java_multiple_files = true; +option java_package = "org.vdaas.vald.api.v1.vald"; +option java_outer_classname = "ValdRemove"; + +import "apis/proto/v1/payload/payload.proto"; +import "google/api/annotations.proto"; + + +service Remove { + + rpc Remove(payload.v1.Remove.Request) returns (payload.v1.Object.Location) {} + rpc StreamRemove(stream payload.v1.Remove.Request) returns (stream payload.v1.Object.Location) {} + rpc MultiRemove(payload.v1.Remove.MultiRequest) returns (payload.v1.Object.Locations) {} +} diff --git a/apis/proto/v1/vald/search.proto b/apis/proto/v1/vald/search.proto new file mode 100644 index 0000000000..9de505a340 --- /dev/null +++ b/apis/proto/v1/vald/search.proto @@ -0,0 +1,53 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package vald.v1; + +option go_package = "github.com/vdaas/vald/apis/grpc/v1/vald"; +option java_multiple_files = true; +option java_package = "org.vdaas.vald.api.v1.vald"; +option java_outer_classname = "ValdSearch"; + +import "apis/proto/v1/payload/payload.proto"; +import "google/api/annotations.proto"; + + +service Search { + + rpc Search(payload.v1.Search.Request) returns (payload.v1.Search.Response) { + option (google.api.http) = { + post : "/search" + body : "*" + }; + } + rpc SearchByID(payload.v1.Search.IDRequest) returns (payload.v1.Search.Response) { + option (google.api.http) = { + post : "/search/id" + body : "*" + }; + } + rpc StreamSearch(stream payload.v1.Search.Request) + returns (stream payload.v1.Search.Response) {} + rpc StreamSearchByID(stream payload.v1.Search.IDRequest) + returns (stream payload.v1.Search.Response) {} + rpc MultiSearch(payload.v1.Search.MultiRequest) + returns (payload.v1.Search.Responses) {} + rpc MultiSearchByID(payload.v1.Search.MultiIDRequest) + returns (payload.v1.Search.Responses) {} + +} diff --git a/apis/proto/v1/vald/update.proto b/apis/proto/v1/vald/update.proto new file mode 100644 index 0000000000..6ec04090bd --- /dev/null +++ b/apis/proto/v1/vald/update.proto @@ -0,0 +1,42 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
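Search gains MultiSearch/MultiSearchByID, which batch requests and return Search.Responses keyed by request_id, and Remove now takes a Remove.Request wrapper. A client sketch under the same naming assumptions as above; the address is hypothetical.

package main

import (
	"context"
	"log"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
	vald "github.com/vdaas/vald/apis/grpc/v1/vald" // generated names assumed
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:8081", grpc.WithInsecure()) // hypothetical address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// MultiSearch batches several Search.Requests into one call; request_id in each
	// Config is how callers match results back to queries.
	res, err := vald.NewSearchClient(conn).MultiSearch(context.Background(), &payload.Search_MultiRequest{
		Requests: []*payload.Search_Request{
			{Vector: []float32{0.1, 0.2, 0.3}, Config: &payload.Search_Config{RequestId: "q-1", Num: 5}},
			{Vector: []float32{0.4, 0.5, 0.6}, Config: &payload.Search_Config{RequestId: "q-2", Num: 5}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range res.GetResponses() {
		log.Printf("%s -> %d hits", r.GetRequestId(), len(r.GetResults()))
	}

	// Remove also takes a Request wrapper now (Object.ID plus Config).
	loc, err := vald.NewRemoveClient(conn).Remove(context.Background(), &payload.Remove_Request{
		Id: &payload.Object_ID{Id: "doc-001"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("removed from:", loc.GetName())
}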
+// + +syntax = "proto3"; + +package vald.v1; + +option go_package = "github.com/vdaas/vald/apis/grpc/v1/vald"; +option java_multiple_files = true; +option java_package = "org.vdaas.vald.api.v1.vald"; +option java_outer_classname = "ValdUpdate"; + +import "apis/proto/v1/payload/payload.proto"; +import "google/api/annotations.proto"; + + +service Update { + + rpc Update(payload.v1.Update.Request) returns (payload.v1.Object.Location) { + option (google.api.http) = { + post : "/update" + body : "*" + }; + + } + rpc StreamUpdate(stream payload.v1.Update.Request) + returns (stream payload.v1.Object.Location) {} + rpc MultiUpdate(payload.v1.Update.MultiRequest) returns (payload.v1.Object.Locations) {} +} diff --git a/apis/proto/v1/vald/upsert.proto b/apis/proto/v1/vald/upsert.proto new file mode 100644 index 0000000000..42d50fc526 --- /dev/null +++ b/apis/proto/v1/vald/upsert.proto @@ -0,0 +1,42 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package vald.v1; + +option go_package = "github.com/vdaas/vald/apis/grpc/v1/vald"; +option java_multiple_files = true; +option java_package = "org.vdaas.vald.api.v1.vald"; +option java_outer_classname = "ValdUpsert"; + +import "apis/proto/v1/payload/payload.proto"; +import "google/api/annotations.proto"; + + +service Upsert { + + rpc Upsert(payload.v1.Upsert.Request) returns (payload.v1.Object.Location) { + option (google.api.http) = { + post : "/upsert" + body : "*" + }; + + } + rpc StreamUpsert(stream payload.v1.Upsert.Request) + returns (stream payload.v1.Object.Location) {} + rpc MultiUpsert(payload.v1.Upsert.MultiRequest) returns (payload.v1.Object.Locations) {} +} diff --git a/apis/swagger/filter/egress/egress/egress_filter.swagger.json b/apis/swagger/filter/egress/apis/proto/filter/egress/egress_filter.swagger.json similarity index 93% rename from apis/swagger/filter/egress/egress/egress_filter.swagger.json rename to apis/swagger/filter/egress/apis/proto/filter/egress/egress_filter.swagger.json index 4874a4bf17..0750236a94 100644 --- a/apis/swagger/filter/egress/egress/egress_filter.swagger.json +++ b/apis/swagger/filter/egress/apis/proto/filter/egress/egress_filter.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "egress/egress_filter.proto", + "title": "apis/proto/filter/egress/egress_filter.proto", "version": "version not set" }, "consumes": [ @@ -27,6 +27,9 @@ "SearchResponse": { "type": "object", "properties": { + "requestId": { + "type": "string" + }, "results": { "type": "array", "items": { diff --git a/apis/swagger/gateway/filter/apis/proto/gateway/filter/filter.swagger.json b/apis/swagger/gateway/filter/apis/proto/gateway/filter/filter.swagger.json new file mode 100644 index 0000000000..af12d7870b --- /dev/null +++ b/apis/swagger/gateway/filter/apis/proto/gateway/filter/filter.swagger.json @@ -0,0 +1,305 @@ +{ + "swagger": "2.0", + "info": { + "title": "apis/proto/gateway/filter/filter.proto", + "version": 
"version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/insert/object": { + "post": { + "operationId": "Filter_InsertObject", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/payloadObjectLocation" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ObjectBlob" + } + } + ], + "tags": [ + "Filter" + ] + } + }, + "/search/object": { + "post": { + "operationId": "Filter_SearchObject", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/SearchResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SearchObjectRequest" + } + } + ], + "tags": [ + "Filter" + ] + } + }, + "/update/object": { + "post": { + "operationId": "Filter_UpdateObject", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/payloadObjectLocation" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ObjectBlob" + } + } + ], + "tags": [ + "Filter" + ] + } + }, + "/upsert/object": { + "post": { + "operationId": "Filter_UpsertObject", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/payloadObjectLocation" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ObjectBlob" + } + } + ], + "tags": [ + "Filter" + ] + } + } + }, + "definitions": { + "ObjectBlob": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "object": { + "type": "string", + "format": "byte" + } + } + }, + "ObjectDistance": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "distance": { + "type": "number", + "format": "float" + } + } + }, + "SearchConfig": { + "type": "object", + "properties": { + "requestId": { + "type": "string" + }, + "num": { + "type": "integer", + "format": "int64" + }, + "radius": { + "type": "number", + "format": "float" + }, + "epsilon": { + "type": "number", + "format": "float" + }, + "timeout": { + "type": "string", + "format": "int64" + } + } + }, + "SearchObjectRequest": { + "type": "object", + "properties": { + "object": { + "type": "string", + "format": "byte" + }, + "config": { + "$ref": "#/definitions/SearchConfig" + } + } + }, + "SearchResponse": { + "type": "object", + "properties": { + "requestId": { + "type": "string" + }, + "results": { + "type": "array", + "items": { + "$ref": "#/definitions/ObjectDistance" + } + } + } + }, + "payloadObjectLocation": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "uuid": { + "type": "string" + }, + "ips": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "payloadObjectLocations": { + "type": 
"object", + "properties": { + "locations": { + "type": "array", + "items": { + "$ref": "#/definitions/payloadObjectLocation" + } + } + } + }, + "protobufAny": { + "type": "object", + "properties": { + "typeUrl": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, + "runtimeError": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpcCode": { + "type": "integer", + "format": "int32" + }, + "httpCode": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "httpStatus": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + } + } +} diff --git a/apis/swagger/gateway/vald/vald/vald.swagger.json b/apis/swagger/gateway/vald/apis/proto/gateway/vald/vald.swagger.json similarity index 93% rename from apis/swagger/gateway/vald/vald/vald.swagger.json rename to apis/swagger/gateway/vald/apis/proto/gateway/vald/vald.swagger.json index 14f2bfd2a8..d3aecc04ea 100644 --- a/apis/swagger/gateway/vald/vald/vald.swagger.json +++ b/apis/swagger/gateway/vald/apis/proto/gateway/vald/vald.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "vald/vald.proto", + "title": "apis/proto/gateway/vald/vald.proto", "version": "version not set" }, "consumes": [ @@ -48,7 +48,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/payloadObjectLocation" } }, "default": { @@ -80,7 +80,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadBackupMetaVector" + "$ref": "#/definitions/ObjectVector" } }, "default": { @@ -110,7 +110,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/payloadObjectLocation" } }, "default": { @@ -204,7 +204,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/payloadObjectLocation" } }, "default": { @@ -236,7 +236,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/payloadObjectLocation" } }, "default": { @@ -301,6 +301,9 @@ "SearchConfig": { "type": "object", "properties": { + "requestId": { + "type": "string" + }, "num": { "type": "integer", "format": "int64" @@ -333,6 +336,9 @@ "SearchResponse": { "type": "object", "properties": { + "requestId": { + "type": "string" + }, "results": { "type": "array", "items": { @@ -341,22 +347,15 @@ } } }, - "payloadBackupMetaVector": { + "payloadObjectLocation": { "type": "object", "properties": { - "uuid": { + "name": { "type": "string" }, - "meta": { + "uuid": { "type": "string" }, - "vector": { - "type": "array", - "items": { - "type": "number", - "format": "float" - } - }, "ips": { "type": "array", "items": { @@ -365,8 +364,16 @@ } } }, - "payloadEmpty": { - "type": "object" + "payloadObjectLocations": { + "type": "object", + "properties": { + "locations": { + "type": "array", + "items": { + "$ref": "#/definitions/payloadObjectLocation" + } + } + } }, "payloadSearchRequest": { "type": "object", diff --git a/apis/swagger/payload/payload.swagger.json 
b/apis/swagger/payload/apis/proto/payload/payload.swagger.json similarity index 94% rename from apis/swagger/payload/payload.swagger.json rename to apis/swagger/payload/apis/proto/payload/payload.swagger.json index e10d72564a..e7a62f2ab5 100644 --- a/apis/swagger/payload/payload.swagger.json +++ b/apis/swagger/payload/apis/proto/payload/payload.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "payload.proto", + "title": "apis/proto/payload/payload.proto", "version": "version not set" }, "consumes": [ diff --git a/apis/swagger/v1/agent/core/apis/proto/v1/agent/core/agent.swagger.json b/apis/swagger/v1/agent/core/apis/proto/v1/agent/core/agent.swagger.json new file mode 100644 index 0000000000..3da1935f69 --- /dev/null +++ b/apis/swagger/v1/agent/core/apis/proto/v1/agent/core/agent.swagger.json @@ -0,0 +1,175 @@ +{ + "swagger": "2.0", + "info": { + "title": "apis/proto/v1/agent/core/agent.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/index/create": { + "get": { + "operationId": "Agent_CreateIndex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1Empty" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "poolSize", + "in": "query", + "required": false, + "type": "integer", + "format": "int64" + } + ], + "tags": [ + "Agent" + ] + } + }, + "/index/createandsave": { + "get": { + "operationId": "Agent_CreateAndSaveIndex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1Empty" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "poolSize", + "in": "query", + "required": false, + "type": "integer", + "format": "int64" + } + ], + "tags": [ + "Agent" + ] + } + }, + "/index/info": { + "get": { + "operationId": "Agent_IndexInfo", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/IndexCount" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "tags": [ + "Agent" + ] + } + }, + "/index/save": { + "get": { + "operationId": "Agent_SaveIndex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1Empty" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "tags": [ + "Agent" + ] + } + } + }, + "definitions": { + "IndexCount": { + "type": "object", + "properties": { + "stored": { + "type": "integer", + "format": "int64" + }, + "uncommitted": { + "type": "integer", + "format": "int64" + }, + "indexing": { + "type": "boolean" + } + } + }, + "protobufAny": { + "type": "object", + "properties": { + "typeUrl": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, + "runtimeError": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "v1Empty": { + "type": "object" + } + } +} diff --git 
a/apis/swagger/agent/sidecar/sidecar/sidecar.swagger.json b/apis/swagger/v1/agent/sidecar/apis/proto/v1/agent/sidecar/sidecar.swagger.json similarity index 93% rename from apis/swagger/agent/sidecar/sidecar/sidecar.swagger.json rename to apis/swagger/v1/agent/sidecar/apis/proto/v1/agent/sidecar/sidecar.swagger.json index a4e536e2ba..5b167cef79 100644 --- a/apis/swagger/agent/sidecar/sidecar/sidecar.swagger.json +++ b/apis/swagger/v1/agent/sidecar/apis/proto/v1/agent/sidecar/sidecar.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "sidecar/sidecar.proto", + "title": "apis/proto/v1/agent/sidecar/sidecar.proto", "version": "version not set" }, "consumes": [ diff --git a/apis/swagger/discoverer/discoverer.swagger.json b/apis/swagger/v1/discoverer/apis/proto/v1/discoverer/discoverer.swagger.json similarity index 95% rename from apis/swagger/discoverer/discoverer.swagger.json rename to apis/swagger/v1/discoverer/apis/proto/v1/discoverer/discoverer.swagger.json index c2739462a1..f8d303fc2f 100644 --- a/apis/swagger/discoverer/discoverer.swagger.json +++ b/apis/swagger/v1/discoverer/apis/proto/v1/discoverer/discoverer.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "discoverer.proto", + "title": "apis/proto/v1/discoverer/discoverer.proto", "version": "version not set" }, "consumes": [ @@ -34,7 +34,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/payloadDiscovererRequest" + "$ref": "#/definitions/v1DiscovererRequest" } } ], @@ -66,7 +66,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/payloadDiscovererRequest" + "$ref": "#/definitions/v1DiscovererRequest" } } ], @@ -182,20 +182,6 @@ } } }, - "payloadDiscovererRequest": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "node": { - "type": "string" - } - } - }, "protobufAny": { "type": "object", "properties": { @@ -228,6 +214,20 @@ } } } + }, + "v1DiscovererRequest": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "node": { + "type": "string" + } + } } } } diff --git a/apis/swagger/errors/errors.swagger.json b/apis/swagger/v1/errors/apis/proto/v1/errors/errors.swagger.json similarity index 94% rename from apis/swagger/errors/errors.swagger.json rename to apis/swagger/v1/errors/apis/proto/v1/errors/errors.swagger.json index 4dfff99df8..a4f1e1ec5a 100644 --- a/apis/swagger/errors/errors.swagger.json +++ b/apis/swagger/v1/errors/apis/proto/v1/errors/errors.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "errors.proto", + "title": "apis/proto/v1/errors/errors.proto", "version": "version not set" }, "consumes": [ diff --git a/apis/swagger/filter/ingress/ingress/ingress_filter.swagger.json b/apis/swagger/v1/filter/egress/apis/proto/v1/filter/egress/egress_filter.swagger.json similarity index 50% rename from apis/swagger/filter/ingress/ingress/ingress_filter.swagger.json rename to apis/swagger/v1/filter/egress/apis/proto/v1/filter/egress/egress_filter.swagger.json index 0f7da1b406..1cf8c542e1 100644 --- a/apis/swagger/filter/ingress/ingress/ingress_filter.swagger.json +++ b/apis/swagger/v1/filter/egress/apis/proto/v1/filter/egress/egress_filter.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "ingress/ingress_filter.proto", + "title": "apis/proto/v1/filter/egress/egress_filter.proto", "version": "version not set" }, "consumes": [ @@ -12,6 +12,18 @@ ], "paths": {}, "definitions": { + 
"ObjectDistance": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "distance": { + "type": "number", + "format": "float" + } + } + }, "protobufAny": { "type": "object", "properties": { @@ -44,6 +56,31 @@ } } } + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpcCode": { + "type": "integer", + "format": "int32" + }, + "httpCode": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "httpStatus": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } } } } diff --git a/apis/swagger/v1/filter/ingress/apis/proto/v1/filter/ingress/ingress_filter.swagger.json b/apis/swagger/v1/filter/ingress/apis/proto/v1/filter/ingress/ingress_filter.swagger.json new file mode 100644 index 0000000000..d9d1c0c9ce --- /dev/null +++ b/apis/swagger/v1/filter/ingress/apis/proto/v1/filter/ingress/ingress_filter.swagger.json @@ -0,0 +1,166 @@ +{ + "swagger": "2.0", + "info": { + "title": "apis/proto/v1/filter/ingress/ingress_filter.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/object": { + "post": { + "operationId": "IngressFilter_GenVector", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1ObjectVector" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ObjectBlob" + } + } + ], + "tags": [ + "IngressFilter" + ] + } + }, + "/vector": { + "post": { + "operationId": "IngressFilter_FilterVector", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1ObjectVector" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1ObjectVector" + } + } + ], + "tags": [ + "IngressFilter" + ] + } + } + }, + "definitions": { + "ObjectBlob": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "object": { + "type": "string", + "format": "byte" + } + } + }, + "protobufAny": { + "type": "object", + "properties": { + "typeUrl": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, + "runtimeError": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpcCode": { + "type": "integer", + "format": "int32" + }, + "httpCode": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "httpStatus": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "v1ObjectVector": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "vector": { + "type": "array", + "items": { + "type": "number", + "format": "float" + } + } + } + } + } +} diff --git a/apis/swagger/agent/core/core/agent.swagger.json 
b/apis/swagger/v1/gateway/vald/apis/proto/v1/gateway/vald/vald.swagger.json similarity index 71% rename from apis/swagger/agent/core/core/agent.swagger.json rename to apis/swagger/v1/gateway/vald/apis/proto/v1/gateway/vald/vald.swagger.json index 6c29cff38e..ef37e00d09 100644 --- a/apis/swagger/agent/core/core/agent.swagger.json +++ b/apis/swagger/v1/gateway/vald/apis/proto/v1/gateway/vald/vald.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "core/agent.proto", + "title": "apis/proto/v1/gateway/vald/vald.proto", "version": "version not set" }, "consumes": [ @@ -13,7 +13,7 @@ "paths": { "/exists/{id}": { "get": { - "operationId": "Agent_Exists", + "operationId": "Vald_Exists", "responses": { "200": { "description": "A successful response.", @@ -37,18 +37,18 @@ } ], "tags": [ - "Agent" + "Vald" ] } }, - "/index/create": { - "get": { - "operationId": "Agent_CreateIndex", + "/insert": { + "post": { + "operationId": "Vald_Insert", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1ObjectLocation" } }, "default": { @@ -60,101 +60,27 @@ }, "parameters": [ { - "name": "poolSize", - "in": "query", - "required": false, - "type": "integer", - "format": "int64" - } - ], - "tags": [ - "Agent" - ] - } - }, - "/index/createandsave": { - "get": { - "operationId": "Agent_CreateAndSaveIndex", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/payloadEmpty" - } - }, - "default": { - "description": "An unexpected error response.", + "name": "body", + "in": "body", + "required": true, "schema": { - "$ref": "#/definitions/runtimeError" + "$ref": "#/definitions/v1ObjectVector" } } - }, - "parameters": [ - { - "name": "poolSize", - "in": "query", - "required": false, - "type": "integer", - "format": "int64" - } ], "tags": [ - "Agent" + "Vald" ] } }, - "/index/info": { - "get": { - "operationId": "Agent_IndexInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/IndexCount" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Agent" - ] - } - }, - "/index/save": { + "/object/{id}": { "get": { - "operationId": "Agent_SaveIndex", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/payloadEmpty" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "tags": [ - "Agent" - ] - } - }, - "/insert": { - "post": { - "operationId": "Agent_Insert", + "operationId": "Vald_GetObject", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1ObjectVector" } }, "default": { @@ -166,27 +92,25 @@ }, "parameters": [ { - "name": "body", - "in": "body", + "name": "id", + "in": "path", "required": true, - "schema": { - "$ref": "#/definitions/ObjectVector" - } + "type": "string" } ], "tags": [ - "Agent" + "Vald" ] } }, - "/object/{id}": { - "get": { - "operationId": "Agent_GetObject", + "/remove/{id}": { + "delete": { + "operationId": "Vald_Remove", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/ObjectVector" + "$ref": "#/definitions/v1ObjectLocation" } }, "default": { @@ -205,18 +129,18 @@ } ], "tags": [ - "Agent" + "Vald" 
] } }, - "/remove/{id}": { - "delete": { - "operationId": "Agent_Remove", + "/search": { + "post": { + "operationId": "Vald_Search", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/SearchResponse" } }, "default": { @@ -228,20 +152,22 @@ }, "parameters": [ { - "name": "id", - "in": "path", + "name": "body", + "in": "body", "required": true, - "type": "string" + "schema": { + "$ref": "#/definitions/v1SearchRequest" + } } ], "tags": [ - "Agent" + "Vald" ] } }, - "/search": { + "/search/id": { "post": { - "operationId": "Agent_Search", + "operationId": "Vald_SearchByID", "responses": { "200": { "description": "A successful response.", @@ -262,23 +188,23 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/payloadSearchRequest" + "$ref": "#/definitions/SearchIDRequest" } } ], "tags": [ - "Agent" + "Vald" ] } }, - "/search/id": { + "/update": { "post": { - "operationId": "Agent_SearchByID", + "operationId": "Vald_Update", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/SearchResponse" + "$ref": "#/definitions/v1ObjectLocation" } }, "default": { @@ -294,23 +220,23 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/SearchIDRequest" + "$ref": "#/definitions/v1ObjectVector" } } ], "tags": [ - "Agent" + "Vald" ] } }, - "/update": { + "/upsert": { "post": { - "operationId": "Agent_Update", + "operationId": "Vald_Upsert", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1ObjectLocation" } }, "default": { @@ -326,33 +252,17 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/ObjectVector" + "$ref": "#/definitions/v1ObjectVector" } } ], "tags": [ - "Agent" + "Vald" ] } } }, "definitions": { - "IndexCount": { - "type": "object", - "properties": { - "stored": { - "type": "integer", - "format": "int64" - }, - "uncommitted": { - "type": "integer", - "format": "int64" - }, - "indexing": { - "type": "boolean" - } - } - }, "ObjectDistance": { "type": "object", "properties": { @@ -373,42 +283,6 @@ } } }, - "ObjectVector": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "vector": { - "type": "array", - "items": { - "type": "number", - "format": "float" - } - } - } - }, - "SearchConfig": { - "type": "object", - "properties": { - "num": { - "type": "integer", - "format": "int64" - }, - "radius": { - "type": "number", - "format": "float" - }, - "epsilon": { - "type": "number", - "format": "float" - }, - "timeout": { - "type": "string", - "format": "int64" - } - } - }, "SearchIDRequest": { "type": "object", "properties": { @@ -416,13 +290,16 @@ "type": "string" }, "config": { - "$ref": "#/definitions/SearchConfig" + "$ref": "#/definitions/v1SearchConfig" } } }, "SearchResponse": { "type": "object", "properties": { + "requestId": { + "type": "string" + }, "results": { "type": "array", "items": { @@ -431,24 +308,6 @@ } } }, - "payloadEmpty": { - "type": "object" - }, - "payloadSearchRequest": { - "type": "object", - "properties": { - "vector": { - "type": "array", - "items": { - "type": "number", - "format": "float" - } - }, - "config": { - "$ref": "#/definitions/SearchConfig" - } - } - }, "protobufAny": { "type": "object", "properties": { @@ -506,6 +365,102 @@ } } } + }, + "v1FilterConfig": { + "type": "object", + "properties": { + "targets": { + "type": "array", + "items": { + "type": "string" + 
} + } + } + }, + "v1ObjectLocation": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "uuid": { + "type": "string" + }, + "ips": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1ObjectLocations": { + "type": "object", + "properties": { + "locations": { + "type": "array", + "items": { + "$ref": "#/definitions/v1ObjectLocation" + } + } + } + }, + "v1ObjectVector": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "vector": { + "type": "array", + "items": { + "type": "number", + "format": "float" + } + } + } + }, + "v1SearchConfig": { + "type": "object", + "properties": { + "requestId": { + "type": "string" + }, + "num": { + "type": "integer", + "format": "int64" + }, + "radius": { + "type": "number", + "format": "float" + }, + "epsilon": { + "type": "number", + "format": "float" + }, + "timeout": { + "type": "string", + "format": "int64" + }, + "filters": { + "$ref": "#/definitions/v1FilterConfig" + } + } + }, + "v1SearchRequest": { + "type": "object", + "properties": { + "vector": { + "type": "array", + "items": { + "type": "number", + "format": "float" + } + }, + "config": { + "$ref": "#/definitions/v1SearchConfig" + } + } } } } diff --git a/apis/swagger/manager/backup/backup/backup_manager.swagger.json b/apis/swagger/v1/manager/backup/apis/proto/v1/manager/backup/backup_manager.swagger.json similarity index 91% rename from apis/swagger/manager/backup/backup/backup_manager.swagger.json rename to apis/swagger/v1/manager/backup/apis/proto/v1/manager/backup/backup_manager.swagger.json index f9e333e136..12b8f94153 100644 --- a/apis/swagger/manager/backup/backup/backup_manager.swagger.json +++ b/apis/swagger/v1/manager/backup/apis/proto/v1/manager/backup/backup_manager.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "backup/backup_manager.proto", + "title": "apis/proto/v1/manager/backup/backup_manager.proto", "version": "version not set" }, "consumes": [ @@ -18,7 +18,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -50,7 +50,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -80,7 +80,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -112,7 +112,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -174,7 +174,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -190,7 +190,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/BackupCompressedMetaVector" + "$ref": "#/definitions/BackupCompressedVector" } } ], @@ -206,7 +206,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -222,7 +222,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/BackupCompressedMetaVectors" + "$ref": "#/definitions/BackupCompressedVectors" } } ], @@ -238,7 +238,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/BackupCompressedMetaVector" + "$ref": "#/definitions/BackupCompressedVector" } }, 
"default": { @@ -263,15 +263,12 @@ } }, "definitions": { - "BackupCompressedMetaVector": { + "BackupCompressedVector": { "type": "object", "properties": { "uuid": { "type": "string" }, - "meta": { - "type": "string" - }, "vector": { "type": "string", "format": "byte" @@ -284,13 +281,13 @@ } } }, - "BackupCompressedMetaVectors": { + "BackupCompressedVectors": { "type": "object", "properties": { "vectors": { "type": "array", "items": { - "$ref": "#/definitions/BackupCompressedMetaVector" + "$ref": "#/definitions/BackupCompressedVector" } } } @@ -342,9 +339,6 @@ } } }, - "payloadEmpty": { - "type": "object" - }, "protobufAny": { "type": "object", "properties": { @@ -377,6 +371,9 @@ } } } + }, + "v1Empty": { + "type": "object" } } } diff --git a/apis/swagger/manager/compressor/compressor/compressor.swagger.json b/apis/swagger/v1/manager/compressor/apis/proto/v1/manager/compressor/compressor.swagger.json similarity index 91% rename from apis/swagger/manager/compressor/compressor/compressor.swagger.json rename to apis/swagger/v1/manager/compressor/apis/proto/v1/manager/compressor/compressor.swagger.json index 002e9f2261..17f769c461 100644 --- a/apis/swagger/manager/compressor/compressor/compressor.swagger.json +++ b/apis/swagger/v1/manager/compressor/apis/proto/v1/manager/compressor/compressor.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "compressor/compressor.proto", + "title": "apis/proto/v1/manager/compressor/compressor.proto", "version": "version not set" }, "consumes": [ @@ -18,7 +18,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -50,7 +50,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -80,7 +80,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -112,7 +112,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -174,7 +174,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -190,7 +190,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/payloadBackupMetaVector" + "$ref": "#/definitions/v1BackupVector" } } ], @@ -206,7 +206,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -222,7 +222,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/payloadBackupMetaVectors" + "$ref": "#/definitions/v1BackupVectors" } } ], @@ -238,7 +238,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadBackupMetaVector" + "$ref": "#/definitions/v1BackupVector" } }, "default": { @@ -310,44 +310,6 @@ } } }, - "payloadBackupMetaVector": { - "type": "object", - "properties": { - "uuid": { - "type": "string" - }, - "meta": { - "type": "string" - }, - "vector": { - "type": "array", - "items": { - "type": "number", - "format": "float" - } - }, - "ips": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "payloadBackupMetaVectors": { - "type": "object", - "properties": { - "vectors": { - "type": "array", - "items": { - "$ref": 
"#/definitions/payloadBackupMetaVector" - } - } - } - }, - "payloadEmpty": { - "type": "object" - }, "protobufAny": { "type": "object", "properties": { @@ -380,6 +342,41 @@ } } } + }, + "v1BackupVector": { + "type": "object", + "properties": { + "uuid": { + "type": "string" + }, + "vector": { + "type": "array", + "items": { + "type": "number", + "format": "float" + } + }, + "ips": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1BackupVectors": { + "type": "object", + "properties": { + "vectors": { + "type": "array", + "items": { + "$ref": "#/definitions/v1BackupVector" + } + } + } + }, + "v1Empty": { + "type": "object" } } } diff --git a/apis/swagger/manager/index/index/index_manager.swagger.json b/apis/swagger/v1/manager/index/apis/proto/v1/manager/index/index_manager.swagger.json similarity index 96% rename from apis/swagger/manager/index/index/index_manager.swagger.json rename to apis/swagger/v1/manager/index/apis/proto/v1/manager/index/index_manager.swagger.json index c78463d97d..cfe89f3008 100644 --- a/apis/swagger/manager/index/index/index_manager.swagger.json +++ b/apis/swagger/v1/manager/index/apis/proto/v1/manager/index/index_manager.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "index/index_manager.proto", + "title": "apis/proto/v1/manager/index/index_manager.proto", "version": "version not set" }, "consumes": [ diff --git a/apis/swagger/manager/replication/agent/replication/agent/replication_manager.swagger.json b/apis/swagger/v1/manager/replication/agent/apis/proto/v1/manager/replication/agent/replication_manager.swagger.json similarity index 93% rename from apis/swagger/manager/replication/agent/replication/agent/replication_manager.swagger.json rename to apis/swagger/v1/manager/replication/agent/apis/proto/v1/manager/replication/agent/replication_manager.swagger.json index b22d06119f..924585a35c 100644 --- a/apis/swagger/manager/replication/agent/replication/agent/replication_manager.swagger.json +++ b/apis/swagger/v1/manager/replication/agent/apis/proto/v1/manager/replication/agent/replication_manager.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "replication/agent/replication_manager.proto", + "title": "apis/proto/v1/manager/replication/agent/replication_manager.proto", "version": "version not set" }, "consumes": [ @@ -40,7 +40,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -62,7 +62,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -102,9 +102,6 @@ } } }, - "payloadEmpty": { - "type": "object" - }, "protobufAny": { "type": "object", "properties": { @@ -137,6 +134,9 @@ } } } + }, + "v1Empty": { + "type": "object" } } } diff --git a/apis/swagger/manager/replication/controller/replication/controller/replication_manager.swagger.json b/apis/swagger/v1/manager/replication/controller/apis/proto/v1/manager/replication/controller/replication_manager.swagger.json similarity index 95% rename from apis/swagger/manager/replication/controller/replication/controller/replication_manager.swagger.json rename to apis/swagger/v1/manager/replication/controller/apis/proto/v1/manager/replication/controller/replication_manager.swagger.json index 3e449e8318..d6ac8609bb 100644 --- a/apis/swagger/manager/replication/controller/replication/controller/replication_manager.swagger.json +++ 
b/apis/swagger/v1/manager/replication/controller/apis/proto/v1/manager/replication/controller/replication_manager.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "replication/controller/replication_manager.proto", + "title": "apis/proto/v1/manager/replication/controller/replication_manager.proto", "version": "version not set" }, "consumes": [ diff --git a/apis/swagger/meta/meta.swagger.json b/apis/swagger/v1/meta/apis/proto/v1/meta/meta.swagger.json similarity index 97% rename from apis/swagger/meta/meta.swagger.json rename to apis/swagger/v1/meta/apis/proto/v1/meta/meta.swagger.json index 75f99495a6..a251f33f1d 100644 --- a/apis/swagger/meta/meta.swagger.json +++ b/apis/swagger/v1/meta/apis/proto/v1/meta/meta.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "meta.proto", + "title": "apis/proto/v1/meta/meta.proto", "version": "version not set" }, "consumes": [ @@ -150,7 +150,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -204,7 +204,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/payloadEmpty" + "$ref": "#/definitions/v1Empty" } }, "default": { @@ -270,9 +270,6 @@ } } }, - "payloadEmpty": { - "type": "object" - }, "protobufAny": { "type": "object", "properties": { @@ -305,6 +302,9 @@ } } } + }, + "v1Empty": { + "type": "object" } } } diff --git a/apis/swagger/manager/traffic/traffic/traffic_manager.swagger.json b/apis/swagger/v1/payload/apis/proto/v1/payload/payload.swagger.json similarity index 94% rename from apis/swagger/manager/traffic/traffic/traffic_manager.swagger.json rename to apis/swagger/v1/payload/apis/proto/v1/payload/payload.swagger.json index de6473e8e5..2917389ff3 100644 --- a/apis/swagger/manager/traffic/traffic/traffic_manager.swagger.json +++ b/apis/swagger/v1/payload/apis/proto/v1/payload/payload.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "traffic/traffic_manager.proto", + "title": "apis/proto/v1/payload/payload.proto", "version": "version not set" }, "consumes": [ diff --git a/apis/swagger/v1/vald/apis/proto/v1/vald/filter.swagger.json b/apis/swagger/v1/vald/apis/proto/v1/vald/filter.swagger.json new file mode 100644 index 0000000000..3c25d08fbe --- /dev/null +++ b/apis/swagger/v1/vald/apis/proto/v1/vald/filter.swagger.json @@ -0,0 +1,319 @@ +{ + "swagger": "2.0", + "info": { + "title": "apis/proto/v1/vald/filter.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/insert/object": { + "post": { + "operationId": "Filter_InsertObject", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1ObjectLocation" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ObjectBlob" + } + } + ], + "tags": [ + "Filter" + ] + } + }, + "/search/object": { + "post": { + "operationId": "Filter_SearchObject", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/SearchResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": 
"body", + "required": true, + "schema": { + "$ref": "#/definitions/SearchObjectRequest" + } + } + ], + "tags": [ + "Filter" + ] + } + }, + "/update/object": { + "post": { + "operationId": "Filter_UpdateObject", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1ObjectLocation" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ObjectBlob" + } + } + ], + "tags": [ + "Filter" + ] + } + }, + "/upsert/object": { + "post": { + "operationId": "Filter_UpsertObject", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1ObjectLocation" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ObjectBlob" + } + } + ], + "tags": [ + "Filter" + ] + } + } + }, + "definitions": { + "ObjectBlob": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "object": { + "type": "string", + "format": "byte" + } + } + }, + "ObjectDistance": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "distance": { + "type": "number", + "format": "float" + } + } + }, + "SearchObjectRequest": { + "type": "object", + "properties": { + "object": { + "type": "string", + "format": "byte" + }, + "config": { + "$ref": "#/definitions/v1SearchConfig" + } + } + }, + "SearchResponse": { + "type": "object", + "properties": { + "requestId": { + "type": "string" + }, + "results": { + "type": "array", + "items": { + "$ref": "#/definitions/ObjectDistance" + } + } + } + }, + "protobufAny": { + "type": "object", + "properties": { + "typeUrl": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, + "runtimeError": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpcCode": { + "type": "integer", + "format": "int32" + }, + "httpCode": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "httpStatus": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "v1FilterConfig": { + "type": "object", + "properties": { + "targets": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1ObjectLocation": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "uuid": { + "type": "string" + }, + "ips": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1ObjectLocations": { + "type": "object", + "properties": { + "locations": { + "type": "array", + "items": { + "$ref": "#/definitions/v1ObjectLocation" + } + } + } + }, + "v1SearchConfig": { + "type": "object", + "properties": { + "requestId": { + "type": "string" + }, + "num": { + "type": "integer", + "format": "int64" + }, + "radius": { + "type": "number", + "format": "float" + }, + "epsilon": { + "type": "number", + "format": "float" + }, + 
"timeout": { + "type": "string", + "format": "int64" + }, + "filters": { + "$ref": "#/definitions/v1FilterConfig" + } + } + } + } +} diff --git a/apis/swagger/v1/vald/apis/proto/v1/vald/insert.swagger.json b/apis/swagger/v1/vald/apis/proto/v1/vald/insert.swagger.json new file mode 100644 index 0000000000..55fb42a592 --- /dev/null +++ b/apis/swagger/v1/vald/apis/proto/v1/vald/insert.swagger.json @@ -0,0 +1,183 @@ +{ + "swagger": "2.0", + "info": { + "title": "apis/proto/v1/vald/insert.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/insert": { + "post": { + "operationId": "Insert_Insert", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1ObjectLocation" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1InsertRequest" + } + } + ], + "tags": [ + "Insert" + ] + } + } + }, + "definitions": { + "protobufAny": { + "type": "object", + "properties": { + "typeUrl": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, + "runtimeError": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpcCode": { + "type": "integer", + "format": "int32" + }, + "httpCode": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "httpStatus": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "v1FilterConfig": { + "type": "object", + "properties": { + "targets": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1InsertConfig": { + "type": "object", + "properties": { + "skipStrictExistCheck": { + "type": "boolean" + }, + "filters": { + "$ref": "#/definitions/v1FilterConfig" + } + } + }, + "v1InsertRequest": { + "type": "object", + "properties": { + "vector": { + "$ref": "#/definitions/v1ObjectVector" + }, + "config": { + "$ref": "#/definitions/v1InsertConfig" + } + } + }, + "v1ObjectLocation": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "uuid": { + "type": "string" + }, + "ips": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1ObjectLocations": { + "type": "object", + "properties": { + "locations": { + "type": "array", + "items": { + "$ref": "#/definitions/v1ObjectLocation" + } + } + } + }, + "v1ObjectVector": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "vector": { + "type": "array", + "items": { + "type": "number", + "format": "float" + } + } + } + } + } +} diff --git a/apis/swagger/v1/vald/apis/proto/v1/vald/object.swagger.json b/apis/swagger/v1/vald/apis/proto/v1/vald/object.swagger.json new file mode 100644 index 0000000000..bc7d6fd874 --- /dev/null +++ b/apis/swagger/v1/vald/apis/proto/v1/vald/object.swagger.json @@ -0,0 +1,158 @@ +{ + "swagger": "2.0", + "info": { + "title": "apis/proto/v1/vald/object.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + 
"application/json" + ], + "paths": { + "/exists/{id}": { + "get": { + "operationId": "Object_Exists", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/ObjectID" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Object" + ] + } + }, + "/object/{id}": { + "get": { + "operationId": "Object_GetObject", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1ObjectVector" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Object" + ] + } + } + }, + "definitions": { + "ObjectID": { + "type": "object", + "properties": { + "id": { + "type": "string" + } + } + }, + "protobufAny": { + "type": "object", + "properties": { + "typeUrl": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, + "runtimeError": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpcCode": { + "type": "integer", + "format": "int32" + }, + "httpCode": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "httpStatus": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "v1ObjectVector": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "vector": { + "type": "array", + "items": { + "type": "number", + "format": "float" + } + } + } + } + } +} diff --git a/apis/swagger/v1/vald/apis/proto/v1/vald/remove.swagger.json b/apis/swagger/v1/vald/apis/proto/v1/vald/remove.swagger.json new file mode 100644 index 0000000000..6d0905c50c --- /dev/null +++ b/apis/swagger/v1/vald/apis/proto/v1/vald/remove.swagger.json @@ -0,0 +1,129 @@ +{ + "swagger": "2.0", + "info": { + "title": "apis/proto/v1/vald/remove.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "ObjectID": { + "type": "object", + "properties": { + "id": { + "type": "string" + } + } + }, + "payloadv1RemoveRequest": { + "type": "object", + "properties": { + "id": { + "$ref": "#/definitions/ObjectID" + }, + "config": { + "$ref": "#/definitions/v1RemoveConfig" + } + } + }, + "protobufAny": { + "type": "object", + "properties": { + "typeUrl": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, + "runtimeError": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpcCode": { + "type": "integer", + "format": "int32" + }, + "httpCode": { + "type": "integer", + "format": 
"int32" + }, + "message": { + "type": "string" + }, + "httpStatus": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "v1ObjectLocation": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "uuid": { + "type": "string" + }, + "ips": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1ObjectLocations": { + "type": "object", + "properties": { + "locations": { + "type": "array", + "items": { + "$ref": "#/definitions/v1ObjectLocation" + } + } + } + }, + "v1RemoveConfig": { + "type": "object", + "properties": { + "skipStrictExistCheck": { + "type": "boolean" + } + } + } + } +} diff --git a/apis/swagger/v1/vald/apis/proto/v1/vald/search.swagger.json b/apis/swagger/v1/vald/apis/proto/v1/vald/search.swagger.json new file mode 100644 index 0000000000..e884fcae06 --- /dev/null +++ b/apis/swagger/v1/vald/apis/proto/v1/vald/search.swagger.json @@ -0,0 +1,240 @@ +{ + "swagger": "2.0", + "info": { + "title": "apis/proto/v1/vald/search.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/search": { + "post": { + "operationId": "Search_Search", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/SearchResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1SearchRequest" + } + } + ], + "tags": [ + "Search" + ] + } + }, + "/search/id": { + "post": { + "operationId": "Search_SearchByID", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/SearchResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/SearchIDRequest" + } + } + ], + "tags": [ + "Search" + ] + } + } + }, + "definitions": { + "ObjectDistance": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "distance": { + "type": "number", + "format": "float" + } + } + }, + "SearchIDRequest": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "config": { + "$ref": "#/definitions/v1SearchConfig" + } + } + }, + "SearchResponse": { + "type": "object", + "properties": { + "requestId": { + "type": "string" + }, + "results": { + "type": "array", + "items": { + "$ref": "#/definitions/ObjectDistance" + } + } + } + }, + "SearchResponses": { + "type": "object", + "properties": { + "responses": { + "type": "array", + "items": { + "$ref": "#/definitions/SearchResponse" + } + } + } + }, + "protobufAny": { + "type": "object", + "properties": { + "typeUrl": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, + "runtimeError": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpcCode": { + "type": "integer", + "format": "int32" + }, + "httpCode": { + 
"type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "httpStatus": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "v1FilterConfig": { + "type": "object", + "properties": { + "targets": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1SearchConfig": { + "type": "object", + "properties": { + "requestId": { + "type": "string" + }, + "num": { + "type": "integer", + "format": "int64" + }, + "radius": { + "type": "number", + "format": "float" + }, + "epsilon": { + "type": "number", + "format": "float" + }, + "timeout": { + "type": "string", + "format": "int64" + }, + "filters": { + "$ref": "#/definitions/v1FilterConfig" + } + } + }, + "v1SearchRequest": { + "type": "object", + "properties": { + "vector": { + "type": "array", + "items": { + "type": "number", + "format": "float" + } + }, + "config": { + "$ref": "#/definitions/v1SearchConfig" + } + } + } + } +} diff --git a/apis/swagger/v1/vald/apis/proto/v1/vald/update.swagger.json b/apis/swagger/v1/vald/apis/proto/v1/vald/update.swagger.json new file mode 100644 index 0000000000..f9864deac1 --- /dev/null +++ b/apis/swagger/v1/vald/apis/proto/v1/vald/update.swagger.json @@ -0,0 +1,183 @@ +{ + "swagger": "2.0", + "info": { + "title": "apis/proto/v1/vald/update.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/update": { + "post": { + "operationId": "Update_Update", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1ObjectLocation" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1UpdateRequest" + } + } + ], + "tags": [ + "Update" + ] + } + } + }, + "definitions": { + "protobufAny": { + "type": "object", + "properties": { + "typeUrl": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, + "runtimeError": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpcCode": { + "type": "integer", + "format": "int32" + }, + "httpCode": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "httpStatus": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "v1FilterConfig": { + "type": "object", + "properties": { + "targets": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1ObjectLocation": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "uuid": { + "type": "string" + }, + "ips": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1ObjectLocations": { + "type": "object", + "properties": { + "locations": { + "type": "array", + "items": { + "$ref": "#/definitions/v1ObjectLocation" + } + } + } + }, + "v1ObjectVector": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "vector": { + "type": "array", + "items": { + "type": "number", + 
"format": "float" + } + } + } + }, + "v1UpdateConfig": { + "type": "object", + "properties": { + "skipStrictExistCheck": { + "type": "boolean" + }, + "filters": { + "$ref": "#/definitions/v1FilterConfig" + } + } + }, + "v1UpdateRequest": { + "type": "object", + "properties": { + "vector": { + "$ref": "#/definitions/v1ObjectVector" + }, + "config": { + "$ref": "#/definitions/v1UpdateConfig" + } + } + } + } +} diff --git a/apis/swagger/v1/vald/apis/proto/v1/vald/upsert.swagger.json b/apis/swagger/v1/vald/apis/proto/v1/vald/upsert.swagger.json new file mode 100644 index 0000000000..b9c007713d --- /dev/null +++ b/apis/swagger/v1/vald/apis/proto/v1/vald/upsert.swagger.json @@ -0,0 +1,183 @@ +{ + "swagger": "2.0", + "info": { + "title": "apis/proto/v1/vald/upsert.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/upsert": { + "post": { + "operationId": "Upsert_Upsert", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1ObjectLocation" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1UpsertRequest" + } + } + ], + "tags": [ + "Upsert" + ] + } + } + }, + "definitions": { + "protobufAny": { + "type": "object", + "properties": { + "typeUrl": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, + "runtimeError": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpcCode": { + "type": "integer", + "format": "int32" + }, + "httpCode": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "httpStatus": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "v1FilterConfig": { + "type": "object", + "properties": { + "targets": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1ObjectLocation": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "uuid": { + "type": "string" + }, + "ips": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1ObjectLocations": { + "type": "object", + "properties": { + "locations": { + "type": "array", + "items": { + "$ref": "#/definitions/v1ObjectLocation" + } + } + } + }, + "v1ObjectVector": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "vector": { + "type": "array", + "items": { + "type": "number", + "format": "float" + } + } + } + }, + "v1UpsertConfig": { + "type": "object", + "properties": { + "skipStrictExistCheck": { + "type": "boolean" + }, + "filters": { + "$ref": "#/definitions/v1FilterConfig" + } + } + }, + "v1UpsertRequest": { + "type": "object", + "properties": { + "vector": { + "$ref": "#/definitions/v1ObjectVector" + }, + "config": { + "$ref": "#/definitions/v1UpsertConfig" + } + } + } + } +} diff --git a/assets/image/svg/Vald Architecture Overview.svg b/assets/image/svg/Vald Architecture Overview.svg index 62b3bcd222..03e3fa1009 100644 --- a/assets/image/svg/Vald 
Architecture Overview.svg +++ b/assets/image/svg/Vald Architecture Overview.svg @@ -1,3 +1,3 @@ -
[SVG label text omitted. The old "Architecture Overview" diagram labels (Persistent Layer, Kubernetes, Ingress Node, Ingress, Vald Ingress, Vald Filter Ingress, Vald Ingress Filter, Vald Egress Filter, Vald Gateway (Balancing Gateway), Vald Compressor (LZ4, ZSTD, GZIP, GOB), Vald Manager Backup (MySQL, Cassandra), Vald Replication Manager, Vald Index Manager, Vald Meta (RDB, NoSQL, etc.), Agent Discoverer, Vald Agent (NGT, SPTAG, faiss), Redis, MySQL, Cassandra, Kube-APIServer) are replaced by the new diagram labels (Kubernetes, Node, Ingress Node, Custom Controller, Persistent Layer, Ingress, Vald Ingress, Vald Ingress Filter, Vald Filter Gateway, Vald Meta Gateway, Vald Backup Gateway, Vald LB Gateway (Balancing Gateway), Agent Discoverer, Vald Egress Filter, Vald Meta (RDB, NoSQL, etc.), Vald Backup Manager (MySQL, Cassandra), Vald Compressor (LZ4, ZSTD, GZIP, GOB), Vald Agent (NGT, SPTAG, Faiss), Vald Agent Sidecar (Backup Index Data), Vald Index Manager, Vald CRD (Deploy & Manage Vald), Vald Agent Scheduler, Vald Replication Manager Controller, Vald Replication Manager Agent, Redis, Cassandra, MySQL, Object Storage (GCS, S3), Volume (To Save Index Data), Kube-APIServer), together with the viewer fallback strings embedded in the SVG.]
\ No newline at end of file diff --git a/assets/test/templates/common/function.tmpl b/assets/test/templates/common/function.tmpl index 359378b228..8e53c6e608 100644 --- a/assets/test/templates/common/function.tmpl +++ b/assets/test/templates/common/function.tmpl @@ -2,11 +2,12 @@ {{- $f := . }} func {{ .TestName }}(t *testing.T) { + t.Parallel() {{- if .TestParameters }} type args struct { - {{- range .TestParameters }} - {{ Param . }} {{ .Type }} - {{- end}} + {{- range .TestParameters }} + {{ Param . }} {{ .Type }} + {{- end}} } {{- end }} {{- $hasFields := false -}} @@ -106,9 +107,11 @@ func {{ .TestName }}(t *testing.T) { */ } - for {{- if (or .Subtests (not .IsNaked)) }} _, test := {{- end }} range tests { + for {{- if (or .Subtests (not .IsNaked)) }} _, tc := {{- end }} range tests { {{- if .Subtests }} + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc({{- if .TestParameters }} test.args {{- end }}) @@ -139,7 +142,7 @@ func {{ .TestName }}(t *testing.T) { {{ $len := len .Results }} {{- if or (ge $len 1) (.OnlyReturnsOneValue) (.OnlyReturnsError) }} {{ template "results" $f }} := {{ template "call" $f }} - if err := test.checkFunc(test.want, {{ template "results" $f }} ); err != nil { + if err := test.checkFunc(test.want, {{ template "results" $f }} {{- range .Parameters }}{{- if .IsWriter }}, {{ Param . }}.String(){{- end }}{{- end }}); err != nil { tt.Errorf("error = %v", err) } {{ else }} diff --git a/assets/test/templates/common/header.tmpl b/assets/test/templates/common/header.tmpl index 7426cd65e2..fea59430b6 100644 --- a/assets/test/templates/common/header.tmpl +++ b/assets/test/templates/common/header.tmpl @@ -4,7 +4,7 @@ package {{.Package}} import ( - "go.uber.org/goleak" + "go.uber.org/goleak" {{range .Imports}}{{.Name}} {{.Path}} {{end}} ) diff --git a/assets/test/templates/option/function.tmpl b/assets/test/templates/option/function.tmpl index 20aa92ed61..83e373db16 100644 --- a/assets/test/templates/option/function.tmpl +++ b/assets/test/templates/option/function.tmpl @@ -2,6 +2,7 @@ {{- $f := . }} func {{ .TestName }}(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} {{- if .TestParameters }} @@ -115,9 +116,11 @@ func {{ .TestName }}(t *testing.T) { */ } - for {{- if (or .Subtests (not .IsNaked)) }} _, test := {{- end }} range tests { + for {{- if (or .Subtests (not .IsNaked)) }} _, tc := {{- end }} range tests { {{- if .Subtests }} + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc({{- if .TestParameters }} test.args {{- end }}) diff --git a/charts/vald/README.md b/charts/vald/README.md index ef8a867e87..479a5c2caf 100644 --- a/charts/vald/README.md +++ b/charts/vald/README.md @@ -89,7 +89,7 @@ Configuration | agent.ngt.default_pool_size | int | `10000` | default create index batch pool size | | agent.ngt.default_radius | float | `-1` | default radius used for search | | agent.ngt.dimension | int | `4096` | vector dimension | -| agent.ngt.distance_type | string | `"l2"` | distance type. it should be `l1`, `l2`, `angle`, `hamming`, `cosine`, `normalizedangle`, `normalizedcosine` or `jaccard`. 
for further details about NGT libraries supported distance is https://github.com/yahoojapan/NGT/wiki/Command-Quick-Reference and vald agent's supported NGT distance type is https://pkg.go.dev/github.com/vdaas/vald/internal/core/ngt#pkg-constants | +| agent.ngt.distance_type | string | `"l2"` | distance type. it should be `l1`, `l2`, `angle`, `hamming`, `cosine`, `normalizedangle`, `normalizedcosine` or `jaccard`. for further details about NGT libraries supported distance is https://github.com/yahoojapan/NGT/wiki/Command-Quick-Reference and vald agent's supported NGT distance type is https://pkg.go.dev/github.com/vdaas/vald/internal/core/algorithm/ngt#pkg-constants | | agent.ngt.enable_in_memory_mode | bool | `true` | in-memory mode enabled | | agent.ngt.enable_proactive_gc | bool | `true` | enable proactive GC call for reducing heap memory allocation | | agent.ngt.index_path | string | `""` | path to index data | @@ -235,7 +235,7 @@ Configuration | backupManager.cassandra.config.max_prepared_stmts | int | `1000` | maximum number of prepared statements | | backupManager.cassandra.config.max_routing_key_info | int | `1000` | maximum number of routing key info | | backupManager.cassandra.config.max_wait_schema_agreement | string | `"1m"` | maximum duration to wait for schema agreement | -| backupManager.cassandra.config.meta_table | string | `"meta_vector"` | table name of backup | +| backupManager.cassandra.config.vector_backup_table | string | `"backup_vector"` | table name of backup | | backupManager.cassandra.config.num_conns | int | `2` | number of connections per hosts | | backupManager.cassandra.config.page_size | int | `5000` | page size | | backupManager.cassandra.config.password | string | `"_CASSANDRA_PASSWORD_"` | cassandra password | @@ -755,7 +755,7 @@ Configuration | indexManager.volumeMounts | list | `[]` | volume mounts | | indexManager.volumes | list | `[]` | volumes | | initializer.cassandra.configmap.backup.enabled | bool | `true` | backup table enabled | -| initializer.cassandra.configmap.backup.name | string | `"meta_vector"` | name of backup table | +| initializer.cassandra.configmap.backup.name | string | `"backup_vector"` | name of backup table | | initializer.cassandra.configmap.enabled | bool | `false` | cassandra schema configmap will be created | | initializer.cassandra.configmap.filename | string | `"init.cql"` | cassandra schema filename | | initializer.cassandra.configmap.keyspace | string | `"vald"` | cassandra keyspace | @@ -825,7 +825,7 @@ Configuration | meta.cassandra.config.max_prepared_stmts | int | `1000` | maximum number of prepared statements | | meta.cassandra.config.max_routing_key_info | int | `1000` | maximum number of routing key info | | meta.cassandra.config.max_wait_schema_agreement | string | `"1m"` | maximum duration to wait for schema agreement | -| meta.cassandra.config.meta_table | string | `"meta_vector"` | table name of backup | +| meta.cassandra.config.vector_backup_table | string | `"backup_vector"` | table name of backup | | meta.cassandra.config.num_conns | int | `2` | number of connections per hosts | | meta.cassandra.config.page_size | int | `5000` | page size | | meta.cassandra.config.password | string | `"_CASSANDRA_PASSWORD_"` | cassandra password | diff --git a/charts/vald/templates/NOTES.txt b/charts/vald/templates/NOTES.txt index 8c53fc1715..beb7f0f839 100644 --- a/charts/vald/templates/NOTES.txt +++ b/charts/vald/templates/NOTES.txt @@ -1,3 +1,6 @@ Release {{ .Release.Name }} is created. 
-Ingress: {{ .Values.gateway.ingress.host }} +{{- $gateway := .Values.gateway.vald -}} +{{- if and $gateway.enabled $gateway.ingress.enabled }} +Ingress: {{ $gateway.ingress.host }} +{{- end }} diff --git a/charts/vald/templates/_helpers.tpl b/charts/vald/templates/_helpers.tpl index abfedb20c1..d6243bbdb9 100755 --- a/charts/vald/templates/_helpers.tpl +++ b/charts/vald/templates/_helpers.tpl @@ -691,9 +691,9 @@ initContainers - -c - | {{- if eq .target "compressor" }} - {{- $compressorReadinessPort := default $.Values.defaults.server_config.healths.readiness.port $.Values.compressor.server_config.healths.readiness.port }} + {{- $compressorReadinessPort := default $.Values.defaults.server_config.healths.readiness.port $.Values.manager.compressor.server_config.healths.readiness.port }} {{- $compressorReadinessPath := default $.Values.defaults.server_config.healths.readiness.readinessProbe.httpGet.path .readinessPath }} - until [ "$(wget --server-response --spider --quiet http://{{ $.Values.compressor.name }}.{{ $.namespace }}.svc.cluster.local:{{ $compressorReadinessPort }}{{ $compressorReadinessPath }} 2>&1 | awk 'NR==1{print $2}')" == "200" ]; do + until [ "$(wget --server-response --spider --quiet http://{{ $.Values.manager.compressor.name }}.{{ $.namespace }}.svc.cluster.local:{{ $compressorReadinessPort }}{{ $compressorReadinessPath }} 2>&1 | awk 'NR==1{print $2}')" == "200" ]; do {{- else if eq .target "meta" }} {{- $metaReadinessPort := default $.Values.defaults.server_config.healths.readiness.port $.Values.meta.server_config.healths.readiness.port }} {{- $metaReadinessPath := default $.Values.defaults.server_config.healths.readiness.readinessProbe.httpGet.path .readinessPath }} @@ -707,9 +707,25 @@ initContainers {{- $agentReadinessPath := default $.Values.defaults.server_config.healths.readiness.readinessProbe.httpGet.path .readinessPath }} until [ "$(wget --server-response --spider --quiet http://{{ $.Values.agent.name }}.{{ $.namespace }}.svc.cluster.local:{{ $agentReadinessPort }}{{ $agentReadinessPath }} 2>&1 | awk 'NR==1{print $2}')" == "200" ]; do {{- else if eq .target "manager-backup" }} - {{- $backupManagerReadinessPort := default $.Values.defaults.server_config.healths.readiness.port $.Values.backupManager.server_config.healths.readiness.port }} + {{- $backupManagerReadinessPort := default $.Values.defaults.server_config.healths.readiness.port $.Values.manager.backup.server_config.healths.readiness.port }} {{- $backupManagerReadinessPath := default $.Values.defaults.server_config.healths.readiness.readinessProbe.httpGet.path .readinessPath }} - until [ "$(wget --server-response --spider --quiet http://{{ $.Values.backupManager.name }}.{{ $.namespace }}.svc.cluster.local:{{ $backupManagerReadinessPort }}{{ $backupManagerReadinessPath }} 2>&1 | awk 'NR==1{print $2}')" == "200" ]; do + until [ "$(wget --server-response --spider --quiet http://{{ $.Values.manager.backup.name }}.{{ $.namespace }}.svc.cluster.local:{{ $backupManagerReadinessPort }}{{ $backupManagerReadinessPath }} 2>&1 | awk 'NR==1{print $2}')" == "200" ]; do + {{- else if eq .target "gateway-backup" }} + {{- $backupGatewayReadinessPort := default $.Values.defaults.server_config.healths.readiness.port $.Values.gateway.backup.server_config.healths.readiness.port }} + {{- $backupGatewayReadinessPath := default $.Values.defaults.server_config.healths.readiness.readinessProbe.httpGet.path .readinessPath }} + until [ "$(wget --server-response --spider --quiet http://{{ $.Values.gateway.backup.name }}.{{ $.namespace 
}}.svc.cluster.local:{{ $backupGatewayReadinessPort }}{{ $backupGatewayReadinessPath }} 2>&1 | awk 'NR==1{print $2}')" == "200" ]; do + {{- else if eq .target "gateway-filter" }} + {{- $filterGatewayReadinessPort := default $.Values.defaults.server_config.healths.readiness.port $.Values.gateway.filter.server_config.healths.readiness.port }} + {{- $filterGatewayReadinessPath := default $.Values.defaults.server_config.healths.readiness.readinessProbe.httpGet.path .readinessPath }} + until [ "$(wget --server-response --spider --quiet http://{{ $.Values.gateway.filter.name }}.{{ $.namespace }}.svc.cluster.local:{{ $filterGatewayReadinessPort }}{{ $filterGatewayReadinessPath }} 2>&1 | awk 'NR==1{print $2}')" == "200" ]; do + {{- else if eq .target "gateway-lb" }} + {{- $lbGatewayReadinessPort := default $.Values.defaults.server_config.healths.readiness.port $.Values.gateway.lb.server_config.healths.readiness.port }} + {{- $lbGatewayReadinessPath := default $.Values.defaults.server_config.healths.readiness.readinessProbe.httpGet.path .readinessPath }} + until [ "$(wget --server-response --spider --quiet http://{{ $.Values.gateway.lb.name }}.{{ $.namespace }}.svc.cluster.local:{{ $lbGatewayReadinessPort }}{{ $lbGatewayReadinessPath }} 2>&1 | awk 'NR==1{print $2}')" == "200" ]; do + {{- else if eq .target "gateway-meta" }} + {{- $metaGatewayReadinessPort := default $.Values.defaults.server_config.healths.readiness.port $.Values.gateway.meta.server_config.healths.readiness.port }} + {{- $metaGatewayReadinessPath := default $.Values.defaults.server_config.healths.readiness.readinessProbe.httpGet.path .readinessPath }} + until [ "$(wget --server-response --spider --quiet http://{{ $.Values.gateway.meta.name }}.{{ $.namespace }}.svc.cluster.local:{{ $metaGatewayReadinessPort }}{{ $metaGatewayReadinessPath }} 2>&1 | awk 'NR==1{print $2}')" == "200" ]; do {{- else if .untilCondition }} until [ {{ .untilCondition }} ]; do {{- else if .whileCondition }} diff --git a/charts/vald/templates/agent/configmap.yaml b/charts/vald/templates/agent/configmap.yaml index ff8bf26167..037d0c3505 100644 --- a/charts/vald/templates/agent/configmap.yaml +++ b/charts/vald/templates/agent/configmap.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.agent.enabled }} +{{- $agent := .Values.agent -}} +{{- if $agent.enabled }} apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.agent.name }}-config + name: {{ $agent.name }}-config labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . 
}} @@ -28,17 +29,17 @@ metadata: data: config.yaml: | --- - version: {{ .Values.agent.version }} - time_zone: {{ default .Values.defaults.time_zone .Values.agent.time_zone }} + version: {{ $agent.version }} + time_zone: {{ default .Values.defaults.time_zone $agent.time_zone }} logging: - {{- $logging := dict "Values" .Values.agent.logging "default" .Values.defaults.logging }} + {{- $logging := dict "Values" $agent.logging "default" .Values.defaults.logging }} {{- include "vald.logging" $logging | nindent 6 }} server_config: - {{- $servers := dict "Values" .Values.agent.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $agent.server_config "default" .Values.defaults.server_config }} {{- include "vald.servers" $servers | nindent 6 }} observability: - {{- $observability := dict "Values" .Values.agent.observability "default" .Values.defaults.observability }} + {{- $observability := dict "Values" $agent.observability "default" .Values.defaults.observability }} {{- include "vald.observability" $observability | nindent 6 }} ngt: - {{- toYaml .Values.agent.ngt | nindent 6 }} + {{- toYaml $agent.ngt | nindent 6 }} {{- end }} diff --git a/charts/vald/templates/agent/hpa.yaml b/charts/vald/templates/agent/hpa.yaml index 914ee805df..187fd1a995 100644 --- a/charts/vald/templates/agent/hpa.yaml +++ b/charts/vald/templates/agent/hpa.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.agent.enabled .Values.agent.hpa.enabled }} +{{- $agent := .Values.agent -}} +{{- if and $agent.enabled $agent.hpa.enabled }} apiVersion: autoscaling/v1 kind: HorizontalPodAutoscaler metadata: - name: {{ .Values.agent.name }} + name: {{ $agent.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -26,12 +27,12 @@ metadata: app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: agent spec: - maxReplicas: {{ .Values.agent.maxReplicas }} - minReplicas: {{ .Values.agent.minReplicas }} + maxReplicas: {{ $agent.maxReplicas }} + minReplicas: {{ $agent.minReplicas }} scaleTargetRef: apiVersion: apps/v1 - kind: {{ .Values.agent.kind }} - name: {{ .Values.agent.name }} - targetCPUUtilizationPercentage: {{ .Values.agent.hpa.targetCPUUtilizationPercentage }} + kind: {{ $agent.kind }} + name: {{ $agent.name }} + targetCPUUtilizationPercentage: {{ $agent.hpa.targetCPUUtilizationPercentage }} status: {{- end }} diff --git a/charts/vald/templates/agent/pdb.yaml b/charts/vald/templates/agent/pdb.yaml index b25362e5ff..9c326903f8 100644 --- a/charts/vald/templates/agent/pdb.yaml +++ b/charts/vald/templates/agent/pdb.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.agent.enabled }} +{{- $agent := .Values.agent -}} +{{- if $agent.enabled }} apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: - name: {{ .Values.agent.name }} + name: {{ $agent.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . 
}} @@ -26,8 +27,8 @@ metadata: app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: agent spec: - maxUnavailable: {{ .Values.agent.maxUnavailable }} + maxUnavailable: {{ $agent.maxUnavailable }} selector: matchLabels: - app: {{ .Values.agent.name }} + app: {{ $agent.name }} {{- end }} diff --git a/charts/vald/templates/agent/priorityclass.yaml b/charts/vald/templates/agent/priorityclass.yaml index 918872063e..d807059d5e 100644 --- a/charts/vald/templates/agent/priorityclass.yaml +++ b/charts/vald/templates/agent/priorityclass.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.agent.enabled .Values.agent.podPriority.enabled }} +{{- $agent := .Values.agent -}} +{{- if and $agent.enabled $agent.podPriority.enabled }} apiVersion: scheduling.k8s.io/v1 kind: PriorityClass metadata: - name: {{ .Values.agent.name }}-priority + name: {{ $agent.name }}-priority labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -25,7 +26,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: agent -value: {{ .Values.agent.podPriority.value }} +value: {{ $agent.podPriority.value }} preemptionPolicy: Never globalDefault: false description: "A priority class for Vald agent." diff --git a/charts/vald/templates/agent/sidecar-svc.yaml b/charts/vald/templates/agent/sidecar-svc.yaml index ea1cf41db4..6bc7668e54 100644 --- a/charts/vald/templates/agent/sidecar-svc.yaml +++ b/charts/vald/templates/agent/sidecar-svc.yaml @@ -13,14 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.agent.enabled (and .Values.agent.sidecar.enabled .Values.agent.sidecar.service.enabled) }} +{{- $agent := .Values.agent -}} +{{- if and $agent.enabled (and $agent.sidecar.enabled $agent.sidecar.service.enabled) }} apiVersion: v1 kind: Service metadata: - name: {{ .Values.agent.sidecar.name }} - {{- if .Values.agent.sidecar.service.annotations }} + name: {{ $agent.sidecar.name }} + {{- if $agent.sidecar.service.annotations }} annotations: - {{- toYaml .Values.agent.sidecar.service.annotations | nindent 4 }} + {{- toYaml $agent.sidecar.service.annotations | nindent 4 }} {{- end }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} @@ -29,20 +30,20 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: agent - {{- if .Values.agent.sidecar.service.labels }} - {{- toYaml .Values.agent.sidecar.service.labels | nindent 4 }} + {{- if $agent.sidecar.service.labels }} + {{- toYaml $agent.sidecar.service.labels | nindent 4 }} {{- end }} spec: - {{- $servers := dict "Values" .Values.agent.sidecar.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $agent.sidecar.server_config "default" .Values.defaults.server_config }} {{- include "vald.servicePorts" $servers | nindent 2 }} selector: app.kubernetes.io/name: {{ include "vald.name" . 
}} app.kubernetes.io/component: agent - {{- if eq .Values.agent.sidecar.service.type "ClusterIP" }} + {{- if eq $agent.sidecar.service.type "ClusterIP" }} clusterIP: None {{- end }} - type: {{ .Values.agent.sidecar.service.type }} - {{- if .Values.agent.sidecar.service.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.agent.sidecar.service.externalTrafficPolicy }} + type: {{ $agent.sidecar.service.type }} + {{- if $agent.sidecar.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ $agent.sidecar.service.externalTrafficPolicy }} {{- end }} {{- end }} diff --git a/charts/vald/templates/agent/svc.yaml b/charts/vald/templates/agent/svc.yaml index 39447eca8d..cfbeb3e9f3 100644 --- a/charts/vald/templates/agent/svc.yaml +++ b/charts/vald/templates/agent/svc.yaml @@ -13,14 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.agent.enabled }} +{{- $agent := .Values.agent -}} +{{- if $agent.enabled }} apiVersion: v1 kind: Service metadata: - name: {{ .Values.agent.name }} - {{- if .Values.agent.service.annotations }} + name: {{ $agent.name }} + {{- if $agent.service.annotations }} annotations: - {{- toYaml .Values.agent.service.annotations | nindent 4 }} + {{- toYaml $agent.service.annotations | nindent 4 }} {{- end }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} @@ -29,20 +30,20 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: agent - {{- if .Values.agent.service.labels }} - {{- toYaml .Values.agent.service.labels | nindent 4 }} + {{- if $agent.service.labels }} + {{- toYaml $agent.service.labels | nindent 4 }} {{- end }} spec: - {{- $servers := dict "Values" .Values.agent.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $agent.server_config "default" .Values.defaults.server_config }} {{- include "vald.servicePorts" $servers | nindent 2 }} selector: app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/component: agent - {{- if eq .Values.agent.serviceType "ClusterIP" }} + {{- if eq $agent.serviceType "ClusterIP" }} clusterIP: None {{- end }} - type: {{ .Values.agent.serviceType }} - {{- if .Values.agent.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.agent.externalTrafficPolicy }} + type: {{ $agent.serviceType }} + {{- if $agent.externalTrafficPolicy }} + externalTrafficPolicy: {{ $agent.externalTrafficPolicy }} {{- end }} {{- end }} diff --git a/charts/vald/templates/discoverer/clusterrole.yaml b/charts/vald/templates/discoverer/clusterrole.yaml index 7d27a29f58..e568db7b34 100644 --- a/charts/vald/templates/discoverer/clusterrole.yaml +++ b/charts/vald/templates/discoverer/clusterrole.yaml @@ -13,7 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -{{- if and .Values.discoverer.enabled .Values.discoverer.clusterRole.enabled }} +{{- $discoverer := .Values.discoverer -}} +{{- if and $discoverer.enabled $discoverer.clusterRole.enabled }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: diff --git a/charts/vald/templates/discoverer/clusterrolebinding.yaml b/charts/vald/templates/discoverer/clusterrolebinding.yaml index 55ddb1795d..bb6e0ca922 100644 --- a/charts/vald/templates/discoverer/clusterrolebinding.yaml +++ b/charts/vald/templates/discoverer/clusterrolebinding.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.discoverer.enabled .Values.discoverer.clusterRoleBinding.enabled }} +{{- $discoverer := .Values.discoverer -}} +{{- if and $discoverer.enabled $discoverer.clusterRoleBinding.enabled }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: {{ .Values.discoverer.clusterRoleBinding.name }} + name: {{ $discoverer.clusterRoleBinding.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -28,9 +29,9 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: {{ .Values.discoverer.clusterRole.name }} + name: {{ $discoverer.clusterRole.name }} subjects: - kind: ServiceAccount - name: {{ .Values.discoverer.serviceAccount.name }} + name: {{ $discoverer.serviceAccount.name }} namespace: {{ .Release.Namespace }} {{- end }} diff --git a/charts/vald/templates/discoverer/configmap.yaml b/charts/vald/templates/discoverer/configmap.yaml index 870ed89b05..3bcabf0afa 100644 --- a/charts/vald/templates/discoverer/configmap.yaml +++ b/charts/vald/templates/discoverer/configmap.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.discoverer.enabled }} +{{- $discoverer := .Values.discoverer -}} +{{- if $discoverer.enabled }} apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.discoverer.name }}-config + name: {{ $discoverer.name }}-config labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . 
}} @@ -28,17 +29,17 @@ metadata: data: config.yaml: | --- - version: {{ .Values.discoverer.version }} - time_zone: {{ default .Values.defaults.time_zone .Values.discoverer.time_zone }} + version: {{ $discoverer.version }} + time_zone: {{ default .Values.defaults.time_zone $discoverer.time_zone }} logging: - {{- $logging := dict "Values" .Values.discoverer.logging "default" .Values.defaults.logging }} + {{- $logging := dict "Values" $discoverer.logging "default" .Values.defaults.logging }} {{- include "vald.logging" $logging | nindent 6 }} server_config: - {{- $servers := dict "Values" .Values.discoverer.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $discoverer.server_config "default" .Values.defaults.server_config }} {{- include "vald.servers" $servers | nindent 6 }} observability: - {{- $observability := dict "Values" .Values.discoverer.observability "default" .Values.defaults.observability }} + {{- $observability := dict "Values" $discoverer.observability "default" .Values.defaults.observability }} {{- include "vald.observability" $observability | nindent 6 }} discoverer: - {{- toYaml .Values.discoverer.discoverer | nindent 6 }} + {{- toYaml $discoverer.discoverer | nindent 6 }} {{- end }} diff --git a/charts/vald/templates/discoverer/daemonset.yaml b/charts/vald/templates/discoverer/daemonset.yaml index 03bcb462a5..fc751361a1 100644 --- a/charts/vald/templates/discoverer/daemonset.yaml +++ b/charts/vald/templates/discoverer/daemonset.yaml @@ -13,76 +13,77 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.discoverer.enabled (eq .Values.discoverer.kind "DaemonSet") }} +{{- $discoverer := .Values.discoverer -}} +{{- if and $discoverer.enabled (eq $discoverer.kind "DaemonSet") }} apiVersion: apps/v1 kind: DaemonSet metadata: - name: {{ .Values.discoverer.name }} + name: {{ $discoverer.name }} labels: - app: {{ .Values.discoverer.name }} + app: {{ $discoverer.name }} app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: discoverer - {{- if .Values.discoverer.annotations }} + {{- if $discoverer.annotations }} annotations: - {{- toYaml .Values.discoverer.annotations | nindent 4 }} + {{- toYaml $discoverer.annotations | nindent 4 }} {{- end }} spec: - revisionHistoryLimit: {{ .Values.discoverer.revisionHistoryLimit }} + revisionHistoryLimit: {{ $discoverer.revisionHistoryLimit }} selector: matchLabels: - app: {{ .Values.discoverer.name }} + app: {{ $discoverer.name }} updateStrategy: rollingUpdate: - maxUnavailable: {{ .Values.discoverer.rollingUpdate.maxUnavailable }} + maxUnavailable: {{ $discoverer.rollingUpdate.maxUnavailable }} type: RollingUpdate template: metadata: creationTimestamp: null labels: - app: {{ .Values.discoverer.name }} + app: {{ $discoverer.name }} app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: discoverer annotations: checksum/configmap: {{ include (print $.Template.BasePath "/discoverer/configmap.yaml") . 
| sha256sum }} - {{- if .Values.discoverer.podAnnotations }} - {{- toYaml .Values.discoverer.podAnnotations | nindent 8 }} + {{- if $discoverer.podAnnotations }} + {{- toYaml $discoverer.podAnnotations | nindent 8 }} {{- end }} spec: - {{- if .Values.discoverer.initContainers }} + {{- if $discoverer.initContainers }} initContainers: - {{- $initContainers := dict "initContainers" .Values.discoverer.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- $initContainers := dict "initContainers" $discoverer.initContainers "Values" .Values "namespace" .Release.Namespace -}} {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} {{- end }} affinity: - {{- include "vald.affinity" .Values.discoverer.affinity | nindent 8 }} - {{- if .Values.discoverer.topologySpreadConstraints }} + {{- include "vald.affinity" $discoverer.affinity | nindent 8 }} + {{- if $discoverer.topologySpreadConstraints }} topologySpreadConstraints: - {{- toYaml .Values.discoverer.topologySpreadConstraints | nindent 8 }} + {{- toYaml $discoverer.topologySpreadConstraints | nindent 8 }} {{- end }} containers: - - name: {{ .Values.discoverer.name }} - image: "{{ .Values.discoverer.image.repository }}:{{ default .Values.defaults.image.tag .Values.discoverer.image.tag }}" - imagePullPolicy: {{ .Values.discoverer.image.pullPolicy }} - {{- $servers := dict "Values" .Values.discoverer.server_config "default" .Values.defaults.server_config -}} + - name: {{ $discoverer.name }} + image: "{{ $discoverer.image.repository }}:{{ default .Values.defaults.image.tag $discoverer.image.tag }}" + imagePullPolicy: {{ $discoverer.image.pullPolicy }} + {{- $servers := dict "Values" $discoverer.server_config "default" .Values.defaults.server_config -}} {{- include "vald.containerPorts" $servers | trim | nindent 10 }} resources: - {{- toYaml .Values.discoverer.resources | nindent 12 }} + {{- toYaml $discoverer.resources | nindent 12 }} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File - {{- if .Values.discoverer.env }} + {{- if $discoverer.env }} env: - {{- toYaml .Values.discoverer.env | nindent 12 }} + {{- toYaml $discoverer.env | nindent 12 }} {{- end }} volumeMounts: - - name: {{ .Values.discoverer.name }}-config + - name: {{ $discoverer.name }}-config mountPath: /etc/server/ - {{- if .Values.discoverer.volumeMounts }} - {{- toYaml .Values.discoverer.volumeMounts | nindent 12 }} + {{- if $discoverer.volumeMounts }} + {{- toYaml $discoverer.volumeMounts | nindent 12 }} {{- end }} dnsPolicy: ClusterFirst restartPolicy: Always @@ -90,29 +91,29 @@ spec: serviceAccount: vald serviceAccountName: vald securityContext: {} - terminationGracePeriodSeconds: {{ .Values.discoverer.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ $discoverer.terminationGracePeriodSeconds }} volumes: - - name: {{ .Values.discoverer.name }}-config + - name: {{ $discoverer.name }}-config configMap: defaultMode: 420 - name: {{ .Values.discoverer.name }}-config - {{- if .Values.discoverer.volumes }} - {{- toYaml .Values.discoverer.volumes | nindent 8 }} + name: {{ $discoverer.name }}-config + {{- if $discoverer.volumes }} + {{- toYaml $discoverer.volumes | nindent 8 }} {{- end }} - {{- if .Values.discoverer.nodeName }} - nodeName: {{ .Values.discoverer.nodeName }} + {{- if $discoverer.nodeName }} + nodeName: {{ $discoverer.nodeName }} {{- end }} - {{- if .Values.discoverer.nodeSelector }} + {{- if $discoverer.nodeSelector }} nodeSelector: - {{- toYaml .Values.discoverer.nodeSelector | nindent 8 }} + {{- 
toYaml $discoverer.nodeSelector | nindent 8 }} {{- end }} - {{- if .Values.discoverer.tolerations }} + {{- if $discoverer.tolerations }} tolerations: - {{- toYaml .Values.discoverer.tolerations | nindent 8 }} + {{- toYaml $discoverer.tolerations | nindent 8 }} {{- end }} - {{- if .Values.discoverer.podPriority }} - {{- if .Values.discoverer.podPriority.enabled }} - priorityClassName: {{ .Values.discoverer.name }}-priority + {{- if $discoverer.podPriority }} + {{- if $discoverer.podPriority.enabled }} + priorityClassName: {{ $discoverer.name }}-priority {{- end }} {{- end }} status: diff --git a/charts/vald/templates/discoverer/deployment.yaml b/charts/vald/templates/discoverer/deployment.yaml index 11017eb8c4..6297d8d309 100644 --- a/charts/vald/templates/discoverer/deployment.yaml +++ b/charts/vald/templates/discoverer/deployment.yaml @@ -13,81 +13,82 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.discoverer.enabled (eq .Values.discoverer.kind "Deployment") }} +{{- $discoverer := .Values.discoverer -}} +{{- if and $discoverer.enabled (eq $discoverer.kind "Deployment") }} apiVersion: apps/v1 kind: Deployment metadata: - name: {{ .Values.discoverer.name }} + name: {{ $discoverer.name }} labels: - app: {{ .Values.discoverer.name }} + app: {{ $discoverer.name }} app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: discoverer - {{- if .Values.discoverer.annotations }} + {{- if $discoverer.annotations }} annotations: - {{- toYaml .Values.discoverer.annotations | nindent 4 }} + {{- toYaml $discoverer.annotations | nindent 4 }} {{- end }} spec: - progressDeadlineSeconds: {{ .Values.discoverer.progressDeadlineSeconds }} - {{- if not .Values.discoverer.hpa.enabled }} - replicas: {{ .Values.discoverer.minReplicas }} + progressDeadlineSeconds: {{ $discoverer.progressDeadlineSeconds }} + {{- if not $discoverer.hpa.enabled }} + replicas: {{ $discoverer.minReplicas }} {{- end }} - revisionHistoryLimit: {{ .Values.discoverer.revisionHistoryLimit }} + revisionHistoryLimit: {{ $discoverer.revisionHistoryLimit }} selector: matchLabels: - app: {{ .Values.discoverer.name }} + app: {{ $discoverer.name }} strategy: rollingUpdate: - maxSurge: {{ .Values.discoverer.rollingUpdate.maxSurge }} - maxUnavailable: {{ .Values.discoverer.rollingUpdate.maxUnavailable }} + maxSurge: {{ $discoverer.rollingUpdate.maxSurge }} + maxUnavailable: {{ $discoverer.rollingUpdate.maxUnavailable }} type: RollingUpdate template: metadata: creationTimestamp: null labels: - app: {{ .Values.discoverer.name }} + app: {{ $discoverer.name }} app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: discoverer annotations: checksum/configmap: {{ include (print $.Template.BasePath "/discoverer/configmap.yaml") . 
| sha256sum }} - {{- if .Values.discoverer.podAnnotations }} - {{- toYaml .Values.discoverer.podAnnotations | nindent 8 }} + {{- if $discoverer.podAnnotations }} + {{- toYaml $discoverer.podAnnotations | nindent 8 }} {{- end }} spec: - {{- if .Values.discoverer.initContainers }} + {{- if $discoverer.initContainers }} initContainers: - {{- $initContainers := dict "initContainers" .Values.discoverer.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- $initContainers := dict "initContainers" $discoverer.initContainers "Values" .Values "namespace" .Release.Namespace -}} {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} {{- end }} affinity: - {{- include "vald.affinity" .Values.discoverer.affinity | nindent 8 }} - {{- if .Values.discoverer.topologySpreadConstraints }} + {{- include "vald.affinity" $discoverer.affinity | nindent 8 }} + {{- if $discoverer.topologySpreadConstraints }} topologySpreadConstraints: - {{- toYaml .Values.discoverer.topologySpreadConstraints | nindent 8 }} + {{- toYaml $discoverer.topologySpreadConstraints | nindent 8 }} {{- end }} containers: - - name: {{ .Values.discoverer.name }} - image: "{{ .Values.discoverer.image.repository }}:{{ default .Values.defaults.image.tag .Values.discoverer.image.tag }}" - imagePullPolicy: {{ .Values.discoverer.image.pullPolicy }} - {{- $servers := dict "Values" .Values.discoverer.server_config "default" .Values.defaults.server_config -}} + - name: {{ $discoverer.name }} + image: "{{ $discoverer.image.repository }}:{{ default .Values.defaults.image.tag $discoverer.image.tag }}" + imagePullPolicy: {{ $discoverer.image.pullPolicy }} + {{- $servers := dict "Values" $discoverer.server_config "default" .Values.defaults.server_config -}} {{- include "vald.containerPorts" $servers | trim | nindent 10 }} resources: - {{- toYaml .Values.discoverer.resources | nindent 12 }} + {{- toYaml $discoverer.resources | nindent 12 }} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File - {{- if .Values.discoverer.env }} + {{- if $discoverer.env }} env: - {{- toYaml .Values.discoverer.env | nindent 12 }} + {{- toYaml $discoverer.env | nindent 12 }} {{- end }} volumeMounts: - - name: {{ .Values.discoverer.name }}-config + - name: {{ $discoverer.name }}-config mountPath: /etc/server/ - {{- if .Values.discoverer.volumeMounts }} - {{- toYaml .Values.discoverer.volumeMounts | nindent 12 }} + {{- if $discoverer.volumeMounts }} + {{- toYaml $discoverer.volumeMounts | nindent 12 }} {{- end }} dnsPolicy: ClusterFirst restartPolicy: Always @@ -95,29 +96,29 @@ spec: serviceAccount: vald serviceAccountName: vald securityContext: {} - terminationGracePeriodSeconds: {{ .Values.discoverer.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ $discoverer.terminationGracePeriodSeconds }} volumes: - - name: {{ .Values.discoverer.name }}-config + - name: {{ $discoverer.name }}-config configMap: defaultMode: 420 - name: {{ .Values.discoverer.name }}-config - {{- if .Values.discoverer.volumes }} - {{- toYaml .Values.discoverer.volumes | nindent 8 }} + name: {{ $discoverer.name }}-config + {{- if $discoverer.volumes }} + {{- toYaml $discoverer.volumes | nindent 8 }} {{- end }} - {{- if .Values.discoverer.nodeName }} - nodeName: {{ .Values.discoverer.nodeName }} + {{- if $discoverer.nodeName }} + nodeName: {{ $discoverer.nodeName }} {{- end }} - {{- if .Values.discoverer.nodeSelector }} + {{- if $discoverer.nodeSelector }} nodeSelector: - {{- toYaml .Values.discoverer.nodeSelector | nindent 8 }} + {{- 
toYaml $discoverer.nodeSelector | nindent 8 }} {{- end }} - {{- if .Values.discoverer.tolerations }} + {{- if $discoverer.tolerations }} tolerations: - {{- toYaml .Values.discoverer.tolerations | nindent 8 }} + {{- toYaml $discoverer.tolerations | nindent 8 }} {{- end }} - {{- if .Values.discoverer.podPriority }} - {{- if .Values.discoverer.podPriority.enabled }} - priorityClassName: {{ .Values.discoverer.name }}-priority + {{- if $discoverer.podPriority }} + {{- if $discoverer.podPriority.enabled }} + priorityClassName: {{ $discoverer.name }}-priority {{- end }} {{- end }} status: diff --git a/charts/vald/templates/discoverer/hpa.yaml b/charts/vald/templates/discoverer/hpa.yaml index e8bbed5ffe..bdf0793334 100644 --- a/charts/vald/templates/discoverer/hpa.yaml +++ b/charts/vald/templates/discoverer/hpa.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.discoverer.hpa.enabled }} +{{- $discoverer := .Values.discoverer -}} +{{- if $discoverer.hpa.enabled }} apiVersion: autoscaling/v1 kind: HorizontalPodAutoscaler metadata: - name: {{ .Values.discoverer.name }} + name: {{ $discoverer.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -26,12 +27,12 @@ metadata: app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: discoverer spec: - maxReplicas: {{ .Values.discoverer.maxReplicas }} - minReplicas: {{ .Values.discoverer.minReplicas }} + maxReplicas: {{ $discoverer.maxReplicas }} + minReplicas: {{ $discoverer.minReplicas }} scaleTargetRef: apiVersion: apps/v1 - kind: {{ .Values.discoverer.kind }} - name: {{ .Values.discoverer.name }} - targetCPUUtilizationPercentage: {{ .Values.discoverer.hpa.targetCPUUtilizationPercentage }} + kind: {{ $discoverer.kind }} + name: {{ $discoverer.name }} + targetCPUUtilizationPercentage: {{ $discoverer.hpa.targetCPUUtilizationPercentage }} status: {{- end }} diff --git a/charts/vald/templates/discoverer/pdb.yaml b/charts/vald/templates/discoverer/pdb.yaml index 6eda9a95bb..92337cb379 100644 --- a/charts/vald/templates/discoverer/pdb.yaml +++ b/charts/vald/templates/discoverer/pdb.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.discoverer.enabled }} +{{- $discoverer := .Values.discoverer -}} +{{- if $discoverer.enabled }} apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: - name: {{ .Values.discoverer.name }} + name: {{ $discoverer.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -26,8 +27,8 @@ metadata: app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: discoverer spec: - maxUnavailable: {{ .Values.discoverer.maxUnavailable }} + maxUnavailable: {{ $discoverer.maxUnavailable }} selector: matchLabels: - app: {{ .Values.discoverer.name }} + app: {{ $discoverer.name }} {{- end }} diff --git a/charts/vald/templates/discoverer/priorityclass.yaml b/charts/vald/templates/discoverer/priorityclass.yaml index b9ab80ea09..f55da1ba06 100644 --- a/charts/vald/templates/discoverer/priorityclass.yaml +++ b/charts/vald/templates/discoverer/priorityclass.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -{{- if and .Values.discoverer.enabled .Values.discoverer.podPriority.enabled }} +{{- $discoverer := .Values.discoverer -}} +{{- if and $discoverer.enabled $discoverer.podPriority.enabled }} apiVersion: scheduling.k8s.io/v1 kind: PriorityClass metadata: - name: {{ .Values.discoverer.name }}-priority + name: {{ $discoverer.name }}-priority labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -25,7 +26,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: discoverer -value: {{ .Values.discoverer.podPriority.value }} +value: {{ $discoverer.podPriority.value }} globalDefault: false description: "A priority class for Vald discoverer." {{- end }} diff --git a/charts/vald/templates/discoverer/serviceaccount.yaml b/charts/vald/templates/discoverer/serviceaccount.yaml index b60e4d81be..dac16f8767 100644 --- a/charts/vald/templates/discoverer/serviceaccount.yaml +++ b/charts/vald/templates/discoverer/serviceaccount.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.discoverer.enabled .Values.discoverer.serviceAccount.enabled }} +{{- $discoverer := .Values.discoverer -}} +{{- if and $discoverer.enabled $discoverer.serviceAccount.enabled }} apiVersion: v1 kind: ServiceAccount metadata: - name: {{ .Values.discoverer.serviceAccount.name }} + name: {{ $discoverer.serviceAccount.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} diff --git a/charts/vald/templates/discoverer/svc.yaml b/charts/vald/templates/discoverer/svc.yaml index 866ef36352..8cce156237 100644 --- a/charts/vald/templates/discoverer/svc.yaml +++ b/charts/vald/templates/discoverer/svc.yaml @@ -13,14 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.discoverer.enabled }} +{{- $discoverer := .Values.discoverer -}} +{{- if $discoverer.enabled }} apiVersion: v1 kind: Service metadata: - name: {{ .Values.discoverer.name }} - {{- if .Values.discoverer.service.annotations }} + name: {{ $discoverer.name }} + {{- if $discoverer.service.annotations }} annotations: - {{- toYaml .Values.discoverer.service.annotations | nindent 4 }} + {{- toYaml $discoverer.service.annotations | nindent 4 }} {{- end }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} @@ -29,20 +30,20 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: discoverer - {{- if .Values.discoverer.service.labels }} - {{- toYaml .Values.discoverer.service.labels | nindent 4 }} + {{- if $discoverer.service.labels }} + {{- toYaml $discoverer.service.labels | nindent 4 }} {{- end }} spec: - {{- $servers := dict "Values" .Values.discoverer.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $discoverer.server_config "default" .Values.defaults.server_config }} {{- include "vald.servicePorts" $servers | nindent 2 }} selector: app.kubernetes.io/name: {{ include "vald.name" . 
}} app.kubernetes.io/component: discoverer - {{- if eq .Values.discoverer.serviceType "ClusterIP" }} + {{- if eq $discoverer.serviceType "ClusterIP" }} clusterIP: None {{- end }} - type: {{ .Values.discoverer.serviceType }} - {{- if .Values.discoverer.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.discoverer.externalTrafficPolicy }} + type: {{ $discoverer.serviceType }} + {{- if $discoverer.externalTrafficPolicy }} + externalTrafficPolicy: {{ $discoverer.externalTrafficPolicy }} {{- end }} {{- end }} diff --git a/charts/vald/templates/gateway/backup/configmap.yaml b/charts/vald/templates/gateway/backup/configmap.yaml new file mode 100644 index 0000000000..a30e3e3b4f --- /dev/null +++ b/charts/vald/templates/gateway/backup/configmap.yaml @@ -0,0 +1,52 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.backup -}} +{{- if $gateway.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $gateway.name }}-config + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-backup +data: + config.yaml: | + --- + version: {{ $gateway.version }} + time_zone: {{ default .Values.defaults.time_zone $gateway.time_zone }} + logging: + {{- $logging := dict "Values" $gateway.logging "default" .Values.defaults.logging }} + {{- include "vald.logging" $logging | nindent 6 }} + server_config: + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config }} + {{- include "vald.servers" $servers | nindent 6 }} + observability: + {{- $observability := dict "Values" $gateway.observability "default" .Values.defaults.observability }} + {{- include "vald.observability" $observability | nindent 6 }} + client: + {{- $client := dict "Values" $gateway.gateway_config.client "default" .Values.defaults.grpc.client }} + {{- include "vald.grpc.client" $client | nindent 6 }} + backup: + host: {{ .Values.manager.compressor.name }}.{{ .Release.Namespace }}.svc.cluster.local + port: {{ default .Values.defaults.server_config.servers.grpc.port .Values.manager.compressor.server_config.servers.grpc.port }} + client: + {{- $backupClient := dict "Values" $gateway.gateway_config.backup.client "default" .Values.defaults.grpc.client }} + {{- include "vald.grpc.client" $backupClient | nindent 8 }} +{{- end }} diff --git a/charts/vald/templates/gateway/backup/daemonset.yaml b/charts/vald/templates/gateway/backup/daemonset.yaml new file mode 100644 index 0000000000..4c83900e10 --- /dev/null +++ b/charts/vald/templates/gateway/backup/daemonset.yaml @@ -0,0 +1,118 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.backup -}} +{{- if and $gateway.enabled (eq $gateway.kind "DaemonSet") }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ $gateway.name }} + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-backup + {{- if $gateway.annotations }} + annotations: + {{- toYaml $gateway.annotations | nindent 4 }} + {{- end }} +spec: + revisionHistoryLimit: {{ $gateway.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ $gateway.name }} + updateStrategy: + rollingUpdate: + maxUnavailable: {{ $gateway.rollingUpdate.maxUnavailable }} + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: gateway + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/gateway/backup/configmap.yaml") . | sha256sum }} + {{- if $gateway.podAnnotations }} + {{- toYaml $gateway.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- if $gateway.initContainers }} + initContainers: + {{- $initContainers := dict "initContainers" $gateway.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} + {{- end }} + affinity: + {{- include "vald.affinity" $gateway.affinity | nindent 8 }} + {{- if $gateway.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml $gateway.topologySpreadConstraints | nindent 8 }} + {{- end }} + containers: + - name: {{ $gateway.name }} + image: "{{ $gateway.image.repository }}:{{ default .Values.defaults.image.tag $gateway.image.tag }}" + imagePullPolicy: {{ $gateway.image.pullPolicy }} + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config -}} + {{- include "vald.containerPorts" $servers | trim | nindent 10 }} + resources: + {{- toYaml $gateway.resources | nindent 12 }} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + {{- if $gateway.env }} + env: + {{- toYaml $gateway.env | nindent 12 }} + {{- end }} + volumeMounts: + - name: {{ $gateway.name }}-config + mountPath: /etc/server/ + {{- if $gateway.volumeMounts }} + {{- toYaml $gateway.volumeMounts | nindent 12 }} + {{- end }} + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: {{ $gateway.terminationGracePeriodSeconds }} + volumes: + - name: {{ $gateway.name }}-config + configMap: + defaultMode: 420 + name: {{ $gateway.name }}-config + {{- if $gateway.volumes }} + {{- toYaml $gateway.volumes | nindent 8 }} + {{- end }} + {{- if $gateway.nodeName }} + nodeName: {{ $gateway.nodeName }} + {{- end }} + {{- if $gateway.nodeSelector }} + 
nodeSelector: + {{- toYaml $gateway.nodeSelector | nindent 8 }} + {{- end }} + {{- if $gateway.tolerations }} + tolerations: + {{- toYaml $gateway.tolerations | nindent 8 }} + {{- end }} + {{- if $gateway.podPriority }} + {{- if $gateway.podPriority.enabled }} + priorityClassName: {{ $gateway.name }}-priority + {{- end }} + {{- end }} +status: +{{- end }} diff --git a/charts/vald/templates/gateway/backup/deployment.yaml b/charts/vald/templates/gateway/backup/deployment.yaml new file mode 100644 index 0000000000..541266a72e --- /dev/null +++ b/charts/vald/templates/gateway/backup/deployment.yaml @@ -0,0 +1,123 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.backup -}} +{{- if and $gateway.enabled (eq $gateway.kind "Deployment") }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ $gateway.name }} + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-backup + {{- if $gateway.annotations }} + annotations: + {{- toYaml $gateway.annotations | nindent 4 }} + {{- end }} +spec: + progressDeadlineSeconds: {{ $gateway.progressDeadlineSeconds }} + {{- if not $gateway.hpa.enabled }} + replicas: {{ $gateway.minReplicas }} + {{- end }} + revisionHistoryLimit: {{ $gateway.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ $gateway.name }} + strategy: + rollingUpdate: + maxSurge: {{ $gateway.rollingUpdate.maxSurge }} + maxUnavailable: {{ $gateway.rollingUpdate.maxUnavailable }} + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: gateway + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/gateway/backup/configmap.yaml") . 
| sha256sum }} + {{- if $gateway.podAnnotations }} + {{- toYaml $gateway.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- if $gateway.initContainers }} + initContainers: + {{- $initContainers := dict "initContainers" $gateway.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} + {{- end }} + affinity: + {{- include "vald.affinity" $gateway.affinity | nindent 8 }} + {{- if $gateway.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml $gateway.topologySpreadConstraints | nindent 8 }} + {{- end }} + containers: + - name: {{ $gateway.name }} + image: "{{ $gateway.image.repository }}:{{ default .Values.defaults.image.tag $gateway.image.tag }}" + imagePullPolicy: {{ $gateway.image.pullPolicy }} + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config -}} + {{- include "vald.containerPorts" $servers | trim | nindent 10 }} + resources: + {{- toYaml $gateway.resources | nindent 12 }} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + {{- if $gateway.env }} + env: + {{- toYaml $gateway.env | nindent 12 }} + {{- end }} + volumeMounts: + - name: {{ $gateway.name }}-config + mountPath: /etc/server/ + {{- if $gateway.volumeMounts }} + {{- toYaml $gateway.volumeMounts | nindent 12 }} + {{- end }} + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: {{ $gateway.terminationGracePeriodSeconds }} + volumes: + - name: {{ $gateway.name }}-config + configMap: + defaultMode: 420 + name: {{ $gateway.name }}-config + {{- if $gateway.volumes }} + {{- toYaml $gateway.volumes | nindent 8 }} + {{- end }} + {{- if $gateway.nodeName }} + nodeName: {{ $gateway.nodeName }} + {{- end }} + {{- if $gateway.nodeSelector }} + nodeSelector: + {{- toYaml $gateway.nodeSelector | nindent 8 }} + {{- end }} + {{- if $gateway.tolerations }} + tolerations: + {{- toYaml $gateway.tolerations | nindent 8 }} + {{- end }} + {{- if $gateway.podPriority }} + {{- if $gateway.podPriority.enabled }} + priorityClassName: {{ $gateway.name }}-priority + {{- end }} + {{- end }} +status: +{{- end }} diff --git a/charts/vald/templates/gateway/backup/hpa.yaml b/charts/vald/templates/gateway/backup/hpa.yaml new file mode 100644 index 0000000000..649369e456 --- /dev/null +++ b/charts/vald/templates/gateway/backup/hpa.yaml @@ -0,0 +1,38 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.backup -}} +{{- if and $gateway.enabled $gateway.hpa.enabled }} +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ $gateway.name }} + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-backup +spec: + maxReplicas: {{ $gateway.maxReplicas }} + minReplicas: {{ $gateway.minReplicas }} + scaleTargetRef: + apiVersion: apps/v1 + kind: {{ $gateway.kind }} + name: {{ $gateway.name }} + targetCPUUtilizationPercentage: {{ $gateway.hpa.targetCPUUtilizationPercentage }} +status: +{{- end }} diff --git a/charts/vald/templates/gateway/backup/ing.yaml b/charts/vald/templates/gateway/backup/ing.yaml new file mode 100644 index 0000000000..b1f70f894e --- /dev/null +++ b/charts/vald/templates/gateway/backup/ing.yaml @@ -0,0 +1,41 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.backup -}} +{{- if and $gateway.enabled $gateway.ingress.enabled }} +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + annotations: + {{- toYaml $gateway.ingress.annotations | nindent 4 }} + labels: + name: {{ $gateway.name }}-ingress + app: {{ $gateway.name }}-ingress + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-backup + name: {{ $gateway.name }}-ingress +spec: + rules: + - host: {{ $gateway.ingress.host }} + http: + paths: + - backend: + serviceName: {{ $gateway.name }} + servicePort: {{ $gateway.ingress.servicePort }} +{{- end }} diff --git a/charts/vald/templates/gateway/backup/pdb.yaml b/charts/vald/templates/gateway/backup/pdb.yaml new file mode 100644 index 0000000000..e083135268 --- /dev/null +++ b/charts/vald/templates/gateway/backup/pdb.yaml @@ -0,0 +1,34 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.backup -}} +{{- if $gateway.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ $gateway.name }} + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-backup +spec: + maxUnavailable: {{ $gateway.maxUnavailable }} + selector: + matchLabels: + app: {{ $gateway.name }} +{{- end }} diff --git a/charts/vald/templates/gateway/backup/priorityclass.yaml b/charts/vald/templates/gateway/backup/priorityclass.yaml new file mode 100644 index 0000000000..b30a8eda29 --- /dev/null +++ b/charts/vald/templates/gateway/backup/priorityclass.yaml @@ -0,0 +1,32 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.backup -}} +{{- if and $gateway.enabled $gateway.podPriority.enabled }} +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ $gateway.name }}-priority + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-backup +value: {{ $gateway.podPriority.value }} +globalDefault: false +description: "A priority class for Vald backup gateway." +{{- end }} diff --git a/charts/vald/templates/gateway/backup/svc.yaml b/charts/vald/templates/gateway/backup/svc.yaml new file mode 100644 index 0000000000..f9b1694a7f --- /dev/null +++ b/charts/vald/templates/gateway/backup/svc.yaml @@ -0,0 +1,49 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.backup -}} +{{- if $gateway.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ $gateway.name }} + {{- if $gateway.service.annotations }} + annotations: + {{- toYaml $gateway.service.annotations | nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-backup + {{- if $gateway.service.labels }} + {{- toYaml $gateway.service.labels | nindent 4 }} + {{- end }} +spec: + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config }} + {{- include "vald.servicePorts" $servers | nindent 2 }} + selector: + app.kubernetes.io/name: {{ include "vald.name" . }} + app.kubernetes.io/component: gateway + {{- if eq $gateway.serviceType "ClusterIP" }} + clusterIP: None + {{- end }} + type: {{ $gateway.serviceType }} + {{- if $gateway.externalTrafficPolicy }} + externalTrafficPolicy: {{ $gateway.externalTrafficPolicy }} + {{- end }} +{{- end }} diff --git a/charts/vald/templates/gateway/filter/configmap.yaml b/charts/vald/templates/gateway/filter/configmap.yaml new file mode 100644 index 0000000000..fff1846663 --- /dev/null +++ b/charts/vald/templates/gateway/filter/configmap.yaml @@ -0,0 +1,79 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.filter -}} +{{- if $gateway.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $gateway.name }}-config + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-filter +data: + config.yaml: | + --- + version: {{ $gateway.version }} + time_zone: {{ default .Values.defaults.time_zone $gateway.time_zone }} + logging: + {{- $logging := dict "Values" $gateway.logging "default" .Values.defaults.logging }} + {{- include "vald.logging" $logging | nindent 6 }} + server_config: + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config }} + {{- include "vald.servers" $servers | nindent 6 }} + observability: + {{- $observability := dict "Values" $gateway.observability "default" .Values.defaults.observability }} + {{- include "vald.observability" $observability | nindent 6 }} + client: + {{- $client := dict "Values" $gateway.gateway_config.client "default" .Values.defaults.grpc.client }} + {{- include "vald.grpc.client" $client | nindent 6 }} + ingress_filter: + client: + {{- $ingressFilterClient := dict "Values" $gateway.gateway_config.ingress_filter.client "default" .Values.defaults.grpc.client }} + {{- include "vald.grpc.client" $ingressFilterClient | nindent 8 }} + {{- if $gateway.gateway_config.ingress_filter.search }} + search: + {{- toYaml $gateway.gateway_config.ingress_filter.search | nindent 8 }} + {{- else }} + search: [] + {{- end }} + {{- if $gateway.gateway_config.ingress_filter.insert }} + insert: + {{- toYaml $gateway.gateway_config.ingress_filter.insert | nindent 8 }} + {{- else }} + insert: [] + {{- end }} + {{- if $gateway.gateway_config.ingress_filter.update }} + update: + {{- toYaml $gateway.gateway_config.ingress_filter.update | nindent 8 }} + {{- else }} + update: [] + {{- end }} + {{- if $gateway.gateway_config.ingress_filter.upsert }} + upsert: + {{- toYaml $gateway.gateway_config.ingress_filter.upsert | nindent 8 }} + {{- else }} + upsert: [] + {{- end }} + egress_filter: + client: + {{- $egressFilterClient := dict "Values" $gateway.gateway_config.egress_filter.client "default" .Values.defaults.grpc.client }} + {{- include "vald.grpc.client" $egressFilterClient | nindent 8 }} + gateway: {} # TODO +{{- end }} diff --git a/charts/vald/templates/gateway/filter/daemonset.yaml b/charts/vald/templates/gateway/filter/daemonset.yaml new file mode 100644 index 0000000000..bcae920756 --- /dev/null +++ b/charts/vald/templates/gateway/filter/daemonset.yaml @@ -0,0 +1,118 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.filter -}} +{{- if and $gateway.enabled (eq $gateway.kind "DaemonSet") }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ $gateway.name }} + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-filter + {{- if $gateway.annotations }} + annotations: + {{- toYaml $gateway.annotations | nindent 4 }} + {{- end }} +spec: + revisionHistoryLimit: {{ $gateway.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ $gateway.name }} + updateStrategy: + rollingUpdate: + maxUnavailable: {{ $gateway.rollingUpdate.maxUnavailable }} + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: gateway + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/gateway/filter/configmap.yaml") . | sha256sum }} + {{- if $gateway.podAnnotations }} + {{- toYaml $gateway.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- if $gateway.initContainers }} + initContainers: + {{- $initContainers := dict "initContainers" $gateway.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} + {{- end }} + affinity: + {{- include "vald.affinity" $gateway.affinity | nindent 8 }} + {{- if $gateway.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml $gateway.topologySpreadConstraints | nindent 8 }} + {{- end }} + containers: + - name: {{ $gateway.name }} + image: "{{ $gateway.image.repository }}:{{ default .Values.defaults.image.tag $gateway.image.tag }}" + imagePullPolicy: {{ $gateway.image.pullPolicy }} + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config -}} + {{- include "vald.containerPorts" $servers | trim | nindent 10 }} + resources: + {{- toYaml $gateway.resources | nindent 12 }} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + {{- if $gateway.env }} + env: + {{- toYaml $gateway.env | nindent 12 }} + {{- end }} + volumeMounts: + - name: {{ $gateway.name }}-config + mountPath: /etc/server/ + {{- if $gateway.volumeMounts }} + {{- toYaml $gateway.volumeMounts | nindent 12 }} + {{- end }} + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: {{ $gateway.terminationGracePeriodSeconds }} + volumes: + - name: {{ $gateway.name }}-config + configMap: + defaultMode: 420 + name: {{ $gateway.name }}-config + {{- if $gateway.volumes }} + {{- toYaml $gateway.volumes | nindent 8 }} + {{- end }} + {{- if $gateway.nodeName }} + nodeName: {{ $gateway.nodeName }} + {{- end }} + {{- if $gateway.nodeSelector }} + nodeSelector: + {{- toYaml $gateway.nodeSelector | nindent 8 }} + {{- end }} + {{- if $gateway.tolerations }} + tolerations: + {{- toYaml $gateway.tolerations | nindent 8 }} + {{- end }} + {{- if $gateway.podPriority }} + {{- if $gateway.podPriority.enabled }} + priorityClassName: {{ $gateway.name }}-priority + {{- end }} + {{- end }} +status: +{{- end }} diff --git a/charts/vald/templates/gateway/filter/deployment.yaml b/charts/vald/templates/gateway/filter/deployment.yaml new file mode 100644 index 0000000000..6f79452f7a --- /dev/null +++ b/charts/vald/templates/gateway/filter/deployment.yaml @@ -0,0 +1,123 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you 
may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.filter -}} +{{- if and $gateway.enabled (eq $gateway.kind "Deployment") }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ $gateway.name }} + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-filter + {{- if $gateway.annotations }} + annotations: + {{- toYaml $gateway.annotations | nindent 4 }} + {{- end }} +spec: + progressDeadlineSeconds: {{ $gateway.progressDeadlineSeconds }} + {{- if not $gateway.hpa.enabled }} + replicas: {{ $gateway.minReplicas }} + {{- end }} + revisionHistoryLimit: {{ $gateway.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ $gateway.name }} + strategy: + rollingUpdate: + maxSurge: {{ $gateway.rollingUpdate.maxSurge }} + maxUnavailable: {{ $gateway.rollingUpdate.maxUnavailable }} + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: gateway + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/gateway/filter/configmap.yaml") . 
| sha256sum }} + {{- if $gateway.podAnnotations }} + {{- toYaml $gateway.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- if $gateway.initContainers }} + initContainers: + {{- $initContainers := dict "initContainers" $gateway.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} + {{- end }} + affinity: + {{- include "vald.affinity" $gateway.affinity | nindent 8 }} + {{- if $gateway.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml $gateway.topologySpreadConstraints | nindent 8 }} + {{- end }} + containers: + - name: {{ $gateway.name }} + image: "{{ $gateway.image.repository }}:{{ default .Values.defaults.image.tag $gateway.image.tag }}" + imagePullPolicy: {{ $gateway.image.pullPolicy }} + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config -}} + {{- include "vald.containerPorts" $servers | trim | nindent 10 }} + resources: + {{- toYaml $gateway.resources | nindent 12 }} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + {{- if $gateway.env }} + env: + {{- toYaml $gateway.env | nindent 12 }} + {{- end }} + volumeMounts: + - name: {{ $gateway.name }}-config + mountPath: /etc/server/ + {{- if $gateway.volumeMounts }} + {{- toYaml $gateway.volumeMounts | nindent 12 }} + {{- end }} + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: {{ $gateway.terminationGracePeriodSeconds }} + volumes: + - name: {{ $gateway.name }}-config + configMap: + defaultMode: 420 + name: {{ $gateway.name }}-config + {{- if $gateway.volumes }} + {{- toYaml $gateway.volumes | nindent 8 }} + {{- end }} + {{- if $gateway.nodeName }} + nodeName: {{ $gateway.nodeName }} + {{- end }} + {{- if $gateway.nodeSelector }} + nodeSelector: + {{- toYaml $gateway.nodeSelector | nindent 8 }} + {{- end }} + {{- if $gateway.tolerations }} + tolerations: + {{- toYaml $gateway.tolerations | nindent 8 }} + {{- end }} + {{- if $gateway.podPriority }} + {{- if $gateway.podPriority.enabled }} + priorityClassName: {{ $gateway.name }}-priority + {{- end }} + {{- end }} +status: +{{- end }} diff --git a/charts/vald/templates/gateway/filter/hpa.yaml b/charts/vald/templates/gateway/filter/hpa.yaml new file mode 100644 index 0000000000..f6f26baaf7 --- /dev/null +++ b/charts/vald/templates/gateway/filter/hpa.yaml @@ -0,0 +1,38 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.filter -}} +{{- if and $gateway.enabled $gateway.hpa.enabled }} +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ $gateway.name }} + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-filter +spec: + maxReplicas: {{ $gateway.maxReplicas }} + minReplicas: {{ $gateway.minReplicas }} + scaleTargetRef: + apiVersion: apps/v1 + kind: {{ $gateway.kind }} + name: {{ $gateway.name }} + targetCPUUtilizationPercentage: {{ $gateway.hpa.targetCPUUtilizationPercentage }} +status: +{{- end }} diff --git a/charts/vald/templates/gateway/filter/ing.yaml b/charts/vald/templates/gateway/filter/ing.yaml new file mode 100644 index 0000000000..4ab91f2e15 --- /dev/null +++ b/charts/vald/templates/gateway/filter/ing.yaml @@ -0,0 +1,41 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.filter -}} +{{- if and $gateway.enabled $gateway.ingress.enabled }} +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + annotations: + {{- toYaml $gateway.ingress.annotations | nindent 4 }} + labels: + name: {{ $gateway.name }}-ingress + app: {{ $gateway.name }}-ingress + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-filter + name: {{ $gateway.name }}-ingress +spec: + rules: + - host: {{ $gateway.ingress.host }} + http: + paths: + - backend: + serviceName: {{ $gateway.name }} + servicePort: {{ $gateway.ingress.servicePort }} +{{- end }} diff --git a/charts/vald/templates/gateway/filter/pdb.yaml b/charts/vald/templates/gateway/filter/pdb.yaml new file mode 100644 index 0000000000..40ec4b8831 --- /dev/null +++ b/charts/vald/templates/gateway/filter/pdb.yaml @@ -0,0 +1,34 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.filter -}} +{{- if $gateway.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ $gateway.name }} + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-filter +spec: + maxUnavailable: {{ $gateway.maxUnavailable }} + selector: + matchLabels: + app: {{ $gateway.name }} +{{- end }} diff --git a/charts/vald/templates/gateway/filter/priorityclass.yaml b/charts/vald/templates/gateway/filter/priorityclass.yaml new file mode 100644 index 0000000000..c789ccdc59 --- /dev/null +++ b/charts/vald/templates/gateway/filter/priorityclass.yaml @@ -0,0 +1,32 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.filter -}} +{{- if and $gateway.enabled $gateway.podPriority.enabled }} +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ $gateway.name }}-priority + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-filter +value: {{ $gateway.podPriority.value }} +globalDefault: false +description: "A priority class for Vald filter gateway." +{{- end }} diff --git a/charts/vald/templates/gateway/filter/svc.yaml b/charts/vald/templates/gateway/filter/svc.yaml new file mode 100644 index 0000000000..3b09e193b9 --- /dev/null +++ b/charts/vald/templates/gateway/filter/svc.yaml @@ -0,0 +1,49 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.filter -}} +{{- if $gateway.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ $gateway.name }} + {{- if $gateway.service.annotations }} + annotations: + {{- toYaml $gateway.service.annotations | nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-filter + {{- if $gateway.service.labels }} + {{- toYaml $gateway.service.labels | nindent 4 }} + {{- end }} +spec: + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config }} + {{- include "vald.servicePorts" $servers | nindent 2 }} + selector: + app.kubernetes.io/name: {{ include "vald.name" . }} + app.kubernetes.io/component: gateway + {{- if eq $gateway.serviceType "ClusterIP" }} + clusterIP: None + {{- end }} + type: {{ $gateway.serviceType }} + {{- if $gateway.externalTrafficPolicy }} + externalTrafficPolicy: {{ $gateway.externalTrafficPolicy }} + {{- end }} +{{- end }} diff --git a/charts/vald/templates/gateway/lb/configmap.yaml b/charts/vald/templates/gateway/lb/configmap.yaml new file mode 100644 index 0000000000..879e092acb --- /dev/null +++ b/charts/vald/templates/gateway/lb/configmap.yaml @@ -0,0 +1,60 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.lb -}} +{{- if $gateway.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $gateway.name }}-config + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-lb +data: + config.yaml: | + --- + version: {{ $gateway.version }} + time_zone: {{ default .Values.defaults.time_zone $gateway.time_zone }} + logging: + {{- $logging := dict "Values" $gateway.logging "default" .Values.defaults.logging }} + {{- include "vald.logging" $logging | nindent 6 }} + server_config: + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config }} + {{- include "vald.servers" $servers | nindent 6 }} + observability: + {{- $observability := dict "Values" $gateway.observability "default" .Values.defaults.observability }} + {{- include "vald.observability" $observability | nindent 6 }} + gateway: + agent_port: {{ default .Values.defaults.server_config.servers.grpc.port .Values.agent.server_config.servers.grpc.port }} + agent_name: {{ .Values.agent.name | quote }} + agent_dns: {{ .Values.agent.name }}.{{ .Release.Namespace }}.svc.cluster.local + agent_namespace: {{ $gateway.gateway_config.agent_namespace | quote }} + node_name: {{ $gateway.gateway_config.node_name | quote }} + index_replica: {{ $gateway.gateway_config.index_replica }} + discoverer: + host: {{ .Values.discoverer.name }}.{{ .Release.Namespace }}.svc.cluster.local + port: {{ default .Values.defaults.server_config.servers.grpc.port .Values.discoverer.server_config.servers.grpc.port }} + duration: {{ $gateway.gateway_config.discoverer.duration }} + discover_client: + {{- $discoverClient := dict "Values" $gateway.gateway_config.discoverer.discover_client "default" .Values.defaults.grpc.client }} + {{- include "vald.grpc.client" $discoverClient | nindent 10 }} + agent_client: + {{- $agentClient := dict "Values" $gateway.gateway_config.discoverer.agent_client "default" .Values.defaults.grpc.client }} + {{- include "vald.grpc.client" $agentClient | nindent 10 }} +{{- end }} diff --git a/charts/vald/templates/gateway/lb/daemonset.yaml b/charts/vald/templates/gateway/lb/daemonset.yaml new file mode 100644 index 0000000000..d678f7b55a --- /dev/null +++ b/charts/vald/templates/gateway/lb/daemonset.yaml @@ -0,0 +1,118 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.lb -}} +{{- if and $gateway.enabled (eq $gateway.kind "DaemonSet") }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ $gateway.name }} + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-lb + {{- if $gateway.annotations }} + annotations: + {{- toYaml $gateway.annotations | nindent 4 }} + {{- end }} +spec: + revisionHistoryLimit: {{ $gateway.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ $gateway.name }} + updateStrategy: + rollingUpdate: + maxUnavailable: {{ $gateway.rollingUpdate.maxUnavailable }} + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: gateway + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/gateway/lb/configmap.yaml") . | sha256sum }} + {{- if $gateway.podAnnotations }} + {{- toYaml $gateway.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- if $gateway.initContainers }} + initContainers: + {{- $initContainers := dict "initContainers" $gateway.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} + {{- end }} + affinity: + {{- include "vald.affinity" $gateway.affinity | nindent 8 }} + {{- if $gateway.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml $gateway.topologySpreadConstraints | nindent 8 }} + {{- end }} + containers: + - name: {{ $gateway.name }} + image: "{{ $gateway.image.repository }}:{{ default .Values.defaults.image.tag $gateway.image.tag }}" + imagePullPolicy: {{ $gateway.image.pullPolicy }} + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config -}} + {{- include "vald.containerPorts" $servers | trim | nindent 10 }} + resources: + {{- toYaml $gateway.resources | nindent 12 }} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + {{- if $gateway.env }} + env: + {{- toYaml $gateway.env | nindent 12 }} + {{- end }} + volumeMounts: + - name: {{ $gateway.name }}-config + mountPath: /etc/server/ + {{- if $gateway.volumeMounts }} + {{- toYaml $gateway.volumeMounts | nindent 12 }} + {{- end }} + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: {{ $gateway.terminationGracePeriodSeconds }} + volumes: + - name: {{ $gateway.name }}-config + configMap: + defaultMode: 420 + name: {{ $gateway.name }}-config + {{- if $gateway.volumes }} + {{- toYaml $gateway.volumes | nindent 8 }} + {{- end }} + {{- if $gateway.nodeName }} + nodeName: {{ $gateway.nodeName }} + {{- end }} + {{- if $gateway.nodeSelector }} + nodeSelector: + {{- toYaml $gateway.nodeSelector | nindent 8 }} + {{- end }} + {{- if $gateway.tolerations }} + tolerations: + {{- toYaml $gateway.tolerations | nindent 8 }} + {{- end }} + {{- if $gateway.podPriority }} + {{- if $gateway.podPriority.enabled }} + priorityClassName: {{ $gateway.name }}-priority + {{- end }} + {{- end }} +status: +{{- end }} diff --git a/charts/vald/templates/gateway/lb/deployment.yaml b/charts/vald/templates/gateway/lb/deployment.yaml new file mode 100644 index 0000000000..0a79d548e4 --- /dev/null +++ b/charts/vald/templates/gateway/lb/deployment.yaml @@ -0,0 +1,123 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.lb -}} +{{- if and $gateway.enabled (eq $gateway.kind "Deployment") }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ $gateway.name }} + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-lb + {{- if $gateway.annotations }} + annotations: + {{- toYaml $gateway.annotations | nindent 4 }} + {{- end }} +spec: + progressDeadlineSeconds: {{ $gateway.progressDeadlineSeconds }} + {{- if not $gateway.hpa.enabled }} + replicas: {{ $gateway.minReplicas }} + {{- end }} + revisionHistoryLimit: {{ $gateway.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ $gateway.name }} + strategy: + rollingUpdate: + maxSurge: {{ $gateway.rollingUpdate.maxSurge }} + maxUnavailable: {{ $gateway.rollingUpdate.maxUnavailable }} + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: gateway + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/gateway/lb/configmap.yaml") . 
| sha256sum }} + {{- if $gateway.podAnnotations }} + {{- toYaml $gateway.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- if $gateway.initContainers }} + initContainers: + {{- $initContainers := dict "initContainers" $gateway.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} + {{- end }} + affinity: + {{- include "vald.affinity" $gateway.affinity | nindent 8 }} + {{- if $gateway.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml $gateway.topologySpreadConstraints | nindent 8 }} + {{- end }} + containers: + - name: {{ $gateway.name }} + image: "{{ $gateway.image.repository }}:{{ default .Values.defaults.image.tag $gateway.image.tag }}" + imagePullPolicy: {{ $gateway.image.pullPolicy }} + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config -}} + {{- include "vald.containerPorts" $servers | trim | nindent 10 }} + resources: + {{- toYaml $gateway.resources | nindent 12 }} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + {{- if $gateway.env }} + env: + {{- toYaml $gateway.env | nindent 12 }} + {{- end }} + volumeMounts: + - name: {{ $gateway.name }}-config + mountPath: /etc/server/ + {{- if $gateway.volumeMounts }} + {{- toYaml $gateway.volumeMounts | nindent 12 }} + {{- end }} + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: {{ $gateway.terminationGracePeriodSeconds }} + volumes: + - name: {{ $gateway.name }}-config + configMap: + defaultMode: 420 + name: {{ $gateway.name }}-config + {{- if $gateway.volumes }} + {{- toYaml $gateway.volumes | nindent 8 }} + {{- end }} + {{- if $gateway.nodeName }} + nodeName: {{ $gateway.nodeName }} + {{- end }} + {{- if $gateway.nodeSelector }} + nodeSelector: + {{- toYaml $gateway.nodeSelector | nindent 8 }} + {{- end }} + {{- if $gateway.tolerations }} + tolerations: + {{- toYaml $gateway.tolerations | nindent 8 }} + {{- end }} + {{- if $gateway.podPriority }} + {{- if $gateway.podPriority.enabled }} + priorityClassName: {{ $gateway.name }}-priority + {{- end }} + {{- end }} +status: +{{- end }} diff --git a/charts/vald/templates/gateway/lb/hpa.yaml b/charts/vald/templates/gateway/lb/hpa.yaml new file mode 100644 index 0000000000..b74f314cfb --- /dev/null +++ b/charts/vald/templates/gateway/lb/hpa.yaml @@ -0,0 +1,38 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.lb -}} +{{- if and $gateway.enabled $gateway.hpa.enabled }} +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ $gateway.name }} + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-lb +spec: + maxReplicas: {{ $gateway.maxReplicas }} + minReplicas: {{ $gateway.minReplicas }} + scaleTargetRef: + apiVersion: apps/v1 + kind: {{ $gateway.kind }} + name: {{ $gateway.name }} + targetCPUUtilizationPercentage: {{ $gateway.hpa.targetCPUUtilizationPercentage }} +status: +{{- end }} diff --git a/charts/vald/templates/gateway/lb/ing.yaml b/charts/vald/templates/gateway/lb/ing.yaml new file mode 100644 index 0000000000..090b67113b --- /dev/null +++ b/charts/vald/templates/gateway/lb/ing.yaml @@ -0,0 +1,41 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.lb -}} +{{- if and $gateway.enabled $gateway.ingress.enabled }} +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + annotations: + {{- toYaml $gateway.ingress.annotations | nindent 4 }} + labels: + name: {{ $gateway.name }}-ingress + app: {{ $gateway.name }}-ingress + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-lb + name: {{ $gateway.name }}-ingress +spec: + rules: + - host: {{ $gateway.ingress.host }} + http: + paths: + - backend: + serviceName: {{ $gateway.name }} + servicePort: {{ $gateway.ingress.servicePort }} +{{- end }} diff --git a/charts/vald/templates/gateway/lb/pdb.yaml b/charts/vald/templates/gateway/lb/pdb.yaml new file mode 100644 index 0000000000..93d458f38b --- /dev/null +++ b/charts/vald/templates/gateway/lb/pdb.yaml @@ -0,0 +1,34 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.lb -}} +{{- if $gateway.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ $gateway.name }} + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-lb +spec: + maxUnavailable: {{ $gateway.maxUnavailable }} + selector: + matchLabels: + app: {{ $gateway.name }} +{{- end }} diff --git a/charts/vald/templates/gateway/lb/priorityclass.yaml b/charts/vald/templates/gateway/lb/priorityclass.yaml new file mode 100644 index 0000000000..2676f395e0 --- /dev/null +++ b/charts/vald/templates/gateway/lb/priorityclass.yaml @@ -0,0 +1,32 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.lb -}} +{{- if and $gateway.enabled $gateway.podPriority.enabled }} +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ $gateway.name }}-priority + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-lb +value: {{ $gateway.podPriority.value }} +globalDefault: false +description: "A priority class for Vald lb gateway." +{{- end }} diff --git a/charts/vald/templates/gateway/lb/svc.yaml b/charts/vald/templates/gateway/lb/svc.yaml new file mode 100644 index 0000000000..3f0ff881ba --- /dev/null +++ b/charts/vald/templates/gateway/lb/svc.yaml @@ -0,0 +1,49 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.lb -}} +{{- if $gateway.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ $gateway.name }} + {{- if $gateway.service.annotations }} + annotations: + {{- toYaml $gateway.service.annotations | nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-lb + {{- if $gateway.service.labels }} + {{- toYaml $gateway.service.labels | nindent 4 }} + {{- end }} +spec: + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config }} + {{- include "vald.servicePorts" $servers | nindent 2 }} + selector: + app.kubernetes.io/name: {{ include "vald.name" . }} + app.kubernetes.io/component: gateway + {{- if eq $gateway.serviceType "ClusterIP" }} + clusterIP: None + {{- end }} + type: {{ $gateway.serviceType }} + {{- if $gateway.externalTrafficPolicy }} + externalTrafficPolicy: {{ $gateway.externalTrafficPolicy }} + {{- end }} +{{- end }} diff --git a/charts/vald/templates/gateway/meta/configmap.yaml b/charts/vald/templates/gateway/meta/configmap.yaml new file mode 100644 index 0000000000..cc8389b99c --- /dev/null +++ b/charts/vald/templates/gateway/meta/configmap.yaml @@ -0,0 +1,55 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.meta -}} +{{- if $gateway.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $gateway.name }}-config + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-meta +data: + config.yaml: | + --- + version: {{ $gateway.version }} + time_zone: {{ default .Values.defaults.time_zone $gateway.time_zone }} + logging: + {{- $logging := dict "Values" $gateway.logging "default" .Values.defaults.logging }} + {{- include "vald.logging" $logging | nindent 6 }} + server_config: + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config }} + {{- include "vald.servers" $servers | nindent 6 }} + observability: + {{- $observability := dict "Values" $gateway.observability "default" .Values.defaults.observability }} + {{- include "vald.observability" $observability | nindent 6 }} + client: + {{- $client := dict "Values" $gateway.gateway_config.client "default" .Values.defaults.grpc.client }} + {{- include "vald.grpc.client" $client | nindent 6 }} + meta: + host: {{ .Values.meta.name }}.{{ .Release.Namespace }}.svc.cluster.local + port: {{ default .Values.defaults.server_config.servers.grpc.port .Values.meta.server_config.servers.grpc.port }} + client: + {{- $metaClient := dict "Values" $gateway.gateway_config.meta.client "default" .Values.defaults.grpc.client }} + {{- include "vald.grpc.client" $metaClient | nindent 8 }} + enable_cache: {{ $gateway.gateway_config.meta.enable_cache }} + cache_expiration: {{ $gateway.gateway_config.meta.cache_expiration }} + expired_cache_check_duration: {{ $gateway.gateway_config.meta.expired_cache_check_duration }} +{{- end }} diff --git a/charts/vald/templates/gateway/meta/daemonset.yaml b/charts/vald/templates/gateway/meta/daemonset.yaml new file mode 100644 index 0000000000..6add6dd5e7 --- /dev/null +++ b/charts/vald/templates/gateway/meta/daemonset.yaml @@ -0,0 +1,118 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.meta -}} +{{- if and $gateway.enabled (eq $gateway.kind "DaemonSet") }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ $gateway.name }} + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-meta + {{- if $gateway.annotations }} + annotations: + {{- toYaml $gateway.annotations | nindent 4 }} + {{- end }} +spec: + revisionHistoryLimit: {{ $gateway.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ $gateway.name }} + updateStrategy: + rollingUpdate: + maxUnavailable: {{ $gateway.rollingUpdate.maxUnavailable }} + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: gateway + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/gateway/meta/configmap.yaml") . | sha256sum }} + {{- if $gateway.podAnnotations }} + {{- toYaml $gateway.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- if $gateway.initContainers }} + initContainers: + {{- $initContainers := dict "initContainers" $gateway.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} + {{- end }} + affinity: + {{- include "vald.affinity" $gateway.affinity | nindent 8 }} + {{- if $gateway.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml $gateway.topologySpreadConstraints | nindent 8 }} + {{- end }} + containers: + - name: {{ $gateway.name }} + image: "{{ $gateway.image.repository }}:{{ default .Values.defaults.image.tag $gateway.image.tag }}" + imagePullPolicy: {{ $gateway.image.pullPolicy }} + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config -}} + {{- include "vald.containerPorts" $servers | trim | nindent 10 }} + resources: + {{- toYaml $gateway.resources | nindent 12 }} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + {{- if $gateway.env }} + env: + {{- toYaml $gateway.env | nindent 12 }} + {{- end }} + volumeMounts: + - name: {{ $gateway.name }}-config + mountPath: /etc/server/ + {{- if $gateway.volumeMounts }} + {{- toYaml $gateway.volumeMounts | nindent 12 }} + {{- end }} + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: {{ $gateway.terminationGracePeriodSeconds }} + volumes: + - name: {{ $gateway.name }}-config + configMap: + defaultMode: 420 + name: {{ $gateway.name }}-config + {{- if $gateway.volumes }} + {{- toYaml $gateway.volumes | nindent 8 }} + {{- end }} + {{- if $gateway.nodeName }} + nodeName: {{ $gateway.nodeName }} + {{- end }} + {{- if $gateway.nodeSelector }} + nodeSelector: + {{- toYaml $gateway.nodeSelector | nindent 8 }} + {{- end }} + {{- if $gateway.tolerations }} + tolerations: + {{- toYaml $gateway.tolerations | nindent 8 }} + {{- end }} + {{- if $gateway.podPriority }} + {{- if $gateway.podPriority.enabled }} + priorityClassName: {{ $gateway.name }}-priority + {{- end }} + {{- end }} +status: +{{- end }} diff --git a/charts/vald/templates/gateway/meta/deployment.yaml b/charts/vald/templates/gateway/meta/deployment.yaml new file mode 100644 index 0000000000..d80bf835ae --- /dev/null +++ b/charts/vald/templates/gateway/meta/deployment.yaml @@ -0,0 +1,123 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
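# Illustrative note: the meta gateway workload is rendered either by the
# DaemonSet template above or by this Deployment template, selected through
# gateway.meta.kind; minReplicas is only emitted as the replica count while
# the HPA is disabled. A minimal values.yaml fragment for the Deployment case
# (key names follow the template expressions; the concrete values are assumed,
# not chart defaults):
#
#   gateway:
#     meta:
#       enabled: true
#       kind: Deployment      # "DaemonSet" selects the template above instead
#       minReplicas: 3        # used as spec.replicas only when hpa.enabled is false
#       hpa:
#         enabled: false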
+# +{{- $gateway := .Values.gateway.meta -}} +{{- if and $gateway.enabled (eq $gateway.kind "Deployment") }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ $gateway.name }} + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway + {{- if $gateway.annotations }} + annotations: + {{- toYaml $gateway.annotations | nindent 4 }} + {{- end }} +spec: + progressDeadlineSeconds: {{ $gateway.progressDeadlineSeconds }} + {{- if not $gateway.hpa.enabled }} + replicas: {{ $gateway.minReplicas }} + {{- end }} + revisionHistoryLimit: {{ $gateway.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ $gateway.name }} + strategy: + rollingUpdate: + maxSurge: {{ $gateway.rollingUpdate.maxSurge }} + maxUnavailable: {{ $gateway.rollingUpdate.maxUnavailable }} + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: {{ $gateway.name }} + app.kubernetes.io/name: {{ include "vald.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: gateway + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/gateway/meta/configmap.yaml") . | sha256sum }} + {{- if $gateway.podAnnotations }} + {{- toYaml $gateway.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- if $gateway.initContainers }} + initContainers: + {{- $initContainers := dict "initContainers" $gateway.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} + {{- end }} + affinity: + {{- include "vald.affinity" $gateway.affinity | nindent 8 }} + {{- if $gateway.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml $gateway.topologySpreadConstraints | nindent 8 }} + {{- end }} + containers: + - name: {{ $gateway.name }} + image: "{{ $gateway.image.repository }}:{{ default .Values.defaults.image.tag $gateway.image.tag }}" + imagePullPolicy: {{ $gateway.image.pullPolicy }} + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config -}} + {{- include "vald.containerPorts" $servers | trim | nindent 10 }} + resources: + {{- toYaml $gateway.resources | nindent 12 }} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + {{- if $gateway.env }} + env: + {{- toYaml $gateway.env | nindent 12 }} + {{- end }} + volumeMounts: + - name: {{ $gateway.name }}-config + mountPath: /etc/server/ + {{- if $gateway.volumeMounts }} + {{- toYaml $gateway.volumeMounts | nindent 12 }} + {{- end }} + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: {{ $gateway.terminationGracePeriodSeconds }} + volumes: + - name: {{ $gateway.name }}-config + configMap: + defaultMode: 420 + name: {{ $gateway.name }}-config + {{- if $gateway.volumes }} + {{- toYaml $gateway.volumes | nindent 8 }} + {{- end }} + {{- if $gateway.nodeName }} + nodeName: {{ $gateway.nodeName }} + {{- end }} + {{- if $gateway.nodeSelector }} + nodeSelector: + {{- toYaml $gateway.nodeSelector | nindent 8 }} + {{- end }} + {{- if $gateway.tolerations }} + tolerations: + {{- toYaml $gateway.tolerations | nindent 8 }} + {{- end }} + {{- if $gateway.podPriority }} + {{- if $gateway.podPriority.enabled }} 
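# The priorityClassName set just below refers to the PriorityClass created in
# gateway/meta/priorityclass.yaml (the gateway name with a "-priority" suffix),
# and that template is gated on the same podPriority.enabled flag, so the class
# only exists when this reference is emitted. Illustrative values fragment
# (the priority value is an assumption, not a chart default):
#
#   gateway:
#     meta:
#       podPriority:
#         enabled: true
#         value: 1000000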
+ priorityClassName: {{ $gateway.name }}-priority + {{- end }} + {{- end }} +status: +{{- end }} diff --git a/charts/vald/templates/gateway/meta/hpa.yaml b/charts/vald/templates/gateway/meta/hpa.yaml new file mode 100644 index 0000000000..a838b58e40 --- /dev/null +++ b/charts/vald/templates/gateway/meta/hpa.yaml @@ -0,0 +1,38 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.meta -}} +{{- if and $gateway.enabled $gateway.hpa.enabled }} +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ $gateway.name }} + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-meta +spec: + maxReplicas: {{ $gateway.maxReplicas }} + minReplicas: {{ $gateway.minReplicas }} + scaleTargetRef: + apiVersion: apps/v1 + kind: {{ $gateway.kind }} + name: {{ $gateway.name }} + targetCPUUtilizationPercentage: {{ $gateway.hpa.targetCPUUtilizationPercentage }} +status: +{{- end }} diff --git a/charts/vald/templates/gateway/meta/ing.yaml b/charts/vald/templates/gateway/meta/ing.yaml new file mode 100644 index 0000000000..2e4ba71fbe --- /dev/null +++ b/charts/vald/templates/gateway/meta/ing.yaml @@ -0,0 +1,41 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.meta -}} +{{- if and $gateway.enabled $gateway.ingress.enabled }} +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + annotations: + {{- toYaml $gateway.ingress.annotations | nindent 4 }} + labels: + name: {{ $gateway.name }}-ingress + app: {{ $gateway.name }}-ingress + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-meta + name: {{ $gateway.name }}-ingress +spec: + rules: + - host: {{ $gateway.ingress.host }} + http: + paths: + - backend: + serviceName: {{ $gateway.name }} + servicePort: {{ $gateway.ingress.servicePort }} +{{- end }} diff --git a/charts/vald/templates/gateway/meta/pdb.yaml b/charts/vald/templates/gateway/meta/pdb.yaml new file mode 100644 index 0000000000..d7190c0262 --- /dev/null +++ b/charts/vald/templates/gateway/meta/pdb.yaml @@ -0,0 +1,34 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.meta -}} +{{- if $gateway.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ $gateway.name }} + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-meta +spec: + maxUnavailable: {{ $gateway.maxUnavailable }} + selector: + matchLabels: + app: {{ $gateway.name }} +{{- end }} diff --git a/charts/vald/templates/gateway/meta/priorityclass.yaml b/charts/vald/templates/gateway/meta/priorityclass.yaml new file mode 100644 index 0000000000..fb6a65e007 --- /dev/null +++ b/charts/vald/templates/gateway/meta/priorityclass.yaml @@ -0,0 +1,32 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.meta -}} +{{- if and $gateway.enabled $gateway.podPriority.enabled }} +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ $gateway.name }}-priority + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-meta +value: {{ $gateway.podPriority.value }} +globalDefault: false +description: "A priority class for Vald meta gateway." 
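# With podPriority enabled, and assuming for illustration a gateway name of
# "vald-meta-gateway" and a podPriority.value of 1000000, this template
# renders roughly the following manifest:
#
#   apiVersion: scheduling.k8s.io/v1
#   kind: PriorityClass
#   metadata:
#     name: vald-meta-gateway-priority
#   value: 1000000
#   globalDefault: false
#
# globalDefault is kept false, so only pods that explicitly set
# priorityClassName (the meta gateway pods defined above) pick up this
# priority; other workloads in the cluster are unaffected.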
+{{- end }} diff --git a/charts/vald/templates/gateway/meta/svc.yaml b/charts/vald/templates/gateway/meta/svc.yaml new file mode 100644 index 0000000000..efa783bc71 --- /dev/null +++ b/charts/vald/templates/gateway/meta/svc.yaml @@ -0,0 +1,49 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{{- $gateway := .Values.gateway.meta -}} +{{- if $gateway.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ $gateway.name }} + {{- if $gateway.service.annotations }} + annotations: + {{- toYaml $gateway.service.annotations | nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "vald.name" . }} + helm.sh/chart: {{ include "vald.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.Version }} + app.kubernetes.io/component: gateway-meta + {{- if $gateway.service.labels }} + {{- toYaml $gateway.service.labels | nindent 4 }} + {{- end }} +spec: + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config }} + {{- include "vald.servicePorts" $servers | nindent 2 }} + selector: + app.kubernetes.io/name: {{ include "vald.name" . }} + app.kubernetes.io/component: gateway + {{- if eq $gateway.serviceType "ClusterIP" }} + clusterIP: None + {{- end }} + type: {{ $gateway.serviceType }} + {{- if $gateway.externalTrafficPolicy }} + externalTrafficPolicy: {{ $gateway.externalTrafficPolicy }} + {{- end }} +{{- end }} diff --git a/charts/vald/templates/gateway/vald/configmap.yaml b/charts/vald/templates/gateway/vald/configmap.yaml index 74a74b2740..cef437c3a1 100644 --- a/charts/vald/templates/gateway/vald/configmap.yaml +++ b/charts/vald/templates/gateway/vald/configmap.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.gateway.enabled }} +{{- $gateway := .Values.gateway.vald -}} +{{- if $gateway.enabled }} apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.gateway.name }}-config + name: {{ $gateway.name }}-config labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . 
}} @@ -28,48 +29,48 @@ metadata: data: config.yaml: | --- - version: {{ .Values.gateway.version }} - time_zone: {{ default .Values.defaults.time_zone .Values.gateway.time_zone }} + version: {{ $gateway.version }} + time_zone: {{ default .Values.defaults.time_zone $gateway.time_zone }} logging: - {{- $logging := dict "Values" .Values.gateway.logging "default" .Values.defaults.logging }} + {{- $logging := dict "Values" $gateway.logging "default" .Values.defaults.logging }} {{- include "vald.logging" $logging | nindent 6 }} server_config: - {{- $servers := dict "Values" .Values.gateway.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config }} {{- include "vald.servers" $servers | nindent 6 }} observability: - {{- $observability := dict "Values" .Values.gateway.observability "default" .Values.defaults.observability }} + {{- $observability := dict "Values" $gateway.observability "default" .Values.defaults.observability }} {{- include "vald.observability" $observability | nindent 6 }} gateway: agent_port: {{ default .Values.defaults.server_config.servers.grpc.port .Values.agent.server_config.servers.grpc.port }} agent_name: {{ .Values.agent.name | quote }} agent_dns: {{ .Values.agent.name }}.{{ .Release.Namespace }}.svc.cluster.local - agent_namespace: {{ .Values.gateway.gateway_config.agent_namespace | quote }} - node_name: {{ .Values.gateway.gateway_config.node_name | quote }} - index_replica: {{ .Values.gateway.gateway_config.index_replica }} + agent_namespace: {{ $gateway.gateway_config.agent_namespace | quote }} + node_name: {{ $gateway.gateway_config.node_name | quote }} + index_replica: {{ $gateway.gateway_config.index_replica }} discoverer: host: {{ .Values.discoverer.name }}.{{ .Release.Namespace }}.svc.cluster.local port: {{ default .Values.defaults.server_config.servers.grpc.port .Values.discoverer.server_config.servers.grpc.port }} - duration: {{ .Values.gateway.gateway_config.discoverer.duration }} + duration: {{ $gateway.gateway_config.discoverer.duration }} discover_client: - {{- $discoverClient := dict "Values" .Values.gateway.gateway_config.discoverer.discover_client "default" .Values.defaults.grpc.client }} + {{- $discoverClient := dict "Values" $gateway.gateway_config.discoverer.discover_client "default" .Values.defaults.grpc.client }} {{- include "vald.grpc.client" $discoverClient | nindent 10 }} agent_client: - {{- $agentClient := dict "Values" .Values.gateway.gateway_config.discoverer.agent_client "default" .Values.defaults.grpc.client }} + {{- $agentClient := dict "Values" $gateway.gateway_config.discoverer.agent_client "default" .Values.defaults.grpc.client }} {{- include "vald.grpc.client" $agentClient | nindent 10 }} meta: host: {{ .Values.meta.name }}.{{ .Release.Namespace }}.svc.cluster.local port: {{ default .Values.defaults.server_config.servers.grpc.port .Values.meta.server_config.servers.grpc.port }} client: - {{- $metaClient := dict "Values" .Values.gateway.gateway_config.meta.client "default" .Values.defaults.grpc.client }} + {{- $metaClient := dict "Values" $gateway.gateway_config.meta.client "default" .Values.defaults.grpc.client }} {{- include "vald.grpc.client" $metaClient | nindent 10 }} - enable_cache: {{ .Values.gateway.gateway_config.meta.enable_cache }} - cache_expiration: {{ .Values.gateway.gateway_config.meta.cache_expiration }} - expired_cache_check_duration: {{ .Values.gateway.gateway_config.meta.expired_cache_check_duration }} + enable_cache: {{ 
$gateway.gateway_config.meta.enable_cache }} + cache_expiration: {{ $gateway.gateway_config.meta.cache_expiration }} + expired_cache_check_duration: {{ $gateway.gateway_config.meta.expired_cache_check_duration }} backup: - host: {{ .Values.compressor.name }}.{{ .Release.Namespace }}.svc.cluster.local - port: {{ default .Values.defaults.server_config.servers.grpc.port .Values.compressor.server_config.servers.grpc.port }} + host: {{ .Values.manager.compressor.name }}.{{ .Release.Namespace }}.svc.cluster.local + port: {{ default .Values.defaults.server_config.servers.grpc.port .Values.manager.compressor.server_config.servers.grpc.port }} client: - {{- $backupClient := dict "Values" .Values.gateway.gateway_config.backup.client "default" .Values.defaults.grpc.client }} + {{- $backupClient := dict "Values" $gateway.gateway_config.backup.client "default" .Values.defaults.grpc.client }} {{- include "vald.grpc.client" $backupClient | nindent 10 }} egress_filter: client: null diff --git a/charts/vald/templates/gateway/vald/daemonset.yaml b/charts/vald/templates/gateway/vald/daemonset.yaml index ec9c5bb470..577b48ab48 100644 --- a/charts/vald/templates/gateway/vald/daemonset.yaml +++ b/charts/vald/templates/gateway/vald/daemonset.yaml @@ -13,104 +13,105 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.gateway.enabled (eq .Values.gateway.kind "DaemonSet") }} +{{- $gateway := .Values.gateway.vald -}} +{{- if and $gateway.enabled (eq $gateway.kind "DaemonSet") }} apiVersion: apps/v1 kind: DaemonSet metadata: - name: {{ .Values.gateway.name }} + name: {{ $gateway.name }} labels: - app: {{ .Values.gateway.name }} + app: {{ $gateway.name }} app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: gateway - {{- if .Values.gateway.annotations }} + {{- if $gateway.annotations }} annotations: - {{- toYaml .Values.gateway.annotations | nindent 4 }} + {{- toYaml $gateway.annotations | nindent 4 }} {{- end }} spec: - revisionHistoryLimit: {{ .Values.gateway.revisionHistoryLimit }} + revisionHistoryLimit: {{ $gateway.revisionHistoryLimit }} selector: matchLabels: - app: {{ .Values.gateway.name }} + app: {{ $gateway.name }} updateStrategy: rollingUpdate: - maxUnavailable: {{ .Values.gateway.rollingUpdate.maxUnavailable }} + maxUnavailable: {{ $gateway.rollingUpdate.maxUnavailable }} type: RollingUpdate template: metadata: creationTimestamp: null labels: - app: {{ .Values.gateway.name }} + app: {{ $gateway.name }} app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: gateway annotations: checksum/configmap: {{ include (print $.Template.BasePath "/gateway/vald/configmap.yaml") . 
| sha256sum }} - {{- if .Values.gateway.podAnnotations }} - {{- toYaml .Values.gateway.podAnnotations | nindent 8 }} + {{- if $gateway.podAnnotations }} + {{- toYaml $gateway.podAnnotations | nindent 8 }} {{- end }} spec: - {{- if .Values.gateway.initContainers }} + {{- if $gateway.initContainers }} initContainers: - {{- $initContainers := dict "initContainers" .Values.gateway.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- $initContainers := dict "initContainers" $gateway.initContainers "Values" .Values "namespace" .Release.Namespace -}} {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} {{- end }} affinity: - {{- include "vald.affinity" .Values.gateway.affinity | nindent 8 }} - {{- if .Values.gateway.topologySpreadConstraints }} + {{- include "vald.affinity" $gateway.affinity | nindent 8 }} + {{- if $gateway.topologySpreadConstraints }} topologySpreadConstraints: - {{- toYaml .Values.gateway.topologySpreadConstraints | nindent 8 }} + {{- toYaml $gateway.topologySpreadConstraints | nindent 8 }} {{- end }} containers: - - name: {{ .Values.gateway.name }} - image: "{{ .Values.gateway.image.repository }}:{{ default .Values.defaults.image.tag .Values.gateway.image.tag }}" - imagePullPolicy: {{ .Values.gateway.image.pullPolicy }} - {{- $servers := dict "Values" .Values.gateway.server_config "default" .Values.defaults.server_config -}} + - name: {{ $gateway.name }} + image: "{{ $gateway.image.repository }}:{{ default .Values.defaults.image.tag $gateway.image.tag }}" + imagePullPolicy: {{ $gateway.image.pullPolicy }} + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config -}} {{- include "vald.containerPorts" $servers | trim | nindent 10 }} resources: - {{- toYaml .Values.gateway.resources | nindent 12 }} + {{- toYaml $gateway.resources | nindent 12 }} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File - {{- if .Values.gateway.env }} + {{- if $gateway.env }} env: - {{- toYaml .Values.gateway.env | nindent 12 }} + {{- toYaml $gateway.env | nindent 12 }} {{- end }} volumeMounts: - - name: {{ .Values.gateway.name }}-config + - name: {{ $gateway.name }}-config mountPath: /etc/server/ - {{- if .Values.gateway.volumeMounts }} - {{- toYaml .Values.gateway.volumeMounts | nindent 12 }} + {{- if $gateway.volumeMounts }} + {{- toYaml $gateway.volumeMounts | nindent 12 }} {{- end }} dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} - terminationGracePeriodSeconds: {{ .Values.gateway.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ $gateway.terminationGracePeriodSeconds }} volumes: - - name: {{ .Values.gateway.name }}-config + - name: {{ $gateway.name }}-config configMap: defaultMode: 420 - name: {{ .Values.gateway.name }}-config - {{- if .Values.gateway.volumes }} - {{- toYaml .Values.gateway.volumes | nindent 8 }} + name: {{ $gateway.name }}-config + {{- if $gateway.volumes }} + {{- toYaml $gateway.volumes | nindent 8 }} {{- end }} - {{- if .Values.gateway.nodeName }} - nodeName: {{ .Values.gateway.nodeName }} + {{- if $gateway.nodeName }} + nodeName: {{ $gateway.nodeName }} {{- end }} - {{- if .Values.gateway.nodeSelector }} + {{- if $gateway.nodeSelector }} nodeSelector: - {{- toYaml .Values.gateway.nodeSelector | nindent 8 }} + {{- toYaml $gateway.nodeSelector | nindent 8 }} {{- end }} - {{- if .Values.gateway.tolerations }} + {{- if $gateway.tolerations }} tolerations: - {{- toYaml .Values.gateway.tolerations | nindent 8 }} + 
{{- toYaml $gateway.tolerations | nindent 8 }} {{- end }} - {{- if .Values.gateway.podPriority }} - {{- if .Values.gateway.podPriority.enabled }} - priorityClassName: {{ .Values.gateway.name }}-priority + {{- if $gateway.podPriority }} + {{- if $gateway.podPriority.enabled }} + priorityClassName: {{ $gateway.name }}-priority {{- end }} {{- end }} status: diff --git a/charts/vald/templates/gateway/vald/deployment.yaml b/charts/vald/templates/gateway/vald/deployment.yaml index a44c317e1c..c18962b218 100644 --- a/charts/vald/templates/gateway/vald/deployment.yaml +++ b/charts/vald/templates/gateway/vald/deployment.yaml @@ -13,109 +13,110 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.gateway.enabled (eq .Values.gateway.kind "Deployment") }} +{{- $gateway := .Values.gateway.vald -}} +{{- if and $gateway.enabled (eq $gateway.kind "Deployment") }} apiVersion: apps/v1 kind: Deployment metadata: - name: {{ .Values.gateway.name }} + name: {{ $gateway.name }} labels: - app: {{ .Values.gateway.name }} + app: {{ $gateway.name }} app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: gateway - {{- if .Values.gateway.annotations }} + {{- if $gateway.annotations }} annotations: - {{- toYaml .Values.gateway.annotations | nindent 4 }} + {{- toYaml $gateway.annotations | nindent 4 }} {{- end }} spec: - progressDeadlineSeconds: {{ .Values.gateway.progressDeadlineSeconds }} - {{- if not .Values.gateway.hpa.enabled }} - replicas: {{ .Values.gateway.minReplicas }} + progressDeadlineSeconds: {{ $gateway.progressDeadlineSeconds }} + {{- if not $gateway.hpa.enabled }} + replicas: {{ $gateway.minReplicas }} {{- end }} - revisionHistoryLimit: {{ .Values.gateway.revisionHistoryLimit }} + revisionHistoryLimit: {{ $gateway.revisionHistoryLimit }} selector: matchLabels: - app: {{ .Values.gateway.name }} + app: {{ $gateway.name }} strategy: rollingUpdate: - maxSurge: {{ .Values.gateway.rollingUpdate.maxSurge }} - maxUnavailable: {{ .Values.gateway.rollingUpdate.maxUnavailable }} + maxSurge: {{ $gateway.rollingUpdate.maxSurge }} + maxUnavailable: {{ $gateway.rollingUpdate.maxUnavailable }} type: RollingUpdate template: metadata: creationTimestamp: null labels: - app: {{ .Values.gateway.name }} + app: {{ $gateway.name }} app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: gateway annotations: checksum/configmap: {{ include (print $.Template.BasePath "/gateway/vald/configmap.yaml") . 
| sha256sum }} - {{- if .Values.gateway.podAnnotations }} - {{- toYaml .Values.gateway.podAnnotations | nindent 8 }} + {{- if $gateway.podAnnotations }} + {{- toYaml $gateway.podAnnotations | nindent 8 }} {{- end }} spec: - {{- if .Values.gateway.initContainers }} + {{- if $gateway.initContainers }} initContainers: - {{- $initContainers := dict "initContainers" .Values.gateway.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- $initContainers := dict "initContainers" $gateway.initContainers "Values" .Values "namespace" .Release.Namespace -}} {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} {{- end }} affinity: - {{- include "vald.affinity" .Values.gateway.affinity | nindent 8 }} - {{- if .Values.gateway.topologySpreadConstraints }} + {{- include "vald.affinity" $gateway.affinity | nindent 8 }} + {{- if $gateway.topologySpreadConstraints }} topologySpreadConstraints: - {{- toYaml .Values.gateway.topologySpreadConstraints | nindent 8 }} + {{- toYaml $gateway.topologySpreadConstraints | nindent 8 }} {{- end }} containers: - - name: {{ .Values.gateway.name }} - image: "{{ .Values.gateway.image.repository }}:{{ default .Values.defaults.image.tag .Values.gateway.image.tag }}" - imagePullPolicy: {{ .Values.gateway.image.pullPolicy }} - {{- $servers := dict "Values" .Values.gateway.server_config "default" .Values.defaults.server_config -}} + - name: {{ $gateway.name }} + image: "{{ $gateway.image.repository }}:{{ default .Values.defaults.image.tag $gateway.image.tag }}" + imagePullPolicy: {{ $gateway.image.pullPolicy }} + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config -}} {{- include "vald.containerPorts" $servers | trim | nindent 10 }} resources: - {{- toYaml .Values.gateway.resources | nindent 12 }} + {{- toYaml $gateway.resources | nindent 12 }} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File - {{- if .Values.gateway.env }} + {{- if $gateway.env }} env: - {{- toYaml .Values.gateway.env | nindent 12 }} + {{- toYaml $gateway.env | nindent 12 }} {{- end }} volumeMounts: - - name: {{ .Values.gateway.name }}-config + - name: {{ $gateway.name }}-config mountPath: /etc/server/ - {{- if .Values.gateway.volumeMounts }} - {{- toYaml .Values.gateway.volumeMounts | nindent 12 }} + {{- if $gateway.volumeMounts }} + {{- toYaml $gateway.volumeMounts | nindent 12 }} {{- end }} dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} - terminationGracePeriodSeconds: {{ .Values.gateway.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ $gateway.terminationGracePeriodSeconds }} volumes: - - name: {{ .Values.gateway.name }}-config + - name: {{ $gateway.name }}-config configMap: defaultMode: 420 - name: {{ .Values.gateway.name }}-config - {{- if .Values.gateway.volumes }} - {{- toYaml .Values.gateway.volumes | nindent 8 }} + name: {{ $gateway.name }}-config + {{- if $gateway.volumes }} + {{- toYaml $gateway.volumes | nindent 8 }} {{- end }} - {{- if .Values.gateway.nodeName }} - nodeName: {{ .Values.gateway.nodeName }} + {{- if $gateway.nodeName }} + nodeName: {{ $gateway.nodeName }} {{- end }} - {{- if .Values.gateway.nodeSelector }} + {{- if $gateway.nodeSelector }} nodeSelector: - {{- toYaml .Values.gateway.nodeSelector | nindent 8 }} + {{- toYaml $gateway.nodeSelector | nindent 8 }} {{- end }} - {{- if .Values.gateway.tolerations }} + {{- if $gateway.tolerations }} tolerations: - {{- toYaml .Values.gateway.tolerations | nindent 8 }} + 
{{- toYaml $gateway.tolerations | nindent 8 }} {{- end }} - {{- if .Values.gateway.podPriority }} - {{- if .Values.gateway.podPriority.enabled }} - priorityClassName: {{ .Values.gateway.name }}-priority + {{- if $gateway.podPriority }} + {{- if $gateway.podPriority.enabled }} + priorityClassName: {{ $gateway.name }}-priority {{- end }} {{- end }} status: diff --git a/charts/vald/templates/gateway/vald/hpa.yaml b/charts/vald/templates/gateway/vald/hpa.yaml index 144538a892..88a522173b 100644 --- a/charts/vald/templates/gateway/vald/hpa.yaml +++ b/charts/vald/templates/gateway/vald/hpa.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.gateway.enabled .Values.gateway.hpa.enabled }} +{{- $gateway := .Values.gateway.vald -}} +{{- if and $gateway.enabled $gateway.hpa.enabled }} apiVersion: autoscaling/v1 kind: HorizontalPodAutoscaler metadata: - name: {{ .Values.gateway.name }} + name: {{ $gateway.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -26,12 +27,12 @@ metadata: app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: gateway spec: - maxReplicas: {{ .Values.gateway.maxReplicas }} - minReplicas: {{ .Values.gateway.minReplicas }} + maxReplicas: {{ $gateway.maxReplicas }} + minReplicas: {{ $gateway.minReplicas }} scaleTargetRef: apiVersion: apps/v1 - kind: {{ .Values.gateway.kind }} - name: {{ .Values.gateway.name }} - targetCPUUtilizationPercentage: {{ .Values.gateway.hpa.targetCPUUtilizationPercentage }} + kind: {{ $gateway.kind }} + name: {{ $gateway.name }} + targetCPUUtilizationPercentage: {{ $gateway.hpa.targetCPUUtilizationPercentage }} status: {{- end }} diff --git a/charts/vald/templates/gateway/vald/ing.yaml b/charts/vald/templates/gateway/vald/ing.yaml index cb113a496f..90cb0ba8ea 100644 --- a/charts/vald/templates/gateway/vald/ing.yaml +++ b/charts/vald/templates/gateway/vald/ing.yaml @@ -13,29 +13,29 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.gateway.enabled .Values.gateway.ingress.enabled }} +{{- $gateway := .Values.gateway.vald -}} +{{- if and $gateway.enabled $gateway.ingress.enabled }} apiVersion: networking.k8s.io/v1beta1 -# apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: - {{- toYaml .Values.gateway.ingress.annotations | nindent 4 }} + {{- toYaml $gateway.ingress.annotations | nindent 4 }} labels: - name: {{ .Values.gateway.name }}-ingress - app: {{ .Values.gateway.name }}-ingress + name: {{ $gateway.name }}-ingress + app: {{ $gateway.name }}-ingress app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . 
}} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: gateway - name: {{ .Values.gateway.name }}-ingress + name: {{ $gateway.name }}-ingress spec: rules: - - host: {{ .Values.gateway.ingress.host }} + - host: {{ $gateway.ingress.host }} http: paths: - backend: - serviceName: {{ .Values.gateway.name }} - servicePort: {{ .Values.gateway.ingress.servicePort }} + serviceName: {{ $gateway.name }} + servicePort: {{ $gateway.ingress.servicePort }} {{- end }} diff --git a/charts/vald/templates/gateway/vald/pdb.yaml b/charts/vald/templates/gateway/vald/pdb.yaml index 7fd1c9e223..41fcfcb497 100644 --- a/charts/vald/templates/gateway/vald/pdb.yaml +++ b/charts/vald/templates/gateway/vald/pdb.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.gateway.enabled }} +{{- $gateway := .Values.gateway.vald -}} +{{- if $gateway.enabled }} apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: - name: {{ .Values.gateway.name }} + name: {{ $gateway.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -26,8 +27,8 @@ metadata: app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: gateway spec: - maxUnavailable: {{ .Values.gateway.maxUnavailable }} + maxUnavailable: {{ $gateway.maxUnavailable }} selector: matchLabels: - app: {{ .Values.gateway.name }} + app: {{ $gateway.name }} {{- end }} diff --git a/charts/vald/templates/gateway/vald/priorityclass.yaml b/charts/vald/templates/gateway/vald/priorityclass.yaml index 2386775dfe..fd6496bce7 100644 --- a/charts/vald/templates/gateway/vald/priorityclass.yaml +++ b/charts/vald/templates/gateway/vald/priorityclass.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.gateway.enabled .Values.gateway.podPriority.enabled }} +{{- $gateway := .Values.gateway.vald -}} +{{- if and $gateway.enabled $gateway.podPriority.enabled }} apiVersion: scheduling.k8s.io/v1 kind: PriorityClass metadata: - name: {{ .Values.gateway.name }}-priority + name: {{ $gateway.name }}-priority labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -25,7 +26,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: gateway -value: {{ .Values.gateway.podPriority.value }} +value: {{ $gateway.podPriority.value }} globalDefault: false description: "A priority class for Vald gateway." {{- end }} diff --git a/charts/vald/templates/gateway/vald/svc.yaml b/charts/vald/templates/gateway/vald/svc.yaml index 8cb9cbc02b..96bdd3a7af 100644 --- a/charts/vald/templates/gateway/vald/svc.yaml +++ b/charts/vald/templates/gateway/vald/svc.yaml @@ -13,14 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -{{- if .Values.gateway.enabled }} +{{- $gateway := .Values.gateway.vald -}} +{{- if $gateway.enabled }} apiVersion: v1 kind: Service metadata: - name: {{ .Values.gateway.name }} - {{- if .Values.gateway.service.annotations }} + name: {{ $gateway.name }} + {{- if $gateway.service.annotations }} annotations: - {{- toYaml .Values.gateway.service.annotations | nindent 4 }} + {{- toYaml $gateway.service.annotations | nindent 4 }} {{- end }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} @@ -29,20 +30,20 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: gateway - {{- if .Values.gateway.service.labels }} - {{- toYaml .Values.gateway.service.labels | nindent 4 }} + {{- if $gateway.service.labels }} + {{- toYaml $gateway.service.labels | nindent 4 }} {{- end }} spec: - {{- $servers := dict "Values" .Values.gateway.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $gateway.server_config "default" .Values.defaults.server_config }} {{- include "vald.servicePorts" $servers | nindent 2 }} selector: app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/component: gateway - {{- if eq .Values.gateway.serviceType "ClusterIP" }} + {{- if eq $gateway.serviceType "ClusterIP" }} clusterIP: None {{- end }} - type: {{ .Values.gateway.serviceType }} - {{- if .Values.gateway.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.gateway.externalTrafficPolicy }} + type: {{ $gateway.serviceType }} + {{- if $gateway.externalTrafficPolicy }} + externalTrafficPolicy: {{ $gateway.externalTrafficPolicy }} {{- end }} {{- end }} diff --git a/charts/vald/templates/jobs/db/initialize/mysql/configmap.yaml b/charts/vald/templates/jobs/db/initialize/mysql/configmap.yaml index 604c7e3456..670fe36104 100644 --- a/charts/vald/templates/jobs/db/initialize/mysql/configmap.yaml +++ b/charts/vald/templates/jobs/db/initialize/mysql/configmap.yaml @@ -25,10 +25,9 @@ data: USE `{{ .Values.initializer.mysql.configmap.schema }}` ; - CREATE TABLE IF NOT EXISTS `{{ .Values.initializer.mysql.configmap.schema }}`.`meta_vector` ( + CREATE TABLE IF NOT EXISTS `{{ .Values.initializer.mysql.configmap.schema }}`.`backup_vector` ( `uuid` VARCHAR(255) NOT NULL, `vector` BLOB NOT NULL, - `meta` VARCHAR(1024) NOT NULL, `id` int NOT NULL AUTO_INCREMENT, PRIMARY KEY (`uuid`), UNIQUE INDEX `id_unique` (`id` ASC), diff --git a/charts/vald/templates/manager/backup/configmap.yaml b/charts/vald/templates/manager/backup/configmap.yaml index 7499f7efed..93c9c7b9e4 100644 --- a/charts/vald/templates/manager/backup/configmap.yaml +++ b/charts/vald/templates/manager/backup/configmap.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.backupManager.enabled }} +{{- $backup := .Values.manager.backup -}} +{{- if $backup.enabled }} apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.backupManager.name }}-config + name: {{ $backup.name }}-config labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . 
}} @@ -28,23 +29,23 @@ metadata: data: config.yaml: | --- - version: {{ .Values.backupManager.version }} - time_zone: {{ default .Values.defaults.time_zone .Values.backupManager.time_zone }} + version: {{ $backup.version }} + time_zone: {{ default .Values.defaults.time_zone $backup.time_zone }} logging: - {{- $logging := dict "Values" .Values.backupManager.logging "default" .Values.defaults.logging }} + {{- $logging := dict "Values" $backup.logging "default" .Values.defaults.logging }} {{- include "vald.logging" $logging | nindent 6 }} server_config: - {{- $servers := dict "Values" .Values.backupManager.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $backup.server_config "default" .Values.defaults.server_config }} {{- include "vald.servers" $servers | nindent 6 }} observability: - {{- $observability := dict "Values" .Values.backupManager.observability "default" .Values.defaults.observability }} + {{- $observability := dict "Values" $backup.observability "default" .Values.defaults.observability }} {{- include "vald.observability" $observability | nindent 6 }} - {{- if .Values.backupManager.mysql.enabled }} + {{- if $backup.mysql.enabled }} mysql_config: - {{- toYaml .Values.backupManager.mysql.config | nindent 6 }} + {{- toYaml $backup.mysql.config | nindent 6 }} {{- end }} - {{- if .Values.backupManager.cassandra.enabled }} + {{- if $backup.cassandra.enabled }} cassandra_config: - {{- toYaml .Values.backupManager.cassandra.config | nindent 6 }} + {{- toYaml $backup.cassandra.config | nindent 6 }} {{- end }} {{- end }} diff --git a/charts/vald/templates/manager/backup/daemonset.yaml b/charts/vald/templates/manager/backup/daemonset.yaml index 5a1ccc8577..7d7c8bbedb 100644 --- a/charts/vald/templates/manager/backup/daemonset.yaml +++ b/charts/vald/templates/manager/backup/daemonset.yaml @@ -13,104 +13,105 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.backupManager.enabled (eq .Values.backupManager.kind "DaemonSet") }} +{{- $backup := .Values.manager.backup -}} +{{- if and $backup.enabled (eq $backup.kind "DaemonSet") }} apiVersion: apps/v1 kind: DaemonSet metadata: - name: {{ .Values.backupManager.name }} + name: {{ $backup.name }} labels: - app: {{ .Values.backupManager.name }} + app: {{ $backup.name }} app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-backup - {{- if .Values.backupManager.annotations }} + {{- if $backup.annotations }} annotations: - {{- toYaml .Values.backupManager.annotations | nindent 4 }} + {{- toYaml $backup.annotations | nindent 4 }} {{- end }} spec: - revisionHistoryLimit: {{ .Values.backupManager.revisionHistoryLimit }} + revisionHistoryLimit: {{ $backup.revisionHistoryLimit }} selector: matchLabels: - app: {{ .Values.backupManager.name }} + app: {{ $backup.name }} updateStrategy: rollingUpdate: - maxUnavailable: {{ .Values.backupManager.rollingUpdate.maxUnavailable }} + maxUnavailable: {{ $backup.rollingUpdate.maxUnavailable }} type: RollingUpdate template: metadata: creationTimestamp: null labels: - app: {{ .Values.backupManager.name }} + app: {{ $backup.name }} app.kubernetes.io/name: {{ include "vald.name" . 
}} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: manager-backup annotations: checksum/configmap: {{ include (print $.Template.BasePath "/manager/backup/configmap.yaml") . | sha256sum }} - {{- if .Values.backupManager.podAnnotations }} - {{- toYaml .Values.backupManager.podAnnotations | nindent 8 }} + {{- if $backup.podAnnotations }} + {{- toYaml $backup.podAnnotations | nindent 8 }} {{- end }} spec: - {{- if .Values.backupManager.initContainers }} + {{- if $backup.initContainers }} initContainers: - {{- $initContainers := dict "initContainers" .Values.backupManager.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- $initContainers := dict "initContainers" $backup.initContainers "Values" .Values "namespace" .Release.Namespace -}} {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} {{- end }} affinity: - {{- include "vald.affinity" .Values.backupManager.affinity | nindent 8 }} - {{- if .Values.backupManager.topologySpreadConstraints }} + {{- include "vald.affinity" $backup.affinity | nindent 8 }} + {{- if $backup.topologySpreadConstraints }} topologySpreadConstraints: - {{- toYaml .Values.backupManager.topologySpreadConstraints | nindent 8 }} + {{- toYaml $backup.topologySpreadConstraints | nindent 8 }} {{- end }} containers: - - name: {{ .Values.backupManager.name }} - image: "{{ .Values.backupManager.image.repository }}:{{ default .Values.defaults.image.tag .Values.backupManager.image.tag }}" - imagePullPolicy: {{ .Values.backupManager.image.pullPolicy }} - {{- $servers := dict "Values" .Values.backupManager.server_config "default" .Values.defaults.server_config -}} + - name: {{ $backup.name }} + image: "{{ $backup.image.repository }}:{{ default .Values.defaults.image.tag $backup.image.tag }}" + imagePullPolicy: {{ $backup.image.pullPolicy }} + {{- $servers := dict "Values" $backup.server_config "default" .Values.defaults.server_config -}} {{- include "vald.containerPorts" $servers | trim | nindent 10 }} resources: - {{- toYaml .Values.backupManager.resources | nindent 12 }} + {{- toYaml $backup.resources | nindent 12 }} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File - {{- if .Values.backupManager.env }} + {{- if $backup.env }} env: - {{- toYaml .Values.backupManager.env | nindent 12 }} + {{- toYaml $backup.env | nindent 12 }} {{- end }} volumeMounts: - - name: {{ .Values.backupManager.name }}-config + - name: {{ $backup.name }}-config mountPath: /etc/server/ - {{- if .Values.backupManager.volumeMounts }} - {{- toYaml .Values.backupManager.volumeMounts | nindent 12 }} + {{- if $backup.volumeMounts }} + {{- toYaml $backup.volumeMounts | nindent 12 }} {{- end }} dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} - terminationGracePeriodSeconds: {{ .Values.backupManager.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ $backup.terminationGracePeriodSeconds }} volumes: - - name: {{ .Values.backupManager.name }}-config + - name: {{ $backup.name }}-config configMap: defaultMode: 420 - name: {{ .Values.backupManager.name }}-config - {{- if .Values.backupManager.volumes }} - {{- toYaml .Values.backupManager.volumes | nindent 8 }} + name: {{ $backup.name }}-config + {{- if $backup.volumes }} + {{- toYaml $backup.volumes | nindent 8 }} {{- end }} - {{- if .Values.backupManager.nodeName }} - nodeName: {{ .Values.backupManager.nodeName }} + {{- if $backup.nodeName }} + nodeName: {{ $backup.nodeName }} {{- end }} - {{- if 
.Values.backupManager.nodeSelector }} + {{- if $backup.nodeSelector }} nodeSelector: - {{- toYaml .Values.backupManager.nodeSelector | nindent 8 }} + {{- toYaml $backup.nodeSelector | nindent 8 }} {{- end }} - {{- if .Values.backupManager.tolerations }} + {{- if $backup.tolerations }} tolerations: - {{- toYaml .Values.backupManager.tolerations | nindent 8 }} + {{- toYaml $backup.tolerations | nindent 8 }} {{- end }} - {{- if .Values.backupManager.podPriority }} - {{- if .Values.backupManager.podPriority.enabled }} - priorityClassName: {{ .Values.backupManager.name }}-priority + {{- if $backup.podPriority }} + {{- if $backup.podPriority.enabled }} + priorityClassName: {{ $backup.name }}-priority {{- end }} {{- end }} status: diff --git a/charts/vald/templates/manager/backup/deployment.yaml b/charts/vald/templates/manager/backup/deployment.yaml index 5673c6c37c..c8fbd230c9 100644 --- a/charts/vald/templates/manager/backup/deployment.yaml +++ b/charts/vald/templates/manager/backup/deployment.yaml @@ -13,109 +13,110 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.backupManager.enabled (eq .Values.backupManager.kind "Deployment") }} +{{- $backup := .Values.manager.backup -}} +{{- if and $backup.enabled (eq $backup.kind "Deployment") }} apiVersion: apps/v1 kind: Deployment metadata: - name: {{ .Values.backupManager.name }} + name: {{ $backup.name }} labels: - app: {{ .Values.backupManager.name }} + app: {{ $backup.name }} app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-backup - {{- if .Values.backupManager.annotations }} + {{- if $backup.annotations }} annotations: - {{- toYaml .Values.backupManager.annotations | nindent 4 }} + {{- toYaml $backup.annotations | nindent 4 }} {{- end }} spec: - progressDeadlineSeconds: {{ .Values.backupManager.progressDeadlineSeconds }} - {{- if not .Values.backupManager.hpa.enabled }} - replicas: {{ .Values.backupManager.minReplicas }} + progressDeadlineSeconds: {{ $backup.progressDeadlineSeconds }} + {{- if not $backup.hpa.enabled }} + replicas: {{ $backup.minReplicas }} {{- end }} - revisionHistoryLimit: {{ .Values.backupManager.revisionHistoryLimit }} + revisionHistoryLimit: {{ $backup.revisionHistoryLimit }} selector: matchLabels: - app: {{ .Values.backupManager.name }} + app: {{ $backup.name }} strategy: rollingUpdate: - maxSurge: {{ .Values.backupManager.rollingUpdate.maxSurge }} - maxUnavailable: {{ .Values.backupManager.rollingUpdate.maxUnavailable }} + maxSurge: {{ $backup.rollingUpdate.maxSurge }} + maxUnavailable: {{ $backup.rollingUpdate.maxUnavailable }} type: RollingUpdate template: metadata: creationTimestamp: null labels: - app: {{ .Values.backupManager.name }} + app: {{ $backup.name }} app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: manager-backup annotations: checksum/configmap: {{ include (print $.Template.BasePath "/manager/backup/configmap.yaml") . 
| sha256sum }} - {{- if .Values.backupManager.podAnnotations }} - {{- toYaml .Values.backupManager.podAnnotations | nindent 8 }} + {{- if $backup.podAnnotations }} + {{- toYaml $backup.podAnnotations | nindent 8 }} {{- end }} spec: - {{- if .Values.backupManager.initContainers }} + {{- if $backup.initContainers }} initContainers: - {{- $initContainers := dict "initContainers" .Values.backupManager.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- $initContainers := dict "initContainers" $backup.initContainers "Values" .Values "namespace" .Release.Namespace -}} {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} {{- end }} affinity: - {{- include "vald.affinity" .Values.backupManager.affinity | nindent 8 }} - {{- if .Values.backupManager.topologySpreadConstraints }} + {{- include "vald.affinity" $backup.affinity | nindent 8 }} + {{- if $backup.topologySpreadConstraints }} topologySpreadConstraints: - {{- toYaml .Values.backupManager.topologySpreadConstraints | nindent 8 }} + {{- toYaml $backup.topologySpreadConstraints | nindent 8 }} {{- end }} containers: - - name: {{ .Values.backupManager.name }} - image: "{{ .Values.backupManager.image.repository }}:{{ default .Values.defaults.image.tag .Values.backupManager.image.tag }}" - imagePullPolicy: {{ .Values.backupManager.image.pullPolicy }} - {{- $servers := dict "Values" .Values.backupManager.server_config "default" .Values.defaults.server_config -}} + - name: {{ $backup.name }} + image: "{{ $backup.image.repository }}:{{ default .Values.defaults.image.tag $backup.image.tag }}" + imagePullPolicy: {{ $backup.image.pullPolicy }} + {{- $servers := dict "Values" $backup.server_config "default" .Values.defaults.server_config -}} {{- include "vald.containerPorts" $servers | trim | nindent 10 }} resources: - {{- toYaml .Values.backupManager.resources | nindent 12 }} + {{- toYaml $backup.resources | nindent 12 }} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File - {{- if .Values.backupManager.env }} + {{- if $backup.env }} env: - {{- toYaml .Values.backupManager.env | nindent 12 }} + {{- toYaml $backup.env | nindent 12 }} {{- end }} volumeMounts: - - name: {{ .Values.backupManager.name }}-config + - name: {{ $backup.name }}-config mountPath: /etc/server/ - {{- if .Values.backupManager.volumeMounts }} - {{- toYaml .Values.backupManager.volumeMounts | nindent 12 }} + {{- if $backup.volumeMounts }} + {{- toYaml $backup.volumeMounts | nindent 12 }} {{- end }} dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} - terminationGracePeriodSeconds: {{ .Values.backupManager.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ $backup.terminationGracePeriodSeconds }} volumes: - - name: {{ .Values.backupManager.name }}-config + - name: {{ $backup.name }}-config configMap: defaultMode: 420 - name: {{ .Values.backupManager.name }}-config - {{- if .Values.backupManager.volumes }} - {{- toYaml .Values.backupManager.volumes | nindent 8 }} + name: {{ $backup.name }}-config + {{- if $backup.volumes }} + {{- toYaml $backup.volumes | nindent 8 }} {{- end }} - {{- if .Values.backupManager.nodeName }} - nodeName: {{ .Values.backupManager.nodeName }} + {{- if $backup.nodeName }} + nodeName: {{ $backup.nodeName }} {{- end }} - {{- if .Values.backupManager.nodeSelector }} + {{- if $backup.nodeSelector }} nodeSelector: - {{- toYaml .Values.backupManager.nodeSelector | nindent 8 }} + {{- toYaml $backup.nodeSelector | nindent 8 }} {{- end }} - {{- if 
.Values.backupManager.tolerations }} + {{- if $backup.tolerations }} tolerations: - {{- toYaml .Values.backupManager.tolerations | nindent 8 }} + {{- toYaml $backup.tolerations | nindent 8 }} {{- end }} - {{- if .Values.backupManager.podPriority }} - {{- if .Values.backupManager.podPriority.enabled }} - priorityClassName: {{ .Values.backupManager.name }}-priority + {{- if $backup.podPriority }} + {{- if $backup.podPriority.enabled }} + priorityClassName: {{ $backup.name }}-priority {{- end }} {{- end }} status: diff --git a/charts/vald/templates/manager/backup/hpa.yaml b/charts/vald/templates/manager/backup/hpa.yaml index 25d5ec7cd9..ee77aa1e32 100644 --- a/charts/vald/templates/manager/backup/hpa.yaml +++ b/charts/vald/templates/manager/backup/hpa.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.backupManager.enabled .Values.backupManager.hpa.enabled }} +{{- $backup := .Values.manager.backup -}} +{{- if and $backup.enabled $backup.hpa.enabled }} apiVersion: autoscaling/v1 kind: HorizontalPodAutoscaler metadata: - name: {{ .Values.backupManager.name }} + name: {{ $backup.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -26,12 +27,12 @@ metadata: app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-backup spec: - maxReplicas: {{ .Values.backupManager.maxReplicas }} - minReplicas: {{ .Values.backupManager.minReplicas }} + maxReplicas: {{ $backup.maxReplicas }} + minReplicas: {{ $backup.minReplicas }} scaleTargetRef: apiVersion: apps/v1 - kind: {{ .Values.backupManager.kind }} - name: {{ .Values.backupManager.name }} - targetCPUUtilizationPercentage: {{ .Values.backupManager.hpa.targetCPUUtilizationPercentage }} + kind: {{ $backup.kind }} + name: {{ $backup.name }} + targetCPUUtilizationPercentage: {{ $backup.hpa.targetCPUUtilizationPercentage }} status: {{- end }} diff --git a/charts/vald/templates/manager/backup/pdb.yaml b/charts/vald/templates/manager/backup/pdb.yaml index 2bbea1a7ee..3c29bd2d1f 100644 --- a/charts/vald/templates/manager/backup/pdb.yaml +++ b/charts/vald/templates/manager/backup/pdb.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.backupManager.enabled }} +{{- $backup := .Values.manager.backup -}} +{{- if $backup.enabled }} apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: - name: {{ .Values.backupManager.name }} + name: {{ $backup.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -26,8 +27,8 @@ metadata: app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-backup spec: - maxUnavailable: {{ .Values.backupManager.maxUnavailable }} + maxUnavailable: {{ $backup.maxUnavailable }} selector: matchLabels: - app: {{ .Values.backupManager.name }} + app: {{ $backup.name }} {{- end }} diff --git a/charts/vald/templates/manager/backup/priorityclass.yaml b/charts/vald/templates/manager/backup/priorityclass.yaml index c0b02e4752..3d40a81058 100644 --- a/charts/vald/templates/manager/backup/priorityclass.yaml +++ b/charts/vald/templates/manager/backup/priorityclass.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -{{- if and .Values.backupManager.enabled .Values.backupManager.podPriority.enabled }} +{{- $backup := .Values.manager.backup -}} +{{- if and $backup.enabled $backup.podPriority.enabled }} apiVersion: scheduling.k8s.io/v1 kind: PriorityClass metadata: - name: {{ .Values.backupManager.name }}-priority + name: {{ $backup.name }}-priority labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -25,7 +26,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-backup -value: {{ .Values.backupManager.podPriority.value }} +value: {{ $backup.podPriority.value }} globalDefault: false -description: "A priority class for Vald backupManager." +description: "A priority class for Vald backup manager." {{- end }} diff --git a/charts/vald/templates/manager/backup/svc.yaml b/charts/vald/templates/manager/backup/svc.yaml index 5538452dfb..2e87952af1 100644 --- a/charts/vald/templates/manager/backup/svc.yaml +++ b/charts/vald/templates/manager/backup/svc.yaml @@ -13,14 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.backupManager.enabled }} +{{- $backup := .Values.manager.backup -}} +{{- if $backup.enabled }} apiVersion: v1 kind: Service metadata: - name: {{ .Values.backupManager.name }} - {{- if .Values.backupManager.service.annotations }} + name: {{ $backup.name }} + {{- if $backup.service.annotations }} annotations: - {{- toYaml .Values.backupManager.service.annotations | nindent 4 }} + {{- toYaml $backup.service.annotations | nindent 4 }} {{- end }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} @@ -29,20 +30,20 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-backup - {{- if .Values.backupManager.service.labels }} - {{- toYaml .Values.backupManager.service.labels | nindent 4 }} + {{- if $backup.service.labels }} + {{- toYaml $backup.service.labels | nindent 4 }} {{- end }} spec: - {{- $servers := dict "Values" .Values.backupManager.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $backup.server_config "default" .Values.defaults.server_config }} {{- include "vald.servicePorts" $servers | nindent 2 }} selector: app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/component: manager-backup - {{- if eq .Values.backupManager.serviceType "ClusterIP" }} + {{- if eq $backup.serviceType "ClusterIP" }} clusterIP: None {{- end }} - type: {{ .Values.backupManager.serviceType }} - {{- if .Values.backupManager.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.backupManager.externalTrafficPolicy }} + type: {{ $backup.serviceType }} + {{- if $backup.externalTrafficPolicy }} + externalTrafficPolicy: {{ $backup.externalTrafficPolicy }} {{- end }} {{- end }} diff --git a/charts/vald/templates/manager/compressor/configmap.yaml b/charts/vald/templates/manager/compressor/configmap.yaml index 2114952dcc..4f05a3da6c 100644 --- a/charts/vald/templates/manager/compressor/configmap.yaml +++ b/charts/vald/templates/manager/compressor/configmap.yaml @@ -13,11 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -{{- if .Values.compressor.enabled }} +{{- $compressor := .Values.manager.compressor -}} +{{- $backup := .Values.manager.backup -}} +{{- if $compressor.enabled }} apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.compressor.name }}-config + name: {{ $compressor.name }}-config labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -28,35 +30,35 @@ metadata: data: config.yaml: | --- - version: {{ .Values.compressor.version }} - time_zone: {{ default .Values.defaults.time_zone .Values.compressor.time_zone }} + version: {{ $compressor.version }} + time_zone: {{ default .Values.defaults.time_zone $compressor.time_zone }} logging: - {{- $logging := dict "Values" .Values.compressor.logging "default" .Values.defaults.logging }} + {{- $logging := dict "Values" $compressor.logging "default" .Values.defaults.logging }} {{- include "vald.logging" $logging | nindent 6 }} server_config: - {{- $servers := dict "Values" .Values.compressor.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $compressor.server_config "default" .Values.defaults.server_config }} {{- include "vald.servers" $servers | nindent 6 }} observability: - {{- $observability := dict "Values" .Values.compressor.observability "default" .Values.defaults.observability }} + {{- $observability := dict "Values" $compressor.observability "default" .Values.defaults.observability }} {{- include "vald.observability" $observability | nindent 6 }} backup: - host: {{ .Values.backupManager.name }}.{{ .Release.Namespace }}.svc.cluster.local - port: {{ default .Values.defaults.server_config.servers.grpc.port .Values.backupManager.server_config.servers.grpc.port }} + host: {{ $backup.name }}.{{ .Release.Namespace }}.svc.cluster.local + port: {{ default .Values.defaults.server_config.servers.grpc.port $backup.server_config.servers.grpc.port }} client: - {{- $backupClient := dict "Values" .Values.compressor.backup.client "default" .Values.defaults.grpc.client }} + {{- $backupClient := dict "Values" $compressor.backup.client "default" .Values.defaults.grpc.client }} {{- include "vald.grpc.client" $backupClient | nindent 8 }} compressor: - compress_algorithm: {{ .Values.compressor.compress.compress_algorithm | quote }} - compression_level: {{ .Values.compressor.compress.compression_level }} - concurrent_limit: {{ .Values.compressor.compress.concurrent_limit }} - queue_check_duration: {{ .Values.compressor.compress.queue_check_duration }} + compress_algorithm: {{ $compressor.compress.compress_algorithm | quote }} + compression_level: {{ $compressor.compress.compression_level }} + concurrent_limit: {{ $compressor.compress.concurrent_limit }} + queue_check_duration: {{ $compressor.compress.queue_check_duration }} registerer: - concurrent_limit: {{ .Values.compressor.registerer.concurrent_limit }} - queue_check_duration: {{ .Values.compressor.registerer.queue_check_duration }} + concurrent_limit: {{ $compressor.registerer.concurrent_limit }} + queue_check_duration: {{ $compressor.registerer.queue_check_duration }} compressor: - host: {{ .Values.compressor.name }}.{{ .Release.Namespace }}.svc.cluster.local - port: {{ default .Values.defaults.server_config.servers.grpc.port .Values.compressor.server_config.servers.grpc.port }} + host: {{ $compressor.name }}.{{ .Release.Namespace }}.svc.cluster.local + port: {{ default .Values.defaults.server_config.servers.grpc.port $compressor.server_config.servers.grpc.port }} client: - {{- $compressorClient := dict "Values" 
.Values.compressor.registerer.compressor.client "default" .Values.defaults.grpc.client }} + {{- $compressorClient := dict "Values" $compressor.registerer.compressor.client "default" .Values.defaults.grpc.client }} {{- include "vald.grpc.client" $compressorClient | nindent 12 }} {{- end }} diff --git a/charts/vald/templates/manager/compressor/daemonset.yaml b/charts/vald/templates/manager/compressor/daemonset.yaml index 290ebee446..014e818d22 100644 --- a/charts/vald/templates/manager/compressor/daemonset.yaml +++ b/charts/vald/templates/manager/compressor/daemonset.yaml @@ -13,104 +13,105 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.compressor.enabled (eq .Values.compressor.kind "DaemonSet") }} +{{- $compressor := .Values.manager.compressor -}} +{{- if and $compressor.enabled (eq $compressor.kind "DaemonSet") }} apiVersion: apps/v1 kind: DaemonSet metadata: - name: {{ .Values.compressor.name }} + name: {{ $compressor.name }} labels: - app: {{ .Values.compressor.name }} + app: {{ $compressor.name }} app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-compressor - {{- if .Values.compressor.annotations }} + {{- if $compressor.annotations }} annotations: - {{- toYaml .Values.compressor.annotations | nindent 4 }} + {{- toYaml $compressor.annotations | nindent 4 }} {{- end }} spec: - revisionHistoryLimit: {{ .Values.compressor.revisionHistoryLimit }} + revisionHistoryLimit: {{ $compressor.revisionHistoryLimit }} selector: matchLabels: - app: {{ .Values.compressor.name }} + app: {{ $compressor.name }} updateStrategy: rollingUpdate: - maxUnavailable: {{ .Values.compressor.rollingUpdate.maxUnavailable }} + maxUnavailable: {{ $compressor.rollingUpdate.maxUnavailable }} type: RollingUpdate template: metadata: creationTimestamp: null labels: - app: {{ .Values.compressor.name }} + app: {{ $compressor.name }} app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: manager-compressor annotations: checksum/configmap: {{ include (print $.Template.BasePath "/manager/compressor/configmap.yaml") . 
| sha256sum }} - {{- if .Values.compressor.podAnnotations }} - {{- toYaml .Values.compressor.podAnnotations | nindent 8 }} + {{- if $compressor.podAnnotations }} + {{- toYaml $compressor.podAnnotations | nindent 8 }} {{- end }} spec: - {{- if .Values.compressor.initContainers }} + {{- if $compressor.initContainers }} initContainers: - {{- $initContainers := dict "initContainers" .Values.compressor.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- $initContainers := dict "initContainers" $compressor.initContainers "Values" .Values "namespace" .Release.Namespace -}} {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} {{- end }} affinity: - {{- include "vald.affinity" .Values.compressor.affinity | nindent 8 }} - {{- if .Values.compressor.topologySpreadConstraints }} + {{- include "vald.affinity" $compressor.affinity | nindent 8 }} + {{- if $compressor.topologySpreadConstraints }} topologySpreadConstraints: - {{- toYaml .Values.compressor.topologySpreadConstraints | nindent 8 }} + {{- toYaml $compressor.topologySpreadConstraints | nindent 8 }} {{- end }} containers: - - name: {{ .Values.compressor.name }} - image: "{{ .Values.compressor.image.repository }}:{{ default .Values.defaults.image.tag .Values.compressor.image.tag }}" - imagePullPolicy: {{ .Values.compressor.image.pullPolicy }} - {{- $servers := dict "Values" .Values.compressor.server_config "default" .Values.defaults.server_config -}} + - name: {{ $compressor.name }} + image: "{{ $compressor.image.repository }}:{{ default .Values.defaults.image.tag $compressor.image.tag }}" + imagePullPolicy: {{ $compressor.image.pullPolicy }} + {{- $servers := dict "Values" $compressor.server_config "default" .Values.defaults.server_config -}} {{- include "vald.containerPorts" $servers | trim | nindent 10 }} resources: - {{- toYaml .Values.compressor.resources | nindent 12 }} + {{- toYaml $compressor.resources | nindent 12 }} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File - {{- if .Values.compressor.env }} + {{- if $compressor.env }} env: - {{- toYaml .Values.compressor.env | nindent 12 }} + {{- toYaml $compressor.env | nindent 12 }} {{- end }} volumeMounts: - - name: {{ .Values.compressor.name }}-config + - name: {{ $compressor.name }}-config mountPath: /etc/server/ - {{- if .Values.compressor.volumeMounts }} - {{- toYaml .Values.compressor.volumeMounts | nindent 12 }} + {{- if $compressor.volumeMounts }} + {{- toYaml $compressor.volumeMounts | nindent 12 }} {{- end }} dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} - terminationGracePeriodSeconds: {{ .Values.compressor.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ $compressor.terminationGracePeriodSeconds }} volumes: - configMap: defaultMode: 420 - name: {{ .Values.compressor.name }}-config - name: {{ .Values.compressor.name }}-config - {{- if .Values.compressor.volumes }} - {{- toYaml .Values.compressor.volumes | nindent 8 }} + name: {{ $compressor.name }}-config + name: {{ $compressor.name }}-config + {{- if $compressor.volumes }} + {{- toYaml $compressor.volumes | nindent 8 }} {{- end }} - {{- if .Values.compressor.nodeName }} - nodeName: {{ .Values.compressor.nodeName }} + {{- if $compressor.nodeName }} + nodeName: {{ $compressor.nodeName }} {{- end }} - {{- if .Values.compressor.nodeSelector }} + {{- if $compressor.nodeSelector }} nodeSelector: - {{- toYaml .Values.compressor.nodeSelector | nindent 8 }} + {{- toYaml $compressor.nodeSelector | nindent 
8 }} {{- end }} - {{- if .Values.compressor.tolerations }} + {{- if $compressor.tolerations }} tolerations: - {{- toYaml .Values.compressor.tolerations | nindent 8 }} + {{- toYaml $compressor.tolerations | nindent 8 }} {{- end }} - {{- if .Values.compressor.podPriority }} - {{- if .Values.compressor.podPriority.enabled }} - priorityClassName: {{ .Values.compressor.name }}-priority + {{- if $compressor.podPriority }} + {{- if $compressor.podPriority.enabled }} + priorityClassName: {{ $compressor.name }}-priority {{- end }} {{- end }} status: diff --git a/charts/vald/templates/manager/compressor/deployment.yaml b/charts/vald/templates/manager/compressor/deployment.yaml index f718d84320..74510099d4 100644 --- a/charts/vald/templates/manager/compressor/deployment.yaml +++ b/charts/vald/templates/manager/compressor/deployment.yaml @@ -13,109 +13,110 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.compressor.enabled (eq .Values.compressor.kind "Deployment") }} +{{- $compressor := .Values.manager.compressor -}} +{{- if and $compressor.enabled (eq $compressor.kind "Deployment") }} apiVersion: apps/v1 kind: Deployment metadata: - name: {{ .Values.compressor.name }} + name: {{ $compressor.name }} labels: - app: {{ .Values.compressor.name }} + app: {{ $compressor.name }} app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-compressor - {{- if .Values.compressor.annotations }} + {{- if $compressor.annotations }} annotations: - {{- toYaml .Values.compressor.annotations | nindent 4 }} + {{- toYaml $compressor.annotations | nindent 4 }} {{- end }} spec: - progressDeadlineSeconds: {{ .Values.compressor.progressDeadlineSeconds }} - {{- if not .Values.compressor.hpa.enabled }} - replicas: {{ .Values.compressor.minReplicas }} + progressDeadlineSeconds: {{ $compressor.progressDeadlineSeconds }} + {{- if not $compressor.hpa.enabled }} + replicas: {{ $compressor.minReplicas }} {{- end }} - revisionHistoryLimit: {{ .Values.compressor.revisionHistoryLimit }} + revisionHistoryLimit: {{ $compressor.revisionHistoryLimit }} selector: matchLabels: - app: {{ .Values.compressor.name }} + app: {{ $compressor.name }} strategy: rollingUpdate: - maxSurge: {{ .Values.compressor.rollingUpdate.maxSurge }} - maxUnavailable: {{ .Values.compressor.rollingUpdate.maxUnavailable }} + maxSurge: {{ $compressor.rollingUpdate.maxSurge }} + maxUnavailable: {{ $compressor.rollingUpdate.maxUnavailable }} type: RollingUpdate template: metadata: creationTimestamp: null labels: - app: {{ .Values.compressor.name }} + app: {{ $compressor.name }} app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: manager-compressor annotations: checksum/configmap: {{ include (print $.Template.BasePath "/manager/compressor/configmap.yaml") . 
| sha256sum }} - {{- if .Values.compressor.podAnnotations }} - {{- toYaml .Values.compressor.podAnnotations | nindent 8 }} + {{- if $compressor.podAnnotations }} + {{- toYaml $compressor.podAnnotations | nindent 8 }} {{- end }} spec: - {{- if .Values.compressor.initContainers }} + {{- if $compressor.initContainers }} initContainers: - {{- $initContainers := dict "initContainers" .Values.compressor.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- $initContainers := dict "initContainers" $compressor.initContainers "Values" .Values "namespace" .Release.Namespace -}} {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} {{- end }} affinity: - {{- include "vald.affinity" .Values.compressor.affinity | nindent 8 }} - {{- if .Values.compressor.topologySpreadConstraints }} + {{- include "vald.affinity" $compressor.affinity | nindent 8 }} + {{- if $compressor.topologySpreadConstraints }} topologySpreadConstraints: - {{- toYaml .Values.compressor.topologySpreadConstraints | nindent 8 }} + {{- toYaml $compressor.topologySpreadConstraints | nindent 8 }} {{- end }} containers: - - name: {{ .Values.compressor.name }} - image: "{{ .Values.compressor.image.repository }}:{{ default .Values.defaults.image.tag .Values.compressor.image.tag }}" - imagePullPolicy: {{ .Values.compressor.image.pullPolicy }} - {{- $servers := dict "Values" .Values.compressor.server_config "default" .Values.defaults.server_config -}} + - name: {{ $compressor.name }} + image: "{{ $compressor.image.repository }}:{{ default .Values.defaults.image.tag $compressor.image.tag }}" + imagePullPolicy: {{ $compressor.image.pullPolicy }} + {{- $servers := dict "Values" $compressor.server_config "default" .Values.defaults.server_config -}} {{- include "vald.containerPorts" $servers | trim | nindent 10 }} resources: - {{- toYaml .Values.compressor.resources | nindent 12 }} + {{- toYaml $compressor.resources | nindent 12 }} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File - {{- if .Values.compressor.env }} + {{- if $compressor.env }} env: - {{- toYaml .Values.compressor.env | nindent 12 }} + {{- toYaml $compressor.env | nindent 12 }} {{- end }} volumeMounts: - - name: {{ .Values.compressor.name }}-config + - name: {{ $compressor.name }}-config mountPath: /etc/server/ - {{- if .Values.compressor.volumeMounts }} - {{- toYaml .Values.compressor.volumeMounts | nindent 12 }} + {{- if $compressor.volumeMounts }} + {{- toYaml $compressor.volumeMounts | nindent 12 }} {{- end }} dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} - terminationGracePeriodSeconds: {{ .Values.compressor.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ $compressor.terminationGracePeriodSeconds }} volumes: - configMap: defaultMode: 420 - name: {{ .Values.compressor.name }}-config - name: {{ .Values.compressor.name }}-config - {{- if .Values.compressor.volumes }} - {{- toYaml .Values.compressor.volumes | nindent 8 }} + name: {{ $compressor.name }}-config + name: {{ $compressor.name }}-config + {{- if $compressor.volumes }} + {{- toYaml $compressor.volumes | nindent 8 }} {{- end }} - {{- if .Values.compressor.nodeName }} - nodeName: {{ .Values.compressor.nodeName }} + {{- if $compressor.nodeName }} + nodeName: {{ $compressor.nodeName }} {{- end }} - {{- if .Values.compressor.nodeSelector }} + {{- if $compressor.nodeSelector }} nodeSelector: - {{- toYaml .Values.compressor.nodeSelector | nindent 8 }} + {{- toYaml $compressor.nodeSelector | nindent 
8 }} {{- end }} - {{- if .Values.compressor.tolerations }} + {{- if $compressor.tolerations }} tolerations: - {{- toYaml .Values.compressor.tolerations | nindent 8 }} + {{- toYaml $compressor.tolerations | nindent 8 }} {{- end }} - {{- if .Values.compressor.podPriority }} - {{- if .Values.compressor.podPriority.enabled }} - priorityClassName: {{ .Values.compressor.name }}-priority + {{- if $compressor.podPriority }} + {{- if $compressor.podPriority.enabled }} + priorityClassName: {{ $compressor.name }}-priority {{- end }} {{- end }} status: diff --git a/charts/vald/templates/manager/compressor/hpa.yaml b/charts/vald/templates/manager/compressor/hpa.yaml index 8e13be05b3..7c2fc8333a 100644 --- a/charts/vald/templates/manager/compressor/hpa.yaml +++ b/charts/vald/templates/manager/compressor/hpa.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.compressor.enabled .Values.compressor.hpa.enabled }} +{{- $compressor := .Values.manager.compressor -}} +{{- if and $compressor.enabled $compressor.hpa.enabled }} apiVersion: autoscaling/v1 kind: HorizontalPodAutoscaler metadata: - name: {{ .Values.compressor.name }} + name: {{ $compressor.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -26,12 +27,12 @@ metadata: app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-compressor spec: - maxReplicas: {{ .Values.compressor.maxReplicas }} - minReplicas: {{ .Values.compressor.minReplicas }} + maxReplicas: {{ $compressor.maxReplicas }} + minReplicas: {{ $compressor.minReplicas }} scaleTargetRef: apiVersion: apps/v1 - kind: {{ .Values.compressor.kind }} - name: {{ .Values.compressor.name }} - targetCPUUtilizationPercentage: {{ .Values.compressor.hpa.targetCPUUtilizationPercentage }} + kind: {{ $compressor.kind }} + name: {{ $compressor.name }} + targetCPUUtilizationPercentage: {{ $compressor.hpa.targetCPUUtilizationPercentage }} status: {{- end }} diff --git a/charts/vald/templates/manager/compressor/pdb.yaml b/charts/vald/templates/manager/compressor/pdb.yaml index 0eaeb65c3f..d13735f59c 100644 --- a/charts/vald/templates/manager/compressor/pdb.yaml +++ b/charts/vald/templates/manager/compressor/pdb.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.compressor.enabled }} +{{- $compressor := .Values.manager.compressor -}} +{{- if $compressor.enabled }} apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: - name: {{ .Values.compressor.name }} + name: {{ $compressor.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . 
}} @@ -26,8 +27,8 @@ metadata: app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-compressor spec: - maxUnavailable: {{ .Values.compressor.maxUnavailable }} + maxUnavailable: {{ $compressor.maxUnavailable }} selector: matchLabels: - app: {{ .Values.compressor.name }} + app: {{ $compressor.name }} {{- end }} diff --git a/charts/vald/templates/manager/compressor/priorityclass.yaml b/charts/vald/templates/manager/compressor/priorityclass.yaml index c981d26774..1e00386cc2 100644 --- a/charts/vald/templates/manager/compressor/priorityclass.yaml +++ b/charts/vald/templates/manager/compressor/priorityclass.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.compressor.enabled .Values.compressor.podPriority.enabled }} +{{- $compressor := .Values.manager.compressor -}} +{{- if and $compressor.enabled $compressor.podPriority.enabled }} apiVersion: scheduling.k8s.io/v1 kind: PriorityClass metadata: - name: {{ .Values.compressor.name }}-priority + name: {{ $compressor.name }}-priority labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -25,7 +26,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-compressor -value: {{ .Values.compressor.podPriority.value }} +value: {{ $compressor.podPriority.value }} preemptionPolicy: Never globalDefault: false description: "A priority class for Vald compressor." diff --git a/charts/vald/templates/manager/compressor/svc.yaml b/charts/vald/templates/manager/compressor/svc.yaml index 369514b824..8b164ae1a0 100644 --- a/charts/vald/templates/manager/compressor/svc.yaml +++ b/charts/vald/templates/manager/compressor/svc.yaml @@ -13,14 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.compressor.enabled }} +{{- $compressor := .Values.manager.compressor -}} +{{- if $compressor.enabled }} apiVersion: v1 kind: Service metadata: - name: {{ .Values.compressor.name }} - {{- if .Values.compressor.service.annotations }} + name: {{ $compressor.name }} + {{- if $compressor.service.annotations }} annotations: - {{- toYaml .Values.compressor.service.annotations | nindent 4 }} + {{- toYaml $compressor.service.annotations | nindent 4 }} {{- end }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} @@ -29,20 +30,20 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-compressor - {{- if .Values.compressor.service.labels }} - {{- toYaml .Values.compressor.service.labels | nindent 4 }} + {{- if $compressor.service.labels }} + {{- toYaml $compressor.service.labels | nindent 4 }} {{- end }} spec: - {{- $servers := dict "Values" .Values.compressor.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $compressor.server_config "default" .Values.defaults.server_config }} {{- include "vald.servicePorts" $servers | nindent 2 }} selector: app.kubernetes.io/name: {{ include "vald.name" . 
}} app.kubernetes.io/component: manager-compressor - {{- if eq .Values.compressor.serviceType "ClusterIP" }} + {{- if eq $compressor.serviceType "ClusterIP" }} clusterIP: None {{- end }} - type: {{ .Values.compressor.serviceType }} - {{- if .Values.compressor.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.compressor.externalTrafficPolicy }} + type: {{ $compressor.serviceType }} + {{- if $compressor.externalTrafficPolicy }} + externalTrafficPolicy: {{ $compressor.externalTrafficPolicy }} {{- end }} {{- end }} diff --git a/charts/vald/templates/manager/index/configmap.yaml b/charts/vald/templates/manager/index/configmap.yaml index 8ab65dbaea..15f4029e7b 100644 --- a/charts/vald/templates/manager/index/configmap.yaml +++ b/charts/vald/templates/manager/index/configmap.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.indexManager.enabled }} +{{- $index := .Values.manager.index -}} +{{- if $index.enabled }} apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.indexManager.name }}-config + name: {{ $index.name }}-config labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -28,36 +29,36 @@ metadata: data: config.yaml: | --- - version: {{ .Values.indexManager.version }} - time_zone: {{ default .Values.defaults.time_zone .Values.indexManager.time_zone }} + version: {{ $index.version }} + time_zone: {{ default .Values.defaults.time_zone $index.time_zone }} logging: - {{- $logging := dict "Values" .Values.indexManager.logging "default" .Values.defaults.logging }} + {{- $logging := dict "Values" $index.logging "default" .Values.defaults.logging }} {{- include "vald.logging" $logging | nindent 6 }} server_config: - {{- $servers := dict "Values" .Values.indexManager.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $index.server_config "default" .Values.defaults.server_config }} {{- include "vald.servers" $servers | nindent 6 }} observability: - {{- $observability := dict "Values" .Values.indexManager.observability "default" .Values.defaults.observability }} + {{- $observability := dict "Values" $index.observability "default" .Values.defaults.observability }} {{- include "vald.observability" $observability | nindent 6 }} indexer: agent_port: {{ default .Values.defaults.server_config.servers.grpc.port .Values.agent.server_config.servers.grpc.port }} agent_name: {{ .Values.agent.name | quote }} agent_dns: {{ .Values.agent.name }}.{{ .Release.Namespace }}.svc.cluster.local - agent_namespace: {{ .Values.indexManager.indexer.agent_namespace | quote }} - node_name: {{ .Values.indexManager.indexer.node_name | quote }} + agent_namespace: {{ $index.indexer.agent_namespace | quote }} + node_name: {{ $index.indexer.node_name | quote }} discoverer: host: {{ .Values.discoverer.name }}.{{ .Release.Namespace }}.svc.cluster.local port: {{ default .Values.defaults.server_config.servers.grpc.port .Values.discoverer.server_config.servers.grpc.port }} - duration: {{ .Values.indexManager.indexer.discoverer.duration }} + duration: {{ $index.indexer.discoverer.duration }} discover_client: - {{- $discoverClient := dict "Values" .Values.indexManager.indexer.discoverer.discover_client "default" .Values.defaults.grpc.client }} + {{- $discoverClient := dict "Values" $index.indexer.discoverer.discover_client "default" .Values.defaults.grpc.client }} {{- include "vald.grpc.client" $discoverClient | nindent 10 }} 
agent_client: - {{- $agentClient := dict "Values" .Values.indexManager.indexer.discoverer.agent_client "default" .Values.defaults.grpc.client }} + {{- $agentClient := dict "Values" $index.indexer.discoverer.agent_client "default" .Values.defaults.grpc.client }} {{- include "vald.grpc.client" $agentClient | nindent 10 }} - concurrency: {{ .Values.indexManager.indexer.concurrency }} - auto_index_duration_limit: {{ .Values.indexManager.indexer.auto_index_duration_limit }} - auto_index_check_duration: {{ .Values.indexManager.indexer.auto_index_check_duration }} - auto_index_length: {{ .Values.indexManager.indexer.auto_index_length }} - creation_pool_size: {{ .Values.indexManager.indexer.creation_pool_size }} + concurrency: {{ $index.indexer.concurrency }} + auto_index_duration_limit: {{ $index.indexer.auto_index_duration_limit }} + auto_index_check_duration: {{ $index.indexer.auto_index_check_duration }} + auto_index_length: {{ $index.indexer.auto_index_length }} + creation_pool_size: {{ $index.indexer.creation_pool_size }} {{- end }} diff --git a/charts/vald/templates/manager/index/daemonset.yaml b/charts/vald/templates/manager/index/daemonset.yaml index ade6f00a01..e1f751bcf9 100644 --- a/charts/vald/templates/manager/index/daemonset.yaml +++ b/charts/vald/templates/manager/index/daemonset.yaml @@ -13,104 +13,105 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.indexManager.enabled (eq .Values.indexManager.kind "DaemonSet") }} +{{- $index := .Values.manager.index -}} +{{- if and $index.enabled (eq $index.kind "DaemonSet") }} apiVersion: apps/v1 kind: DaemonSet metadata: - name: {{ .Values.indexManager.name }} + name: {{ $index.name }} labels: - app: {{ .Values.indexManager.name }} + app: {{ $index.name }} app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-index - {{- if .Values.indexManager.annotations }} + {{- if $index.annotations }} annotations: - {{- toYaml .Values.indexManager.annotations | nindent 4 }} + {{- toYaml $index.annotations | nindent 4 }} {{- end }} spec: - revisionHistoryLimit: {{ .Values.indexManager.revisionHistoryLimit }} + revisionHistoryLimit: {{ $index.revisionHistoryLimit }} selector: matchLabels: - app: {{ .Values.indexManager.name }} + app: {{ $index.name }} updateStrategy: rollingUpdate: - maxUnavailable: {{ .Values.indexManager.rollingUpdate.maxUnavailable }} + maxUnavailable: {{ $index.rollingUpdate.maxUnavailable }} type: RollingUpdate template: metadata: creationTimestamp: null labels: - app: {{ .Values.indexManager.name }} + app: {{ $index.name }} app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: manager-index annotations: checksum/configmap: {{ include (print $.Template.BasePath "/manager/index/configmap.yaml") . 
| sha256sum }} - {{- if .Values.indexManager.podAnnotations }} - {{- toYaml .Values.indexManager.podAnnotations | nindent 8 }} + {{- if $index.podAnnotations }} + {{- toYaml $index.podAnnotations | nindent 8 }} {{- end }} spec: - {{- if .Values.indexManager.initContainers }} + {{- if $index.initContainers }} initContainers: - {{- $initContainers := dict "initContainers" .Values.indexManager.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- $initContainers := dict "initContainers" $index.initContainers "Values" .Values "namespace" .Release.Namespace -}} {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} {{- end }} affinity: - {{- include "vald.affinity" .Values.indexManager.affinity | nindent 8 }} - {{- if .Values.indexManager.topologySpreadConstraints }} + {{- include "vald.affinity" $index.affinity | nindent 8 }} + {{- if $index.topologySpreadConstraints }} topologySpreadConstraints: - {{- toYaml .Values.indexManager.topologySpreadConstraints | nindent 8 }} + {{- toYaml $index.topologySpreadConstraints | nindent 8 }} {{- end }} containers: - - name: {{ .Values.indexManager.name }} - image: "{{ .Values.indexManager.image.repository }}:{{ default .Values.defaults.image.tag .Values.indexManager.image.tag }}" - imagePullPolicy: {{ .Values.indexManager.image.pullPolicy }} - {{- $servers := dict "Values" .Values.indexManager.server_config "default" .Values.defaults.server_config -}} + - name: {{ $index.name }} + image: "{{ $index.image.repository }}:{{ default .Values.defaults.image.tag $index.image.tag }}" + imagePullPolicy: {{ $index.image.pullPolicy }} + {{- $servers := dict "Values" $index.server_config "default" .Values.defaults.server_config -}} {{- include "vald.containerPorts" $servers | trim | nindent 10 }} resources: - {{- toYaml .Values.indexManager.resources | nindent 12 }} + {{- toYaml $index.resources | nindent 12 }} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File - {{- if .Values.indexManager.env }} + {{- if $index.env }} env: - {{- toYaml .Values.indexManager.env | nindent 12 }} + {{- toYaml $index.env | nindent 12 }} {{- end }} volumeMounts: - - name: {{ .Values.indexManager.name }}-config + - name: {{ $index.name }}-config mountPath: /etc/server/ - {{- if .Values.indexManager.volumeMounts }} - {{- toYaml .Values.indexManager.volumeMounts | nindent 12 }} + {{- if $index.volumeMounts }} + {{- toYaml $index.volumeMounts | nindent 12 }} {{- end }} dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} - terminationGracePeriodSeconds: {{ .Values.indexManager.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ $index.terminationGracePeriodSeconds }} volumes: - configMap: defaultMode: 420 - name: {{ .Values.indexManager.name }}-config - name: {{ .Values.indexManager.name }}-config - {{- if .Values.indexManager.volumes }} - {{- toYaml .Values.indexManager.volumes | nindent 8 }} + name: {{ $index.name }}-config + name: {{ $index.name }}-config + {{- if $index.volumes }} + {{- toYaml $index.volumes | nindent 8 }} {{- end }} - {{- if .Values.indexManager.nodeName }} - nodeName: {{ .Values.indexManager.nodeName }} + {{- if $index.nodeName }} + nodeName: {{ $index.nodeName }} {{- end }} - {{- if .Values.indexManager.nodeSelector }} + {{- if $index.nodeSelector }} nodeSelector: - {{- toYaml .Values.indexManager.nodeSelector | nindent 8 }} + {{- toYaml $index.nodeSelector | nindent 8 }} {{- end }} - {{- if .Values.indexManager.tolerations }} + {{- if 
$index.tolerations }} tolerations: - {{- toYaml .Values.indexManager.tolerations | nindent 8 }} + {{- toYaml $index.tolerations | nindent 8 }} {{- end }} - {{- if .Values.indexManager.podPriority }} - {{- if .Values.indexManager.podPriority.enabled }} - priorityClassName: {{ .Values.indexManager.name }}-priority + {{- if $index.podPriority }} + {{- if $index.podPriority.enabled }} + priorityClassName: {{ $index.name }}-priority {{- end }} {{- end }} status: diff --git a/charts/vald/templates/manager/index/deployment.yaml b/charts/vald/templates/manager/index/deployment.yaml index 91e91e8df4..5d68d27f36 100644 --- a/charts/vald/templates/manager/index/deployment.yaml +++ b/charts/vald/templates/manager/index/deployment.yaml @@ -13,107 +13,108 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.indexManager.enabled (eq .Values.indexManager.kind "Deployment") }} +{{- $index := .Values.manager.index -}} +{{- if and $index.enabled (eq $index.kind "Deployment") }} apiVersion: apps/v1 kind: Deployment metadata: - name: {{ .Values.indexManager.name }} + name: {{ $index.name }} labels: - app: {{ .Values.indexManager.name }} + app: {{ $index.name }} app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-index - {{- if .Values.indexManager.annotations }} + {{- if $index.annotations }} annotations: - {{- toYaml .Values.indexManager.annotations | nindent 4 }} + {{- toYaml $index.annotations | nindent 4 }} {{- end }} spec: - progressDeadlineSeconds: {{ .Values.indexManager.progressDeadlineSeconds }} - replicas: {{ .Values.indexManager.replicas }} - revisionHistoryLimit: {{ .Values.indexManager.revisionHistoryLimit }} + progressDeadlineSeconds: {{ $index.progressDeadlineSeconds }} + replicas: {{ $index.replicas }} + revisionHistoryLimit: {{ $index.revisionHistoryLimit }} selector: matchLabels: - app: {{ .Values.indexManager.name }} + app: {{ $index.name }} strategy: rollingUpdate: - maxSurge: {{ .Values.indexManager.rollingUpdate.maxSurge }} - maxUnavailable: {{ .Values.indexManager.rollingUpdate.maxUnavailable }} + maxSurge: {{ $index.rollingUpdate.maxSurge }} + maxUnavailable: {{ $index.rollingUpdate.maxUnavailable }} type: RollingUpdate template: metadata: creationTimestamp: null labels: - app: {{ .Values.indexManager.name }} + app: {{ $index.name }} app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: manager-index annotations: checksum/configmap: {{ include (print $.Template.BasePath "/manager/index/configmap.yaml") . 
| sha256sum }} - {{- if .Values.indexManager.podAnnotations }} - {{- toYaml .Values.indexManager.podAnnotations | nindent 8 }} + {{- if $index.podAnnotations }} + {{- toYaml $index.podAnnotations | nindent 8 }} {{- end }} spec: - {{- if .Values.indexManager.initContainers }} + {{- if $index.initContainers }} initContainers: - {{- $initContainers := dict "initContainers" .Values.indexManager.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- $initContainers := dict "initContainers" $index.initContainers "Values" .Values "namespace" .Release.Namespace -}} {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} {{- end }} affinity: - {{- include "vald.affinity" .Values.indexManager.affinity | nindent 8 }} - {{- if .Values.indexManager.topologySpreadConstraints }} + {{- include "vald.affinity" $index.affinity | nindent 8 }} + {{- if $index.topologySpreadConstraints }} topologySpreadConstraints: - {{- toYaml .Values.indexManager.topologySpreadConstraints | nindent 8 }} + {{- toYaml $index.topologySpreadConstraints | nindent 8 }} {{- end }} containers: - - name: {{ .Values.indexManager.name }} - image: "{{ .Values.indexManager.image.repository }}:{{ default .Values.defaults.image.tag .Values.indexManager.image.tag }}" - imagePullPolicy: {{ .Values.indexManager.image.pullPolicy }} - {{- $servers := dict "Values" .Values.indexManager.server_config "default" .Values.defaults.server_config -}} + - name: {{ $index.name }} + image: "{{ $index.image.repository }}:{{ default .Values.defaults.image.tag $index.image.tag }}" + imagePullPolicy: {{ $index.image.pullPolicy }} + {{- $servers := dict "Values" $index.server_config "default" .Values.defaults.server_config -}} {{- include "vald.containerPorts" $servers | trim | nindent 10 }} resources: - {{- toYaml .Values.indexManager.resources | nindent 12 }} + {{- toYaml $index.resources | nindent 12 }} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File - {{- if .Values.indexManager.env }} + {{- if $index.env }} env: - {{- toYaml .Values.indexManager.env | nindent 12 }} + {{- toYaml $index.env | nindent 12 }} {{- end }} volumeMounts: - - name: {{ .Values.indexManager.name }}-config + - name: {{ $index.name }}-config mountPath: /etc/server/ - {{- if .Values.indexManager.volumeMounts }} - {{- toYaml .Values.indexManager.volumeMounts | nindent 12 }} + {{- if $index.volumeMounts }} + {{- toYaml $index.volumeMounts | nindent 12 }} {{- end }} dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} - terminationGracePeriodSeconds: {{ .Values.indexManager.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ $index.terminationGracePeriodSeconds }} volumes: - configMap: defaultMode: 420 - name: {{ .Values.indexManager.name }}-config - name: {{ .Values.indexManager.name }}-config - {{- if .Values.indexManager.volumes }} - {{- toYaml .Values.indexManager.volumes | nindent 8 }} + name: {{ $index.name }}-config + name: {{ $index.name }}-config + {{- if $index.volumes }} + {{- toYaml $index.volumes | nindent 8 }} {{- end }} - {{- if .Values.indexManager.nodeName }} - nodeName: {{ .Values.indexManager.nodeName }} + {{- if $index.nodeName }} + nodeName: {{ $index.nodeName }} {{- end }} - {{- if .Values.indexManager.nodeSelector }} + {{- if $index.nodeSelector }} nodeSelector: - {{- toYaml .Values.indexManager.nodeSelector | nindent 8 }} + {{- toYaml $index.nodeSelector | nindent 8 }} {{- end }} - {{- if .Values.indexManager.tolerations }} + {{- if 
$index.tolerations }} tolerations: - {{- toYaml .Values.indexManager.tolerations | nindent 8 }} + {{- toYaml $index.tolerations | nindent 8 }} {{- end }} - {{- if .Values.indexManager.podPriority }} - {{- if .Values.indexManager.podPriority.enabled }} - priorityClassName: {{ .Values.indexManager.name }}-priority + {{- if $index.podPriority }} + {{- if $index.podPriority.enabled }} + priorityClassName: {{ $index.name }}-priority {{- end }} {{- end }} status: diff --git a/charts/vald/templates/manager/index/pdb.yaml b/charts/vald/templates/manager/index/pdb.yaml index 93636bf0a5..b2a95d95af 100644 --- a/charts/vald/templates/manager/index/pdb.yaml +++ b/charts/vald/templates/manager/index/pdb.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.indexManager.enabled }} +{{- $index := .Values.manager.index -}} +{{- if $index.enabled }} apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: - name: {{ .Values.indexManager.name }} + name: {{ $index.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -26,8 +27,8 @@ metadata: app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-index spec: - maxUnavailable: {{ .Values.indexManager.maxUnavailable }} + maxUnavailable: {{ $index.maxUnavailable }} selector: matchLabels: - app: {{ .Values.indexManager.name }} + app: {{ $index.name }} {{- end }} diff --git a/charts/vald/templates/manager/index/priorityclass.yaml b/charts/vald/templates/manager/index/priorityclass.yaml index f1e78c47f5..1b44a9140f 100644 --- a/charts/vald/templates/manager/index/priorityclass.yaml +++ b/charts/vald/templates/manager/index/priorityclass.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.indexManager.enabled .Values.indexManager.podPriority.enabled }} +{{- $index := .Values.manager.index -}} +{{- if and $index.enabled $index.podPriority.enabled }} apiVersion: scheduling.k8s.io/v1 kind: PriorityClass metadata: - name: {{ .Values.indexManager.name }}-priority + name: {{ $index.name }}-priority labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -25,7 +26,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-index -value: {{ .Values.indexManager.podPriority.value }} +value: {{ $index.podPriority.value }} globalDefault: false -description: "A priority class for Vald indexManager." +description: "A priority class for Vald index manager." {{- end }} diff --git a/charts/vald/templates/manager/index/svc.yaml b/charts/vald/templates/manager/index/svc.yaml index 63294e83e1..a562aa0482 100644 --- a/charts/vald/templates/manager/index/svc.yaml +++ b/charts/vald/templates/manager/index/svc.yaml @@ -13,14 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
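The index-manager templates above use the same defaults-fallback idiom as the compressor ones: scalar fields go through the template "default" function against the chart-wide .Values.defaults, while structured sections are merged by the named helpers via a dict of the component block plus the defaults block. A minimal sketch of that idiom, condensed from the templates above (the $index binding and field names are exactly as they appear there):

{{- $index := .Values.manager.index -}}
# scalar fallback: use the component value if set, otherwise the chart default
image: "{{ $index.image.repository }}:{{ default .Values.defaults.image.tag $index.image.tag }}"
time_zone: {{ default .Values.defaults.time_zone $index.time_zone }}
# structured fallback: hand both the component block and the defaults to a helper
{{- $servers := dict "Values" $index.server_config "default" .Values.defaults.server_config }}
{{- include "vald.servers" $servers | nindent 6 }}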
# -{{- if .Values.indexManager.enabled }} +{{- $index := .Values.manager.index -}} +{{- if $index.enabled }} apiVersion: v1 kind: Service metadata: - name: {{ .Values.indexManager.name }} - {{- if .Values.indexManager.service.annotations }} + name: {{ $index.name }} + {{- if $index.service.annotations }} annotations: - {{- toYaml .Values.indexManager.service.annotations | nindent 4 }} + {{- toYaml $index.service.annotations | nindent 4 }} {{- end }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} @@ -29,20 +30,20 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: manager-index - {{- if .Values.indexManager.service.labels }} - {{- toYaml .Values.indexManager.service.labels | nindent 4 }} + {{- if $index.service.labels }} + {{- toYaml $index.service.labels | nindent 4 }} {{- end }} spec: - {{- $servers := dict "Values" .Values.indexManager.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $index.server_config "default" .Values.defaults.server_config }} {{- include "vald.servicePorts" $servers | nindent 2 }} selector: app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/component: manager-index - {{- if eq .Values.indexManager.serviceType "ClusterIP" }} + {{- if eq $index.serviceType "ClusterIP" }} clusterIP: None {{- end }} - type: {{ .Values.indexManager.serviceType }} - {{- if .Values.indexManager.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.indexManager.externalTrafficPolicy }} + type: {{ $index.serviceType }} + {{- if $index.externalTrafficPolicy }} + externalTrafficPolicy: {{ $index.externalTrafficPolicy }} {{- end }} {{- end }} diff --git a/charts/vald/templates/meta/configmap.yaml b/charts/vald/templates/meta/configmap.yaml index 19ea6623bb..59031628e4 100644 --- a/charts/vald/templates/meta/configmap.yaml +++ b/charts/vald/templates/meta/configmap.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.meta.enabled }} +{{- $meta := .Values.meta -}} +{{- if $meta.enabled }} apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Values.meta.name }}-config + name: {{ $meta.name }}-config labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . 
}} @@ -28,23 +29,23 @@ metadata: data: config.yaml: | --- - version: {{ .Values.meta.version }} - time_zone: {{ default .Values.defaults.time_zone .Values.meta.time_zone }} + version: {{ $meta.version }} + time_zone: {{ default .Values.defaults.time_zone $meta.time_zone }} logging: - {{- $logging := dict "Values" .Values.meta.logging "default" .Values.defaults.logging }} + {{- $logging := dict "Values" $meta.logging "default" .Values.defaults.logging }} {{- include "vald.logging" $logging | nindent 6 }} server_config: - {{- $servers := dict "Values" .Values.meta.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $meta.server_config "default" .Values.defaults.server_config }} {{- include "vald.servers" $servers | nindent 6 }} observability: - {{- $observability := dict "Values" .Values.meta.observability "default" .Values.defaults.observability }} + {{- $observability := dict "Values" $meta.observability "default" .Values.defaults.observability }} {{- include "vald.observability" $observability | nindent 6 }} - {{- if .Values.meta.redis.enabled }} + {{- if $meta.redis.enabled }} redis_config: - {{- toYaml .Values.meta.redis.config | nindent 6 }} + {{- toYaml $meta.redis.config | nindent 6 }} {{- end }} - {{- if .Values.meta.cassandra.enabled }} + {{- if $meta.cassandra.enabled }} cassandra_config: - {{- toYaml .Values.meta.cassandra.config | nindent 6 }} + {{- toYaml $meta.cassandra.config | nindent 6 }} {{- end }} {{- end }} diff --git a/charts/vald/templates/meta/daemonset.yaml b/charts/vald/templates/meta/daemonset.yaml index 225a87b6f5..19bf7827fb 100644 --- a/charts/vald/templates/meta/daemonset.yaml +++ b/charts/vald/templates/meta/daemonset.yaml @@ -13,104 +13,105 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.meta.enabled (eq .Values.meta.kind "DaemonSet") }} +{{- $meta := .Values.meta -}} +{{- if and $meta.enabled (eq $meta.kind "DaemonSet") }} apiVersion: apps/v1 kind: DaemonSet metadata: - name: {{.Values.meta.name }} + name: {{$meta.name }} labels: - app: {{ .Values.meta.name }} + app: {{ $meta.name }} app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: meta - {{- if .Values.meta.annotations }} + {{- if $meta.annotations }} annotations: - {{- toYaml .Values.meta.annotations | nindent 4 }} + {{- toYaml $meta.annotations | nindent 4 }} {{- end }} spec: - revisionHistoryLimit: {{ .Values.meta.revisionHistoryLimit }} + revisionHistoryLimit: {{ $meta.revisionHistoryLimit }} selector: matchLabels: - app: {{ .Values.meta.name }} + app: {{ $meta.name }} updateStrategy: rollingUpdate: - maxUnavailable: {{ .Values.meta.rollingUpdate.maxUnavailable }} + maxUnavailable: {{ $meta.rollingUpdate.maxUnavailable }} type: RollingUpdate template: metadata: creationTimestamp: null labels: - app: {{ .Values.meta.name }} + app: {{ $meta.name }} app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: meta annotations: checksum/configmap: {{ include (print $.Template.BasePath "/meta/configmap.yaml") . 
| sha256sum }} - {{- if .Values.meta.podAnnotations }} - {{- toYaml .Values.meta.podAnnotations | nindent 8 }} + {{- if $meta.podAnnotations }} + {{- toYaml $meta.podAnnotations | nindent 8 }} {{- end }} spec: - {{- if .Values.meta.initContainers }} + {{- if $meta.initContainers }} initContainers: - {{- $initContainers := dict "initContainers" .Values.meta.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- $initContainers := dict "initContainers" $meta.initContainers "Values" .Values "namespace" .Release.Namespace -}} {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} {{- end }} affinity: - {{- include "vald.affinity" .Values.meta.affinity | nindent 8 }} - {{- if .Values.meta.topologySpreadConstraints }} + {{- include "vald.affinity" $meta.affinity | nindent 8 }} + {{- if $meta.topologySpreadConstraints }} topologySpreadConstraints: - {{- toYaml .Values.meta.topologySpreadConstraints | nindent 8 }} + {{- toYaml $meta.topologySpreadConstraints | nindent 8 }} {{- end }} containers: - - name: {{ .Values.meta.name }} - image: "{{ .Values.meta.image.repository }}:{{ default .Values.defaults.image.tag .Values.meta.image.tag }}" - imagePullPolicy: {{ .Values.meta.image.pullPolicy }} - {{- $servers := dict "Values" .Values.meta.server_config "default" .Values.defaults.server_config -}} + - name: {{ $meta.name }} + image: "{{ $meta.image.repository }}:{{ default .Values.defaults.image.tag $meta.image.tag }}" + imagePullPolicy: {{ $meta.image.pullPolicy }} + {{- $servers := dict "Values" $meta.server_config "default" .Values.defaults.server_config -}} {{- include "vald.containerPorts" $servers | trim | nindent 10 }} resources: - {{- toYaml .Values.meta.resources | nindent 12 }} + {{- toYaml $meta.resources | nindent 12 }} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File - {{- if .Values.meta.env }} + {{- if $meta.env }} env: - {{- toYaml .Values.meta.env | nindent 12 }} + {{- toYaml $meta.env | nindent 12 }} {{- end }} volumeMounts: - - name: {{ .Values.meta.name }}-config + - name: {{ $meta.name }}-config mountPath: /etc/server/ - {{- if .Values.meta.volumeMounts }} - {{- toYaml .Values.meta.volumeMounts | nindent 12 }} + {{- if $meta.volumeMounts }} + {{- toYaml $meta.volumeMounts | nindent 12 }} {{- end }} dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} - terminationGracePeriodSeconds: {{ .Values.meta.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ $meta.terminationGracePeriodSeconds }} volumes: - - name: {{ .Values.meta.name }}-config + - name: {{ $meta.name }}-config configMap: defaultMode: 420 - name: {{ .Values.meta.name }}-config - {{- if .Values.meta.volumes }} - {{- toYaml .Values.meta.volumes | nindent 8 }} + name: {{ $meta.name }}-config + {{- if $meta.volumes }} + {{- toYaml $meta.volumes | nindent 8 }} {{- end }} - {{- if .Values.meta.nodeName }} - nodeName: {{ .Values.meta.nodeName }} + {{- if $meta.nodeName }} + nodeName: {{ $meta.nodeName }} {{- end }} - {{- if .Values.meta.nodeSelector }} + {{- if $meta.nodeSelector }} nodeSelector: - {{- toYaml .Values.meta.nodeSelector | nindent 8 }} + {{- toYaml $meta.nodeSelector | nindent 8 }} {{- end }} - {{- if .Values.meta.tolerations }} + {{- if $meta.tolerations }} tolerations: - {{- toYaml .Values.meta.tolerations | nindent 8 }} + {{- toYaml $meta.tolerations | nindent 8 }} {{- end }} - {{- if .Values.meta.podPriority }} - {{- if .Values.meta.podPriority.enabled }} - priorityClassName: {{ 
.Values.meta.name }}-priority + {{- if $meta.podPriority }} + {{- if $meta.podPriority.enabled }} + priorityClassName: {{ $meta.name }}-priority {{- end }} {{- end }} status: diff --git a/charts/vald/templates/meta/deployment.yaml b/charts/vald/templates/meta/deployment.yaml index a7fa9ff577..6194f9a766 100644 --- a/charts/vald/templates/meta/deployment.yaml +++ b/charts/vald/templates/meta/deployment.yaml @@ -13,109 +13,110 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.meta.enabled (eq .Values.meta.kind "Deployment") }} +{{- $meta := .Values.meta -}} +{{- if and $meta.enabled (eq $meta.kind "Deployment") }} apiVersion: apps/v1 kind: Deployment metadata: - name: {{.Values.meta.name }} + name: {{$meta.name }} labels: - app: {{ .Values.meta.name }} + app: {{ $meta.name }} app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: meta - {{- if .Values.meta.annotations }} + {{- if $meta.annotations }} annotations: - {{- toYaml .Values.meta.annotations | nindent 4 }} + {{- toYaml $meta.annotations | nindent 4 }} {{- end }} spec: - progressDeadlineSeconds: {{ .Values.meta.progressDeadlineSeconds }} - {{- if not .Values.meta.hpa.enabled }} - replicas: {{ .Values.meta.minReplicas }} + progressDeadlineSeconds: {{ $meta.progressDeadlineSeconds }} + {{- if not $meta.hpa.enabled }} + replicas: {{ $meta.minReplicas }} {{- end }} - revisionHistoryLimit: {{ .Values.meta.revisionHistoryLimit }} + revisionHistoryLimit: {{ $meta.revisionHistoryLimit }} selector: matchLabels: - app: {{ .Values.meta.name }} + app: {{ $meta.name }} strategy: rollingUpdate: - maxSurge: {{ .Values.meta.rollingUpdate.maxSurge }} - maxUnavailable: {{ .Values.meta.rollingUpdate.maxUnavailable }} + maxSurge: {{ $meta.rollingUpdate.maxSurge }} + maxUnavailable: {{ $meta.rollingUpdate.maxUnavailable }} type: RollingUpdate template: metadata: creationTimestamp: null labels: - app: {{ .Values.meta.name }} + app: {{ $meta.name }} app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/component: meta annotations: checksum/configmap: {{ include (print $.Template.BasePath "/meta/configmap.yaml") . 
| sha256sum }} - {{- if .Values.meta.podAnnotations }} - {{- toYaml .Values.meta.podAnnotations | nindent 8 }} + {{- if $meta.podAnnotations }} + {{- toYaml $meta.podAnnotations | nindent 8 }} {{- end }} spec: - {{- if .Values.meta.initContainers }} + {{- if $meta.initContainers }} initContainers: - {{- $initContainers := dict "initContainers" .Values.meta.initContainers "Values" .Values "namespace" .Release.Namespace -}} + {{- $initContainers := dict "initContainers" $meta.initContainers "Values" .Values "namespace" .Release.Namespace -}} {{- include "vald.initContainers" $initContainers | trim | nindent 8 }} {{- end }} affinity: - {{- include "vald.affinity" .Values.meta.affinity | nindent 8 }} - {{- if .Values.meta.topologySpreadConstraints }} + {{- include "vald.affinity" $meta.affinity | nindent 8 }} + {{- if $meta.topologySpreadConstraints }} topologySpreadConstraints: - {{- toYaml .Values.meta.topologySpreadConstraints | nindent 8 }} + {{- toYaml $meta.topologySpreadConstraints | nindent 8 }} {{- end }} containers: - - name: {{ .Values.meta.name }} - image: "{{ .Values.meta.image.repository }}:{{ default .Values.defaults.image.tag .Values.meta.image.tag }}" - imagePullPolicy: {{ .Values.meta.image.pullPolicy }} - {{- $servers := dict "Values" .Values.meta.server_config "default" .Values.defaults.server_config -}} + - name: {{ $meta.name }} + image: "{{ $meta.image.repository }}:{{ default .Values.defaults.image.tag $meta.image.tag }}" + imagePullPolicy: {{ $meta.image.pullPolicy }} + {{- $servers := dict "Values" $meta.server_config "default" .Values.defaults.server_config -}} {{- include "vald.containerPorts" $servers | trim | nindent 10 }} resources: - {{- toYaml .Values.meta.resources | nindent 12 }} + {{- toYaml $meta.resources | nindent 12 }} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File - {{- if .Values.meta.env }} + {{- if $meta.env }} env: - {{- toYaml .Values.meta.env | nindent 12 }} + {{- toYaml $meta.env | nindent 12 }} {{- end }} volumeMounts: - - name: {{ .Values.meta.name }}-config + - name: {{ $meta.name }}-config mountPath: /etc/server/ - {{- if .Values.meta.volumeMounts }} - {{- toYaml .Values.meta.volumeMounts | nindent 12 }} + {{- if $meta.volumeMounts }} + {{- toYaml $meta.volumeMounts | nindent 12 }} {{- end }} dnsPolicy: ClusterFirst restartPolicy: Always schedulerName: default-scheduler securityContext: {} - terminationGracePeriodSeconds: {{ .Values.meta.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ $meta.terminationGracePeriodSeconds }} volumes: - - name: {{ .Values.meta.name }}-config + - name: {{ $meta.name }}-config configMap: defaultMode: 420 - name: {{ .Values.meta.name }}-config - {{- if .Values.meta.volumes }} - {{- toYaml .Values.meta.volumes | nindent 8 }} + name: {{ $meta.name }}-config + {{- if $meta.volumes }} + {{- toYaml $meta.volumes | nindent 8 }} {{- end }} - {{- if .Values.meta.nodeName }} - nodeName: {{ .Values.meta.nodeName }} + {{- if $meta.nodeName }} + nodeName: {{ $meta.nodeName }} {{- end }} - {{- if .Values.meta.nodeSelector }} + {{- if $meta.nodeSelector }} nodeSelector: - {{- toYaml .Values.meta.nodeSelector | nindent 8 }} + {{- toYaml $meta.nodeSelector | nindent 8 }} {{- end }} - {{- if .Values.meta.tolerations }} + {{- if $meta.tolerations }} tolerations: - {{- toYaml .Values.meta.tolerations | nindent 8 }} + {{- toYaml $meta.tolerations | nindent 8 }} {{- end }} - {{- if .Values.meta.podPriority }} - {{- if .Values.meta.podPriority.enabled }} - priorityClassName: {{ 
.Values.meta.name }}-priority + {{- if $meta.podPriority }} + {{- if $meta.podPriority.enabled }} + priorityClassName: {{ $meta.name }}-priority {{- end }} {{- end }} status: diff --git a/charts/vald/templates/meta/hpa.yaml b/charts/vald/templates/meta/hpa.yaml index a22f0aea86..e1c0051eb8 100644 --- a/charts/vald/templates/meta/hpa.yaml +++ b/charts/vald/templates/meta/hpa.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.meta.enabled .Values.meta.hpa.enabled }} +{{- $meta := .Values.meta -}} +{{- if and $meta.enabled $meta.hpa.enabled }} apiVersion: autoscaling/v1 kind: HorizontalPodAutoscaler metadata: - name: {{ .Values.meta.name }} + name: {{ $meta.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -26,12 +27,12 @@ metadata: app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: meta spec: - maxReplicas: {{ .Values.meta.maxReplicas }} - minReplicas: {{ .Values.meta.minReplicas }} + maxReplicas: {{ $meta.maxReplicas }} + minReplicas: {{ $meta.minReplicas }} scaleTargetRef: apiVersion: apps/v1 - kind: {{ .Values.meta.kind }} - name: {{ .Values.meta.name }} - targetCPUUtilizationPercentage: {{ .Values.meta.hpa.targetCPUUtilizationPercentage }} + kind: {{ $meta.kind }} + name: {{ $meta.name }} + targetCPUUtilizationPercentage: {{ $meta.hpa.targetCPUUtilizationPercentage }} status: {{- end }} diff --git a/charts/vald/templates/meta/pdb.yaml b/charts/vald/templates/meta/pdb.yaml index 023aaff3b4..36ba467a37 100644 --- a/charts/vald/templates/meta/pdb.yaml +++ b/charts/vald/templates/meta/pdb.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.meta.enabled }} +{{- $meta := .Values.meta -}} +{{- if $meta.enabled }} apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: - name: {{ .Values.meta.name }} + name: {{ $meta.name }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -26,8 +27,8 @@ metadata: app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: meta spec: - maxUnavailable: {{ .Values.meta.maxUnavailable }} + maxUnavailable: {{ $meta.maxUnavailable }} selector: matchLabels: - app: {{ .Values.meta.name }} + app: {{ $meta.name }} {{- end }} diff --git a/charts/vald/templates/meta/priorityclass.yaml b/charts/vald/templates/meta/priorityclass.yaml index 1a6b9e768d..0f12241012 100644 --- a/charts/vald/templates/meta/priorityclass.yaml +++ b/charts/vald/templates/meta/priorityclass.yaml @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if and .Values.meta.enabled .Values.meta.podPriority.enabled }} +{{- $meta := .Values.meta -}} +{{- if and $meta.enabled $meta.podPriority.enabled }} apiVersion: scheduling.k8s.io/v1 kind: PriorityClass metadata: - name: {{ .Values.meta.name }}-priority + name: {{ $meta.name }}-priority labels: app.kubernetes.io/name: {{ include "vald.name" . }} helm.sh/chart: {{ include "vald.chart" . }} @@ -25,7 +26,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: meta -value: {{ .Values.meta.podPriority.value }} +value: {{ $meta.podPriority.value }} globalDefault: false description: "A priority class for Vald meta." 
{{- end }} diff --git a/charts/vald/templates/meta/svc.yaml b/charts/vald/templates/meta/svc.yaml index 6441434e43..0eb04c2a10 100644 --- a/charts/vald/templates/meta/svc.yaml +++ b/charts/vald/templates/meta/svc.yaml @@ -13,14 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.meta.enabled }} +{{- $meta := .Values.meta -}} +{{- if $meta.enabled }} apiVersion: v1 kind: Service metadata: - name: {{ .Values.meta.name }} - {{- if .Values.meta.service.annotations }} + name: {{ $meta.name }} + {{- if $meta.service.annotations }} annotations: - {{- toYaml .Values.meta.service.annotations | nindent 4 }} + {{- toYaml $meta.service.annotations | nindent 4 }} {{- end }} labels: app.kubernetes.io/name: {{ include "vald.name" . }} @@ -29,20 +30,20 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.Version }} app.kubernetes.io/component: meta - {{- if .Values.meta.service.labels }} - {{- toYaml .Values.meta.service.labels | nindent 4 }} + {{- if $meta.service.labels }} + {{- toYaml $meta.service.labels | nindent 4 }} {{- end }} spec: - {{- $servers := dict "Values" .Values.meta.server_config "default" .Values.defaults.server_config }} + {{- $servers := dict "Values" $meta.server_config "default" .Values.defaults.server_config }} {{- include "vald.servicePorts" $servers | nindent 2 }} selector: app.kubernetes.io/name: {{ include "vald.name" . }} app.kubernetes.io/component: meta - {{- if eq .Values.meta.serviceType "ClusterIP" }} + {{- if eq $meta.serviceType "ClusterIP" }} clusterIP: None {{- end }} - type: {{ .Values.meta.serviceType }} - {{- if .Values.meta.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.meta.externalTrafficPolicy }} + type: {{ $meta.serviceType }} + {{- if $meta.externalTrafficPolicy }} + externalTrafficPolicy: {{ $meta.externalTrafficPolicy }} {{- end }} {{- end }} diff --git a/charts/vald/values-agent-ngt-standalone.yaml b/charts/vald/values-agent-ngt-standalone.yaml index 074bc3ba33..db8ff7d370 100644 --- a/charts/vald/values-agent-ngt-standalone.yaml +++ b/charts/vald/values-agent-ngt-standalone.yaml @@ -26,19 +26,21 @@ agent: search_edge_size: 10 gateway: - enabled: false + vald: + enabled: false discoverer: enabled: false -compressor: - enabled: false +manager: + compressor: + enabled: false -backupManager: - enabled: false + backup: + enabled: false -indexManager: - enabled: false + index: + enabled: false meta: enabled: false diff --git a/charts/vald/values-cassandra.yaml b/charts/vald/values-cassandra.yaml index 66c2cea308..f21649616e 100644 --- a/charts/vald/values-cassandra.yaml +++ b/charts/vald/values-cassandra.yaml @@ -14,43 +14,44 @@ # limitations under the License. 
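Every manager and meta template in this change follows the same shape: bind the component's values block to a local variable at the top of the file, then read every field from that variable instead of repeating the full .Values path (which, for compressor, backup and index, has also moved under .Values.manager). A rough skeleton of the pattern, condensed from the compressor deployment above rather than a complete manifest:

{{- $compressor := .Values.manager.compressor -}}
{{- if and $compressor.enabled (eq $compressor.kind "Deployment") }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ $compressor.name }}
  labels:
    app: {{ $compressor.name }}
{{- end }}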
# -backupManager: - image: - repository: vdaas/vald-manager-backup-cassandra - initContainers: - - type: wait-for-cassandra - name: wait-for-cassandra - image: cassandra:latest - cassandra: - hosts: - - cassandra-0.default.svc.cluster.local - - cassandra-1.default.svc.cluster.local - - cassandra-2.default.svc.cluster.local - options: - - "-uroot" - - "-p${CASSANDRA_PASSWORD}" - sleepDuration: 2 - env: +manager: + backup: + image: + repository: vdaas/vald-manager-backup-cassandra + initContainers: + - type: wait-for-cassandra + name: wait-for-cassandra + image: cassandra:latest + cassandra: + hosts: + - cassandra-0.default.svc.cluster.local + - cassandra-1.default.svc.cluster.local + - cassandra-2.default.svc.cluster.local + options: + - "-uroot" + - "-p${CASSANDRA_PASSWORD}" + sleepDuration: 2 + env: + - name: CASSANDRA_PASSWORD + valueFrom: + secretKeyRef: + name: cassandra-secret + key: password + env: - name: CASSANDRA_PASSWORD valueFrom: secretKeyRef: name: cassandra-secret key: password - env: - - name: CASSANDRA_PASSWORD - valueFrom: - secretKeyRef: - name: cassandra-secret - key: password - mysql: - enabled: false - cassandra: - enabled: true - config: - hosts: - - cassandra-0.cassandra.default.svc.cluster.local - - cassandra-1.cassandra.default.svc.cluster.local - - cassandra-2.cassandra.default.svc.cluster.local + mysql: + enabled: false + cassandra: + enabled: true + config: + hosts: + - cassandra-0.cassandra.default.svc.cluster.local + - cassandra-1.cassandra.default.svc.cluster.local + - cassandra-2.cassandra.default.svc.cluster.local meta: image: diff --git a/charts/vald/values-ci.yaml b/charts/vald/values-ci.yaml index da333affe0..62c3cfaf10 100644 --- a/charts/vald/values-ci.yaml +++ b/charts/vald/values-ci.yaml @@ -15,15 +15,16 @@ # gateway: - minReplicas: 1 - hpa: - enabled: false - resources: - requests: - cpu: 100m - memory: 50Mi - gateway_config: - index_replica: 3 + vald: + minReplicas: 1 + hpa: + enabled: false + resources: + requests: + cpu: 100m + memory: 50Mi + gateway_config: + index_replica: 3 agent: minReplicas: 3 @@ -50,32 +51,33 @@ discoverer: cpu: 100m memory: 50Mi -compressor: - minReplicas: 1 - hpa: - enabled: false - resources: - requests: - cpu: 100m - memory: 50Mi - compress: - compress_algorithm: gob +manager: + compressor: + minReplicas: 1 + hpa: + enabled: false + resources: + requests: + cpu: 100m + memory: 50Mi + compress: + compress_algorithm: gob -backupManager: - minReplicas: 1 - hpa: - enabled: false - resources: - requests: - cpu: 100m - memory: 30Mi + backup: + minReplicas: 1 + hpa: + enabled: false + resources: + requests: + cpu: 100m + memory: 30Mi -indexManager: - replicas: 1 - resources: - requests: - cpu: 100m - memory: 30Mi + index: + replicas: 1 + resources: + requests: + cpu: 100m + memory: 30Mi meta: minReplicas: 1 diff --git a/charts/vald/values-dev.yaml b/charts/vald/values-dev.yaml index 1c9e98a5df..fa110484c4 100644 --- a/charts/vald/values-dev.yaml +++ b/charts/vald/values-dev.yaml @@ -23,14 +23,15 @@ defaults: enabled: true gateway: - podAnnotations: - profefe.com/enable: "true" - profefe.com/port: "6060" - profefe.com/service: "vald-gateway" - resources: - requests: - cpu: 100m - memory: 50Mi + vald: + podAnnotations: + profefe.com/enable: "true" + profefe.com/port: "6060" + profefe.com/service: "vald-gateway" + resources: + requests: + cpu: 100m + memory: 50Mi agent: podAnnotations: @@ -55,35 +56,36 @@ discoverer: cpu: 100m memory: 50Mi -compressor: - podAnnotations: - profefe.com/enable: "true" - profefe.com/port: "6060" - 
profefe.com/service: "vald-manager-compressor" - resources: - requests: - cpu: 100m - memory: 50Mi +manager: + compressor: + podAnnotations: + profefe.com/enable: "true" + profefe.com/port: "6060" + profefe.com/service: "vald-manager-compressor" + resources: + requests: + cpu: 100m + memory: 50Mi -backupManager: - podAnnotations: - profefe.com/enable: "true" - profefe.com/port: "6060" - profefe.com/service: "vald-manager-backup" - resources: - requests: - cpu: 100m - memory: 30Mi + backup: + podAnnotations: + profefe.com/enable: "true" + profefe.com/port: "6060" + profefe.com/service: "vald-manager-backup" + resources: + requests: + cpu: 100m + memory: 30Mi -indexManager: - podAnnotations: - profefe.com/enable: "true" - profefe.com/port: "6060" - profefe.com/service: "vald-manager-index" - resources: - requests: - cpu: 100m - memory: 30Mi + index: + podAnnotations: + profefe.com/enable: "true" + profefe.com/port: "6060" + profefe.com/service: "vald-manager-index" + resources: + requests: + cpu: 100m + memory: 30Mi meta: podAnnotations: diff --git a/charts/vald/values-gateways.yaml b/charts/vald/values-gateways.yaml new file mode 100644 index 0000000000..982c3cab96 --- /dev/null +++ b/charts/vald/values-gateways.yaml @@ -0,0 +1,27 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +gateway: + vald: + enabled: false + backup: + enabled: true + filter: + enabled: true + lb: + enabled: true + meta: + enabled: true diff --git a/charts/vald/values-scylla.yaml b/charts/vald/values-scylla.yaml index 90021606f3..fc86bc536b 100644 --- a/charts/vald/values-scylla.yaml +++ b/charts/vald/values-scylla.yaml @@ -14,29 +14,30 @@ # limitations under the License. 
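Taken together, the values files touched here (values-ci, values-dev, values-cassandra, values-scylla, values-agent-ngt-standalone, plus the new values-gateways.yaml) converge on one layout: the former top-level compressor, backupManager and indexManager blocks move under manager as compressor, backup and index, the gateway block gains per-gateway sub-keys, and meta stays top level. A condensed sketch of that layout; the enabled flags are illustrative (values-gateways.yaml, for instance, disables gateway.vald and enables the split gateways, while values-agent-ngt-standalone.yaml disables everything):

gateway:
  vald:
    enabled: false   # disabled in values-gateways.yaml in favor of the split gateways
  lb:
    enabled: true
  meta:
    enabled: true
  backup:
    enabled: true
  filter:
    enabled: true
manager:
  compressor:        # was the top-level compressor block
    enabled: true
  backup:            # was the top-level backupManager block
    enabled: true
  index:             # was the top-level indexManager block
    enabled: true
meta:                 # unchanged location
  enabled: true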
# -backupManager: - image: - repository: vdaas/vald-manager-backup-cassandra - initContainers: - - type: wait-for-cassandra - name: wait-for-scylla - image: cassandra:latest - cassandra: +manager: + backup: + image: + repository: vdaas/vald-manager-backup-cassandra + initContainers: + - type: wait-for-cassandra + name: wait-for-scylla + image: cassandra:latest + cassandra: + hosts: + - scylla-0.scylla.default.svc.cluster.local + - scylla-1.scylla.default.svc.cluster.local + - scylla-2.scylla.default.svc.cluster.local + sleepDuration: 2 + env: [] + mysql: + enabled: false + cassandra: + enabled: true + config: hosts: - scylla-0.scylla.default.svc.cluster.local - scylla-1.scylla.default.svc.cluster.local - scylla-2.scylla.default.svc.cluster.local - sleepDuration: 2 - env: [] - mysql: - enabled: false - cassandra: - enabled: true - config: - hosts: - - scylla-0.scylla.default.svc.cluster.local - - scylla-1.scylla.default.svc.cluster.local - - scylla-2.scylla.default.svc.cluster.local meta: image: diff --git a/charts/vald/values.schema.json b/charts/vald/values.schema.json index f100c093bb..17551656d0 100644 --- a/charts/vald/values.schema.json +++ b/charts/vald/values.schema.json @@ -1 +1 @@ -{"$schema":"http://json-schema.org/draft-07/schema#","title":"Values","type":"object","properties":{"agent":{"type":"object","properties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"array","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"hpa":{"type":"object","properties":{"enabled":{"type":"boolean"},"targetCPUUtilizationPercentage":{"type":"integer"}}},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"initContainers":{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["StatefulSet","Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxReplicas":{"type":"integer","minimum":0},"maxUnavailable":{"type":"string"},"minReplicas":{"type":"integer","minimum":0},"name":{"type":"string"},"ngt":{"type":"object","properties":{"auto_index_check_duration":{"type":"string"},"auto_index_duration_limit":{"type":"string"},"auto_index_length":{"type":"integer"},"auto_save_index_duration":{"type":"string"},"bulk_insert_chunk_size":{"type":"integer"},"creation_edge_size":{"type":"integer"},"default_epsilon":{"type":"number"},"default_pool_size":{"type":"integer"},"default_radius":{"type":"number"},"dimension":{"type":"integer","minimum":1},"distance_type":{"type":"str
ing","enum":["l1","l2","angle","hamming","cos","cosine","normalizedangle","normalizedcosine","jaccard"]},"enable_in_memory_mode":{"type":"boolean"},"enable_proactive_gc":{"type":"boolean"},"index_path":{"type":"string"},"initial_delay_max_duration":{"type":"string"},"load_index_timeout_factor":{"type":"string"},"max_load_index_timeout":{"type":"string"},"min_load_index_timeout":{"type":"string"},"object_type":{"type":"string","enum":["float","uint8"]},"search_edge_size":{"type":"integer"}}},"nodeName":{"type":"string"},"nodeSelector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"persistentVolume":{"type":"object","properties":{"accessMode":{"type":"string"},"enabled":{"type":"boolean"},"size":{"type":"string"},"storageClass":{"type":"string"}}},"podAnnotations":{"type":"object"},"podManagementPolicy":{"type":"string","enum":["OrderedReady","Parallel"]},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"integer"}}},"progressDeadlineSeconds":{"type":"integer"},"resources":{"type":"object","
properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","minimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"},"partition":{"type":"integer"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host"
:{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"labels":{"type":"object"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"sidecar":{"type":"object","properties":{"config":{"type":"object","properties":{"auto_backup_duration":{"type":"string"},"auto_backup_enabled":{"type":"boolean"},"blob_storage":{"type":"object","properties":{"bucket":{"type":"string"},"s3":{"type":"object","properties":{"access_key":{"type":"string"},"enable_100_continue":{"type":"boolean"},"enable_content_md5_validation":{"type":"boolean"},"enable_endpoint_discovery":{"type":"boolean"},"enable_endpoint_host_prefix":{"type":"boolean"},"enable_param_validation":{"type":"boolean"},"enable_ssl":{"type":"boolean"},"endpoint":{"type":"string"},"force_path_style":{"type":"boolean"},"max_chunk_size":{"type":"string","pattern":"^[0-9]+(kb|mb|gb)$"},"max_part_size":{"type":"string","pattern":"^[0-9]+(kb|mb|gb)$"},"max_retries":{"type":"integer"},"region":{"type":"string"},"secret_access_key":{"type":"string"},"token":{"type":"string"},"use_accelerate":{"type":"boolean"},"use_arn_region":{"type":"boolean"},"use_dual_stack":{"type":"boolean"}}},"storage_type":{"type":"string","enum":["s3"]}}},"client":{"type":"object","properties":{"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"transport":{"type":"object","properties":{"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"st
ring"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"round_tripper":{"type":"object","properties":{"expect_continue_timeout":{"type":"string"},"force_attempt_http_2":{"type":"boolean"},"idle_conn_timeout":{"type":"string"},"max_conns_per_host":{"type":"integer"},"max_idle_conns":{"type":"integer"},"max_idle_conns_per_host":{"type":"integer"},"max_response_header_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"response_header_timeout":{"type":"string"},"tls_handshake_timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}}}}}},"compress":{"type":"object","properties":{"compress_algorithm":{"type":"string","enum":["gob","gzip","lz4","zstd"]},"compression_level":{"type":"integer"}}},"filename":{"type":"string"},"filename_suffix":{"type":"string"},"post_stop_timeout":{"type":"string"},"restore_backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"watch_enabled":{"type":"boolean"}}},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"initContainerEnabled":{"type":"boolean"},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"name":{"type":"string"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"rep
orting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"ty
pe":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"enabled":{"type":"boolean"},"externalTrafficPolicy":{"type":"string"},"labels":{"type":"object"},"type":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]}}},"time_zone":{"type":"string"},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"}}},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}},"backupManager":{"type":"object","properties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"array","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"
requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"cassandra":{"type":"object","properties":{"config":{"type":"object","properties":{"connect_timeout":{"type":"string"},"consistency":{"type":"string","enum":["any","one","two","three","all","quorum","localquorum","eachquorum","localone"]},"cql_version":{"type":"string"},"default_idempotence":{"type":"boolean"},"default_timestamp":{"type":"boolean"},"disable_initial_host_lookup":{"type":"boolean"},"disable_node_status_events":{"type":"boolean"},"disable_skip_metadata":{"type":"boolean"},"disable_topology_events":{"type":"boolean"},"enable_host_verification":{"type":"boolean"},"host_filter":{"type":"object","properties":{"data_center":{"type":"string"},"enabled":{"type":"boolean"},"white_list":{"type":"array","items":{"type":"string"}}}},"hosts":{"type":"array","items":{"type":"string"}},"ignore_peer_addr":{"type":"boolean"},"keyspace":{"type":"string"},"max_prepared_stmts":{"type":"integer"},"max_routing_key_info":{"type":"integer"},"max_wait_schema_agreement":{"type":"string"},"meta_table":{"type":"string"},"num_conns":{"type":"integer"},"page_size":{"type":"integer"},"password":{"type":"string"},"pool_config":{"type":"object","properties":{"data_center":{"type":"string"},"dc_aware_routing":{"type":"boolean"},"non_local_replicas_fallback":{"type":"boolean"},"shuffle_replicas":{"type":"boolean"},"token_aware_host_policy":{"type":"boolean"}}},"port":{"type":"integer"},"proto_version":{"type":"integer"},"reconnect_interval":{"type":"string"},"reconnection_policy":{"type":"object","properties":{"initial_interval":{"type":"string"},"max_retries":{"type":"integer"}}},"retry_policy":{"type":"object","properties":{"max_duration":{"type":"string"},"min_duration":{"type":"string"},"num_retries":{"type":"integer"}}},"serial_consistency":{"type":"string","enum":["localserial","serial"]},"socket_keepalive":{"type":"string"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"username":{"type":"string"},"write_coalesce_wait_time":{"type":"string"}}},"enabled":{"type":"boolean"}}},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"hpa":{"type":"object","properties":{"enabled":{"type":"boolean"},"targetCPUUtilizationPercentage":{"type":"integer"}}},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"initContainers":{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw
","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxReplicas":{"type":"integer","minimum":0},"maxUnavailable":{"type":"string"},"minReplicas":{"type":"integer","minimum":0},"mysql":{"type":"object","properties":{"config":{"type":"object","properties":{"conn_max_life_time":{"type":"string"},"db":{"type":"string","enum":["mysql","postgres","sqlite3"]},"host":{"type":"string"},"max_idle_conns":{"type":"integer"},"max_open_conns":{"type":"integer"},"name":{"type":"string"},"pass":{"type":"string"},"port":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"user":{"type":"string"}}},"enabled":{"type":"boolean"}}},"name":{"type":"string"},"nodeName":{"type":"string"},"nodeSelector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"podAnnotations":{"type":"object"},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"
integer"}}},"progressDeadlineSeconds":{"type":"integer"},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","minimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"o
bject","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"labels":{"type":"object"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}},"compressor":{"type":"object","properties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"array","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"backup":{"type":"object","properties":{"client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"ty
pe":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}}}},"compress":{"type":"object","properties":{"compress_algorithm":{"type":"string","enum":["gob","gzip","lz4","zstd"]},"compression_level":{"type":"integer"},"concurrent_limit":{"type":"integer"},"queue_check_duration":{"type":"string"}}},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"hpa":{"type":"object","properties":{"enabled":{"type":"boolean"},"targetCPUUtilizationPercentage":{"type":"integer"}}},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"initContainers":{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxReplicas":{"type":"integer","minimum":0},"maxUnavailable":{"type":"string"},"minReplicas":{"type":"integer","minimum":0},"name":{"type":"string"},"nodeName":{"type":"string"},"nodeSelector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{
"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"podAnnotations":{"type":"object"},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"integer"}}},"progressDeadlineSeconds":{"type":"integer"},"registerer":{"type":"object","properties":{"compressor":{"type":"object","properties":{"client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"obje
ct","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}}}},"concurrent_limit":{"type":"integer"},"queue_check_duration":{"type":"string"}}},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","minimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolea
n"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"labels":{"type":"object"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}},"defaults":{"type":"object","properties":{"grpc":{"type":"object","properties":{"client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duratio
n":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}}}},"image":{"type":"object","properties":{"tag":{"type":"string"}}},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_e
nabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_
timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"time_zone":{"type":"string"}}},"discoverer":{"type":"object","properties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"array","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"clusterRole":{"type":"object","properties":{"enabled":{"type":"boolean"},"name":{"type":"string"}}},"clusterRoleBinding":{"type":"object","properties":{"enabled":{"type":"boolean"},"name":{"type":"string"}}},"discoverer":{"type":"object","properties":{"discovery_duration":{"type":"string"},"name":{"type":"string"},"namespace":{"type":"string"}}},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"hpa":{"type":"object","properties":{"enabled":{"type":"boolean"},"targetCPUUtilizationPercentage":{"type":"integer"}}},"image":{"type":"o
bject","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"initContainers":{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxReplicas":{"type":"integer","minimum":0},"maxUnavailable":{"type":"string"},"minReplicas":{"type":"integer","minimum":0},"name":{"type":"string"},"nodeName":{"type":"string"},"nodeSelector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"podAnnotations":{"type":"object"},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"integer"}}},"progressDeadlineSeconds":{"type":"integer"},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","mi
nimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properti
es":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"labels":{"type":"object"}}},"serviceAccount":{"type":"object","properties":{"enabled":{"type":"boolean"},"name":{"type":"string"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}},"gateway":{"type":"object","properties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"array","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"filter":{"type":"object","properties":{"egress":{"type":"array","items":{"type":"string"}},"ingress":{"type":"array","items":{"type":"string"}}}},"gateway_config":{"type":"object","properties":{"agent_namespace":{"type":"string"},"backup":{"type":"object","properties":{"client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string
"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}}}},"discoverer":{"type":"object","properties":{"agent_client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"ty
pe":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}},"discover_client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}},"duration":{"type":"string"}}},"index_replica":{"type":"integer","minimum":1},"meta":{"type":"object","properties":{"cache_expiration":{"type":"string"},"client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string
"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}},"enable_cache":{"type":"boolean"},"expired_cache_check_duration":{"type":"string"}}},"node_namespace":{"type":"string"}}},"hpa":{"type":"object","properties":{"enabled":{"type":"boolean"},"targetCPUUtilizationPercentage":{"type":"integer"}}},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"ingress":{"type":"object","properties":{"annotations":{"type":"object"},"enabled":{"type":"boolean"},"host":{"type":"string"},"servicePort":{"type":"string"}}},"initContainers":{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxReplicas":{"type":"integer","minimum":0},"maxUnavailable":{"type":"string"},"minReplicas":{"type":"integer","minimum":0},"name":{"type":"string"},"nodeName":{"type":"string"},"nodeSelector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"t
ype":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"podAnnotations":{"type":"object"},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"integer"}}},"progressDeadlineSeconds":{"type":"integer"},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","minimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"objec
t","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"labels":{"type":"object"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}},"indexManager":{"type":"object","pr
operties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"array","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"indexer":{"type":"object","properties":{"agent_namespace":{"type":"string"},"auto_index_check_duration":{"type":"string"},"auto_index_duration_limit":{"type":"string"},"auto_index_length":{"type":"integer"},"concurrency":{"type":"integer","minimum":1},"creation_pool_size":{"type":"integer"},"discoverer":{"type":"object","properties":{"agent_client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}},"discover_client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type"
:"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}},"duration":{"type":"string"}}},"node_name":{"type":"string"}}},"initContainers":{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxUnavailable":{"type":"string"},"name":{"type":"string"},"nodeName":{"type":"string"},"nodeSelector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"a
uthentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"podAnnotations":{"type":"object"},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"integer"}}},"progressDeadlineSeconds":{"type":"integer"},"replicas":{"type":"integer","minimum":0},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","minimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"hand
ler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"labels":{"type":"object"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone
":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}},"meta":{"type":"object","properties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"array","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"cassandra":{"type":"object","properties":{"config":{"type":"object","properties":{"connect_timeout":{"type":"string"},"consistency":{"type":"string","enum":["any","one","two","three","all","quorum","localquorum","eachquorum","localone"]},"cql_version":{"type":"string"},"default_idempotence":{"type":"boolean"},"default_timestamp":{"type":"boolean"},"disable_initial_host_lookup":{"type":"boolean"},"disable_node_status_events":{"type":"boolean"},"disable_skip_metadata":{"type":"boolean"},"disable_topology_events":{"type":"boolean"},"enable_host_verification":{"type":"boolean"},"host_filter":{"type":"object","properties":{"data_center":{"type":"string"},"enabled":{"type":"boolean"},"white_list":{"type":"array","items":{"type":"string"}}}},"hosts":{"type":"array","items":{"type":"string"}},"ignore_peer_addr":{"type":"boolean"},"keyspace":{"type":"string"},"max_prepared_stmts":{"type":"integer"},"max_routing_key_info":{"type":"integer"},"max_wait_schema_agreement":{"type":"string"},"meta_table":{"type":"string"},"num_conns":{"type":"integer"},"page_size":{"type":"integer"},"password":{"type":"string"},"pool_config":{"type":"object","properties":{"data_center":{"type":"string"},"dc_aware_routing":{"type":"boolean"},"non_local_replicas_fallback":{"type":"boolean"},"shuffle_replicas":{"type":"boolean"},"token_aware_host_policy":{"type":"boolean"}}},"port":{"type":"integer"},"proto_version":{"type":"integer"},"reconnect_interval":{"type":"string"},"reconnection_policy":{"type":"object","properties":{"initial_interval":{"type":"string"},"max_retries":{"type":"integer"}}},"retry_policy":{"type":"object","properties":{"max_duration":{"type":"string"},"min_duration":{"type":"string"},"num_retries":{"type":"integer"}}},"serial_consistency":{"type":"string","enum":["localserial","serial"]},"socket_keepalive":{"type":"string"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"tls":{"type":"objec
t","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"username":{"type":"string"},"write_coalesce_wait_time":{"type":"string"}}},"enabled":{"type":"boolean"}}},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"hpa":{"type":"object","properties":{"enabled":{"type":"boolean"},"targetCPUUtilizationPercentage":{"type":"integer"}}},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"initContainers":{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxReplicas":{"type":"integer","minimum":0},"maxUnavailable":{"type":"string"},"minReplicas":{"type":"integer","minimum":0},"name":{"type":"string"},"nodeName":{"type":"string"},"nodeSelector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"
type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"podAnnotations":{"type":"object"},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"integer"}}},"progressDeadlineSeconds":{"type":"integer"},"redis":{"type":"object","properties":{"config":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"db":{"type":"integer"},"dial_timeout":{"type":"string"},"idle_check_frequency":{"type":"string"},"idle_timeout":{"type":"string"},"key_pref":{"type":"string"},"kv_prefix":{"type":"string"},"max_conn_age":{"type":"string"},"max_redirects":{"type":"integer"},"max_retries":{"type":"integer"},"max_retry_backoff":{"type":"string"},"min_idle_conns":{"type":"integer"},"min_retry_backoff":{"type":"string"},"password":{"type":"string"},"pool_size":{"type":"integer"},"pool_timeout":{"type":"string"},"prefix_delimiter":{"type":"string"},"read_only":{"type":"boolean"},"read_timeout":{"type":"string"},"route_by_latency":{"type":"boolean"},"route_randomly":{"type":"boolean"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"vk_prefix":{"type":"string"},"write_timeout":{"type":"string"}}},"enabled":{"type":"boolean"}}},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","minimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"t
ype":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"an
notations":{"type":"object"},"labels":{"type":"object"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}}}} +{"$schema":"http://json-schema.org/draft-07/schema#","title":"Values","type":"object","properties":{"agent":{"type":"object","properties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"array","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"hpa":{"type":"object","properties":{"enabled":{"type":"boolean"},"targetCPUUtilizationPercentage":{"type":"integer"}}},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"initContainers":{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["StatefulSet","Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxReplicas":{"type":"integer","minimum":0},"maxUnavailable":{"type":"string"},"minReplicas":{"type":"integer","minimum":0},"name":{"type":"string"},"ngt":{"type":"object","properties":{"auto_index_check_duration":{"type":"string"},"auto_index_duration_limit":{"type":"string"},"auto_index_length":{"type":"integer"},"auto_save_index_duration":{"type":"string"},"bulk_insert_chunk_size":{"type":"integer"},"creation_edge_size":{"type":"integer"},"default_epsilon":{"type":"number"},"default_pool_size":{"type":"integer"},"default_radius":{"type":"number"},"dimension":{"type":"integer","minimum":1},"distance_type":{"type":"string","enum":["l1","l2","angle","hamming","cos","cosine","normalizedangle","normalizedcosine","jaccard"]},"enable_in_memory_mode":{"type":"boolean"},"enable_proactive_gc":{"type":"boolean"},"index_path":{"type":"string"},"initial_delay_max_duration":{"type":"string"},"load_index_timeout_factor":{"type":"string"},"max_load_index_timeout":{"type":"string"},"min_load_index_timeout":{"type":"string"},"object_type":{"type":"string","enum":["float","uint8"]},"search_edge_size":{"type":"integer"}}},"nodeName":{"type":"string"},"nodeSelector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"s
tring"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"persistentVolume":{"type":"object","properties":{"accessMode":{"type":"string"},"enabled":{"type":"boolean"},"size":{"type":"string"},"storageClass":{"type":"string"}}},"podAnnotations":{"type":"object"},"podManagementPolicy":{"type":"string","enum":["OrderedReady","Parallel"]},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"integer"}}},"progressDeadlineSeconds":{"type":"integer"},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","minimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"},"partition":{"type":"integer"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string
"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_
size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"labels":{"type":"object"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"sidecar":{"type":"object","properties":{"config":{"type":"object","properties":{"auto_backup_duration":{"type":"string"},"auto_backup_enabled":{"type":"boolean"},"blob_storage":{"type":"object","properties":{"bucket":{"type":"string"},"s3":{"type":"object","properties":{"access_key":{"type":"string"},"enable_100_continue":{"type":"boolean"},"enable_content_md5_validation":{"type":"boolean"},"enable_endpoint_discovery":{"type":"boolean"},"enable_endpoint_host_prefix":{"type":"boolean"},"enable_param_validation":{"type":"boolean"},"enable_ssl":{"type":"boolean"},"endpoint":{"type":"string"},"force_path_style":{"type":"boolean"},"max_chunk_size":{"type":"string","pattern":"^[0-9]+(kb|mb|gb)$"},"max_part_size":{"type":"string","pattern":"^[0-9]+(kb|mb|gb)$"},"max_retries":{"type":"integer"},"region":{"type":"string"},"secret_access_key":{"type":"string"},"token":{"type":"string"},"use_accelerate":{"type":"boolean"},"use_arn_region":{"type":"boolean"},"use_dual_stack":{"type":"boolean"}}},"storage_type":{"type":"string","enum":["s3"]}}},"client":{"type":"object","properties":{"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"transport":{"type":"object","properties":{"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"round_tripper":{"type":"object","properties":{"expect_continue_timeout":{"type":"string"},"force_attempt_http_2":{"type":"boolean"},"idle_conn_timeout":{"type":"string"},"max_conns_per_host":{"type":"integer"},"max_idle_conns":{"type":"integer"},"max_idle_conns_per_host":{"type":"integer"},"max_response_header_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"response_header_timeout":{"type":"string"},"tls_handshake_timeout":{"type":"string"},"write_buffer_size":
{"type":"integer"}}}}}}},"compress":{"type":"object","properties":{"compress_algorithm":{"type":"string","enum":["gob","gzip","lz4","zstd"]},"compression_level":{"type":"integer"}}},"filename":{"type":"string"},"filename_suffix":{"type":"string"},"post_stop_timeout":{"type":"string"},"restore_backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"watch_enabled":{"type":"boolean"}}},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"initContainerEnabled":{"type":"boolean"},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"name":{"type":"string"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_i
d":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"inte
ger","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"enabled":{"type":"boolean"},"externalTrafficPolicy":{"type":"string"},"labels":{"type":"object"},"type":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]}}},"time_zone":{"type":"string"},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"}}},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}},"backupManager":{"type":"object","properties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"array","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"cassandra":{"type":"object","properties":{"config":{"type":"object","properties":{"connect_timeout":{"type":"string"},"consistency":{"type":"string","enum":["any","one","two","three","all","quorum","localquorum","eachquorum","localone"]},"cql_version":{"type":"string"},"default_idempotence":{"type":"bool
ean"},"default_timestamp":{"type":"boolean"},"disable_initial_host_lookup":{"type":"boolean"},"disable_node_status_events":{"type":"boolean"},"disable_skip_metadata":{"type":"boolean"},"disable_topology_events":{"type":"boolean"},"enable_host_verification":{"type":"boolean"},"host_filter":{"type":"object","properties":{"data_center":{"type":"string"},"enabled":{"type":"boolean"},"white_list":{"type":"array","items":{"type":"string"}}}},"hosts":{"type":"array","items":{"type":"string"}},"ignore_peer_addr":{"type":"boolean"},"keyspace":{"type":"string"},"max_prepared_stmts":{"type":"integer"},"max_routing_key_info":{"type":"integer"},"max_wait_schema_agreement":{"type":"string"},"vector_backup_table":{"type":"string"},"num_conns":{"type":"integer"},"page_size":{"type":"integer"},"password":{"type":"string"},"pool_config":{"type":"object","properties":{"data_center":{"type":"string"},"dc_aware_routing":{"type":"boolean"},"non_local_replicas_fallback":{"type":"boolean"},"shuffle_replicas":{"type":"boolean"},"token_aware_host_policy":{"type":"boolean"}}},"port":{"type":"integer"},"proto_version":{"type":"integer"},"reconnect_interval":{"type":"string"},"reconnection_policy":{"type":"object","properties":{"initial_interval":{"type":"string"},"max_retries":{"type":"integer"}}},"retry_policy":{"type":"object","properties":{"max_duration":{"type":"string"},"min_duration":{"type":"string"},"num_retries":{"type":"integer"}}},"serial_consistency":{"type":"string","enum":["localserial","serial"]},"socket_keepalive":{"type":"string"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"username":{"type":"string"},"write_coalesce_wait_time":{"type":"string"}}},"enabled":{"type":"boolean"}}},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"hpa":{"type":"object","properties":{"enabled":{"type":"boolean"},"targetCPUUtilizationPercentage":{"type":"integer"}}},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"initContainers":{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxReplicas":{"type":"integer","minimum":0},"maxUnavailable":{"type":"string"},"minReplicas":{"type":"integer","minimum":0},"mysql":{"type":"object","properties":{"config":{"type":"object","properties":{"conn_max_life_time":{"type":"string"},"db":{"type":"string","enum":["mysql","postgres","sqlite3"]},"host":{"type":"string"},"max_idle_conns":{"type":"integer"},"max_open_conns":{"type":"integer"},"name":{"type":"string"},"pass":{"type":"string"},"port":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"stri
ng"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"user":{"type":"string"}}},"enabled":{"type":"boolean"}}},"name":{"type":"string"},"nodeName":{"type":"string"},"nodeSelector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"podAnnotations":{"type":"object"},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"integer"}}},"progressDeadlineSeconds":{"type":"integer"},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","minimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"obje
ct","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}
},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"labels":{"type":"object"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}},"compressor":{"type":"object","properties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"array","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"backup":{"type":"object","properties":{"client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"strin
g"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}}}},"compress":{"type":"object","properties":{"compress_algorithm":{"type":"string","enum":["gob","gzip","lz4","zstd"]},"compression_level":{"type":"integer"},"concurrent_limit":{"type":"integer"},"queue_check_duration":{"type":"string"}}},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"hpa":{"type":"object","properties":{"enabled":{"type":"boolean"},"targetCPUUtilizationPercentage":{"type":"integer"}}},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"initContainers":{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxReplicas":{"type":"integer","minimum":0},"maxUnavailable":{"type":"string"},"minReplicas":{"type":"integer","minimum":0},"name":{"type":"string"},"nodeName":{"type":"string"},"nodeSelector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items
":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"podAnnotations":{"type":"object"},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"integer"}}},"progressDeadlineSeconds":{"type":"integer"},"registerer":{"type":"object","properties":{"compressor":{"type":"object","properties":{"client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}}}},"concurrent_limit":{"type":"integer"},"queue_check_duration":{"type":"string"}}},"resources":{"type":"object","properties
":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","minimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"intege
r","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"labels":{"type":"object"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}},"defaults":{"type":"object","properties":{"grpc":{"type":"object","properties":{"client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeou
t":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}}}},"image":{"type":"object","properties":{"tag":{"type":"string"}}},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"server_config"
:{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"
initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"time_zone":{"type":"string"}}},"discoverer":{"type":"object","properties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"array","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"clusterRole":{"type":"object","properties":{"enabled":{"type":"boolean"},"name":{"type":"string"}}},"clusterRoleBinding":{"type":"object","properties":{"enabled":{"type":"boolean"},"name":{"type":"string"}}},"discoverer":{"type":"object","properties":{"discovery_duration":{"type":"string"},"name":{"type":"string"},"namespace":{"type":"string"}}},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"hpa":{"type":"object","properties":{"enabled":{"type":"boolean"},"targetCPUUtilizationPercentage":{"type":"integer"}}},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"initContainers":{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxReplicas":{"type":"integer","minimum":0},"maxUnavailable":{"type":"string"},"minReplicas":{"type":"integer","minimum":0},"name":{"type":"string"},"nodeName":{"type":"string"},"node
Selector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"podAnnotations":{"type":"object"},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"integer"}}},"progressDeadlineSeconds":{"type":"integer"},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","minimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"i
nteger"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"in
teger"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"labels":{"type":"object"}}},"serviceAccount":{"type":"object","properties":{"enabled":{"type":"boolean"},"name":{"type":"string"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}},"gateway":{"type":"object","properties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"array","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"filter":{"type":"object","properties":{"egress":{"type":"array","items":{"type":"string"}},"ingress":{"type":"array","items":{"type":"string"}}}},"gateway_config":{"type":"object","properties":{"agent_namespace":{"type":"string"},"backup":{"type":"object","properties":{"client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_
connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}}}},"discoverer":{"type":"object","properties":{"agent_client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}},"discover_client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initia
l_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}},"duration":{"type":"string"}}},"index_replica":{"type":"integer","minimum":1},"meta":{"type":"object","properties":{"cache_expiration":{"type":"string"},"client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_
check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}},"enable_cache":{"type":"boolean"},"expired_cache_check_duration":{"type":"string"}}},"node_namespace":{"type":"string"}}},"hpa":{"type":"object","properties":{"enabled":{"type":"boolean"},"targetCPUUtilizationPercentage":{"type":"integer"}}},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"ingress":{"type":"object","properties":{"annotations":{"type":"object"},"enabled":{"type":"boolean"},"host":{"type":"string"},"servicePort":{"type":"string"}}},"initContainers":{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxReplicas":{"type":"integer","minimum":0},"maxUnavailable":{"type":"string"},"minReplicas":{"type":"integer","minimum":0},"name":{"type":"string"},"nodeName":{"type":"string"},"nodeSelector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_
logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"podAnnotations":{"type":"object"},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"integer"}}},"progressDeadlineSeconds":{"type":"integer"},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","minimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"ho
st":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"labels":{"type":"object"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}},"indexManager":{"type":"object","properties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"array","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringE
xecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"indexer":{"type":"object","properties":{"agent_namespace":{"type":"string"},"auto_index_check_duration":{"type":"string"},"auto_index_duration_limit":{"type":"string"},"auto_index_length":{"type":"integer"},"concurrency":{"type":"integer","minimum":1},"creation_pool_size":{"type":"integer"},"discoverer":{"type":"object","properties":{"agent_client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}},"discover_client":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"backoff":{"type":"object","properties":{"backoff_factor":{"type":"number"},"backoff_time_limit":{"type":"string"},"enable_error_log":{"type":"boolean"},"initial_duration":{"type":"string"},"jitter_limit":{"type":"string"},"maximum_duration":{"type":"string"},"retry_count":{"type":"integer"}}},"call_option":{"type":"object"},"connection_pool":{"type":"object","properties":{"enable_dns_resolver":{"type":"boolean"},"enable_rebalance":{"type":"boolean"},"old_conn_close_duration":{"type":"string"},"rebalance_duration":{"type":"string"},"size":{"type":"integer"}}},"dial_option":{"type":"object","properties":{"enable_backoff":{"type":"boolean"},"
initial_connection_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"insecure":{"type":"boolean"},"keep_alive":{"type":"object","properties":{"permit_without_stream":{"type":"boolean"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_backoff_delay":{"type":"string"},"max_msg_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"write_buffer_size":{"type":"integer"}}},"health_check_duration":{"type":"string"},"max_recv_msg_size":{"type":"integer"},"max_retry_rpc_buffer_size":{"type":"integer"},"max_send_msg_size":{"type":"integer"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"wait_for_ready":{"type":"boolean"}}},"duration":{"type":"string"}}},"node_name":{"type":"string"}}},"initContainers":{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxUnavailable":{"type":"string"},"name":{"type":"string"},"nodeName":{"type":"string"},"nodeSelector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{
"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"podAnnotations":{"type":"object"},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"integer"}}},"progressDeadlineSeconds":{"type":"integer"},"replicas":{"type":"integer","minimum":0},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","minimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"ty
pe":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"labels":{"type":"object"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}},"meta":{"type":"object","properties":{"affinity":{"type":"object","properties":{"nodeAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object","properties":{"nodeSelectorTerms":{"type":"arra
y","items":{"type":"object"}}}}}},"podAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}},"podAntiAffinity":{"type":"object","properties":{"preferredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}},"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array","items":{"type":"object"}}}}}},"annotations":{"type":"object"},"cassandra":{"type":"object","properties":{"config":{"type":"object","properties":{"connect_timeout":{"type":"string"},"consistency":{"type":"string","enum":["any","one","two","three","all","quorum","localquorum","eachquorum","localone"]},"cql_version":{"type":"string"},"default_idempotence":{"type":"boolean"},"default_timestamp":{"type":"boolean"},"disable_initial_host_lookup":{"type":"boolean"},"disable_node_status_events":{"type":"boolean"},"disable_skip_metadata":{"type":"boolean"},"disable_topology_events":{"type":"boolean"},"enable_host_verification":{"type":"boolean"},"host_filter":{"type":"object","properties":{"data_center":{"type":"string"},"enabled":{"type":"boolean"},"white_list":{"type":"array","items":{"type":"string"}}}},"hosts":{"type":"array","items":{"type":"string"}},"ignore_peer_addr":{"type":"boolean"},"keyspace":{"type":"string"},"max_prepared_stmts":{"type":"integer"},"max_routing_key_info":{"type":"integer"},"max_wait_schema_agreement":{"type":"string"},"vector_backup_table":{"type":"string"},"num_conns":{"type":"integer"},"page_size":{"type":"integer"},"password":{"type":"string"},"pool_config":{"type":"object","properties":{"data_center":{"type":"string"},"dc_aware_routing":{"type":"boolean"},"non_local_replicas_fallback":{"type":"boolean"},"shuffle_replicas":{"type":"boolean"},"token_aware_host_policy":{"type":"boolean"}}},"port":{"type":"integer"},"proto_version":{"type":"integer"},"reconnect_interval":{"type":"string"},"reconnection_policy":{"type":"object","properties":{"initial_interval":{"type":"string"},"max_retries":{"type":"integer"}}},"retry_policy":{"type":"object","properties":{"max_duration":{"type":"string"},"min_duration":{"type":"string"},"num_retries":{"type":"integer"}}},"serial_consistency":{"type":"string","enum":["localserial","serial"]},"socket_keepalive":{"type":"string"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"timeout":{"type":"string"},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"username":{"type":"string"},"write_coalesce_wait_time":{"type":"string"}}},"enabled":{"type":"boolean"}}},"enabled":{"type":"boolean"},"env":{"type":"array","items":{"type":"object"}},"externalTrafficPolicy":{"type":"string"},"hpa":{"type":"object","properties":{"enabled":{"type":"boolean"},"targetCPUUtilizationPercentage":{"type":"integer"}}},"image":{"type":"object","properties":{"pullPolicy":{"type":"string","enum":["Always","Never","IfNotPresent"]},"repository":{"type":"string"},"tag":{"type":"string"}}},"initContainers"
:{"type":"array","items":{"type":"object"}},"kind":{"type":"string","enum":["Deployment","DaemonSet"]},"logging":{"type":"object","properties":{"format":{"type":"string","enum":["raw","json"]},"level":{"type":"string","enum":["debug","info","warn","error","fatal"]},"logger":{"type":"string","enum":["glg"]}}},"maxReplicas":{"type":"integer","minimum":0},"maxUnavailable":{"type":"string"},"minReplicas":{"type":"integer","minimum":0},"name":{"type":"string"},"nodeName":{"type":"string"},"nodeSelector":{"type":"object"},"observability":{"type":"object","properties":{"collector":{"type":"object","properties":{"duration":{"type":"string"},"metrics":{"type":"object","properties":{"enable_cgo":{"type":"boolean"},"enable_goroutine":{"type":"boolean"},"enable_memory":{"type":"boolean"},"enable_version_info":{"type":"boolean"},"version_info_labels":{"type":"array","items":{"type":"string","enum":["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","cgo_enabled","ngt_version","build_cpu_info_flags"]}}}}}},"enabled":{"type":"boolean"},"jaeger":{"type":"object","properties":{"agent_endpoint":{"type":"string"},"buffer_max_count":{"type":"integer"},"collector_endpoint":{"type":"string"},"enabled":{"type":"boolean"},"password":{"type":"string"},"service_name":{"type":"string"},"username":{"type":"string"}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"endpoint":{"type":"string"},"namespace":{"type":"string"}}},"stackdriver":{"type":"object","properties":{"client":{"type":"object","properties":{"api_key":{"type":"string"},"audiences":{"type":"array","items":{"type":"string"}},"authentication_enabled":{"type":"boolean"},"credentials_file":{"type":"string"},"credentials_json":{"type":"string"},"endpoint":{"type":"string"},"quota_project":{"type":"string"},"request_reason":{"type":"string"},"scopes":{"type":"array","items":{"type":"string"}},"telemetry_enabled":{"type":"boolean"},"user_agent":{"type":"string"}}},"exporter":{"type":"object","properties":{"bundle_count_threshold":{"type":"integer"},"bundle_delay_threshold":{"type":"string"},"location":{"type":"string"},"metric_prefix":{"type":"string"},"monitoring_enabled":{"type":"boolean"},"number_of_workers":{"type":"integer"},"reporting_interval":{"type":"string"},"skip_cmd":{"type":"boolean"},"timeout":{"type":"string"},"trace_spans_buffer_max_bytes":{"type":"integer"},"tracing_enabled":{"type":"boolean"}}},"profiler":{"type":"object","properties":{"alloc_force_gc":{"type":"boolean"},"alloc_profiling":{"type":"boolean"},"api_addr":{"type":"string"},"cpu_profiling":{"type":"boolean"},"debug_logging":{"type":"boolean"},"enabled":{"type":"boolean"},"goroutine_profiling":{"type":"boolean"},"heap_profiling":{"type":"boolean"},"instance":{"type":"string"},"mutex_profiling":{"type":"boolean"},"service":{"type":"string"},"service_version":{"type":"string"},"zone":{"type":"string"}}},"project_id":{"type":"string"}}},"trace":{"type":"object","properties":{"enabled":{"type":"boolean"},"sampling_rate":{"type":"number"}}}}},"podAnnotations":{"type":"object"},"podPriority":{"type":"object","properties":{"enabled":{"type":"boolean"},"value":{"type":"integer"}}},"progressDeadlineSeconds":{"type":"integer"},"redis":{"type":"object","properties":{"config":{"type":"object","properties":{"addrs":{"type":"array","items":{"type":"string"}},"db":{"type":"integer"},"dial_timeout":{"type":"string"},"idle_check_frequency":{"type":"string"},"idle_timeout":{"type":"string"},"key_pref":{"type":"string"},"kv_prefix":{"type":"
string"},"max_conn_age":{"type":"string"},"max_redirects":{"type":"integer"},"max_retries":{"type":"integer"},"max_retry_backoff":{"type":"string"},"min_idle_conns":{"type":"integer"},"min_retry_backoff":{"type":"string"},"password":{"type":"string"},"pool_size":{"type":"integer"},"pool_timeout":{"type":"string"},"prefix_delimiter":{"type":"string"},"read_only":{"type":"boolean"},"read_timeout":{"type":"string"},"route_by_latency":{"type":"boolean"},"route_randomly":{"type":"boolean"},"tcp":{"type":"object","properties":{"dialer":{"type":"object","properties":{"dual_stack_enabled":{"type":"boolean"},"keep_alive":{"type":"string"},"timeout":{"type":"string"}}},"dns":{"type":"object","properties":{"cache_enabled":{"type":"boolean"},"cache_expiration":{"type":"string"},"refresh_duration":{"type":"string"}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}},"vk_prefix":{"type":"string"},"write_timeout":{"type":"string"}}},"enabled":{"type":"boolean"}}},"resources":{"type":"object","properties":{"limits":{"type":"object"},"requests":{"type":"object"}}},"revisionHistoryLimit":{"type":"integer","minimum":0},"rollingUpdate":{"type":"object","properties":{"maxSurge":{"type":"string"},"maxUnavailable":{"type":"string"}}},"server_config":{"type":"object","properties":{"full_shutdown_duration":{"type":"string"},"healths":{"type":"object","properties":{"liveness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"livenessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timetoutSeconds":{"type":"integer"}}},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"readiness":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"readinessProbe":{"type":"object","properties":{"failureThreshold":{"type":"integer"},"httpGet":{"type":"object","properties":{"path":{"type":"string"},"port":{"type":"string"},"scheme":{"type":"string"}}},"initialDelaySeconds":{"type":"integer"},"periodSeconds":{"type":"integer"},"successThreshold":{"type":"integer"},"timeoutSeconds":{"type":"integer"}}},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"metrics":{"type":"object","properties":{"pprof":{"type":"object","properties":{"enabled":{"type":"boo
lean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"prometheus":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"http":{"type":"object","properties":{"handler_timeout":{"type":"string"},"idle_timeout":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"servers":{"type":"object","properties":{"grpc":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"grpc":{"type":"object","properties":{"bidirectional_stream_concurrency":{"type":"integer"},"connection_timeout":{"type":"string"},"header_table_size":{"type":"integer"},"initial_conn_window_size":{"type":"integer"},"initial_window_size":{"type":"integer"},"interceptors":{"type":"array"},"keepalive":{"type":"object","properties":{"max_conn_age":{"type":"string"},"max_conn_age_grace":{"type":"string"},"max_conn_idle":{"type":"string"},"time":{"type":"string"},"timeout":{"type":"string"}}},"max_header_list_size":{"type":"integer"},"max_receive_message_size":{"type":"integer"},"max_send_message_size":{"type":"integer"},"read_buffer_size":{"type":"integer"},"write_buffer_size":{"type":"integer"}}},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"restart":{"type":"boolean"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}},"rest":{"type":"object","properties":{"enabled":{"type":"boolean"},"host":{"type":"string"},"port":{"type":"integer","maximum":65535,"minimum":0},"server":{"type":"object","properties":{"handler_timeout":{"type":"string"},"http":{"type":"object"},"idle_timeout":{"type":"string"},"mode":{"type":"string"},"probe_wait_time":{"type":"string"},"read_header_timeout":{"type":"string"},"read_timeout":{"type":"string"},"shutdown_duration":{"type":"string"},"write_timeout":{"type":"string"}}},"servicePort":{"type":"integer","maximum":65535,"minimum":0}}}}},"tls":{"type":"object","properties":{"ca":{"type":"string"},"cert":{"type":"string"},"enabled":{"type":"boolean"},"key":{"type":"string"}}}}},"service":{"type":"object","properties":{"annotations":{"type":"object"},"labels":{"type":"object"}}},"serviceType":{"type":"string","enum":["ClusterIP","LoadBalancer","NodePort"]},"terminationGracePeriodSeconds":{"type":"integer","minimum":0},"time_zone":{"type":"string"},"tolerations":{"type":"array","items":{"type":"object"}},"topologySpreadConstraints":{"type":"array","items":{"type":"object"}},"version":{"type":"string","pattern":"^v[0-9]+\\.[0-9]+\\.[0-9]$"},"volumeMounts":{"type":"array","items":{"type":"object"}},"volumes":{"type":"array","items":{"type":"object"}}}}}} diff --git a/charts/vald/values.yaml b/charts/vald/values.yaml index 4479b206fa..7ffb9bd9c6 100644 --- 
a/charts/vald/values.yaml +++ b/charts/vald/values.yaml @@ -755,285 +755,1212 @@ defaults: # @schema {"name": "gateway", "type": "object"} gateway: - # @schema {"name": "gateway.enabled", "type": "boolean"} - # gateway.enabled -- gateway enabled - enabled: true - # @schema {"name": "gateway.version", "type": "string", "pattern": "^v[0-9]+\\.[0-9]+\\.[0-9]$", "anchor": "version"} - # gateway.version -- version of gateway config - version: v0.0.0 - # @schema {"name": "gateway.time_zone", "type": "string"} - # gateway.time_zone -- Time zone - time_zone: "" - # @schema {"name": "gateway.logging", "alias": "logging"} - # gateway.logging -- logging config (overrides defaults.logging) - logging: {} - # @schema {"name": "gateway.name", "type": "string"} - # gateway.name -- name of gateway deployment - name: vald-gateway - # @schema {"name": "gateway.kind", "type": "string", "enum": ["Deployment", "DaemonSet"]} - # gateway.kind -- deployment kind: Deployment or DaemonSet - kind: Deployment - # @schema {"name": "gateway.serviceType", "type": "string", "enum": ["ClusterIP", "LoadBalancer", "NodePort"]} - # gateway.serviceType -- service type: ClusterIP, LoadBalancer or NodePort - serviceType: ClusterIP - # @schema {"name": "gateway.externalTrafficPolicy", "type": "string"} - # gateway.externalTrafficPolicy -- external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local - externalTrafficPolicy: "" - # @schema {"name": "gateway.progressDeadlineSeconds", "type": "integer"} - # gateway.progressDeadlineSeconds -- progress deadline seconds - progressDeadlineSeconds: 600 - # @schema {"name": "gateway.minReplicas", "type": "integer", "minimum": 0} - # gateway.minReplicas -- minimum number of replicas. - # if HPA is disabled, the replicas will be set to this value - minReplicas: 3 - # @schema {"name": "gateway.maxReplicas", "type": "integer", "minimum": 0} - # gateway.maxReplicas -- maximum number of replicas. - # if HPA is disabled, this value will be ignored. 
- maxReplicas: 9 - # @schema {"name": "gateway.maxUnavailable", "type": "string"} - # gateway.maxUnavailable -- maximum number of unavailable replicas - maxUnavailable: 50% - # @schema {"name": "gateway.revisionHistoryLimit", "type": "integer", "minimum": 0} - # gateway.revisionHistoryLimit -- number of old history to retain to allow rollback - revisionHistoryLimit: 2 - # @schema {"name": "gateway.terminationGracePeriodSeconds", "type": "integer", "minimum": 0} - # gateway.terminationGracePeriodSeconds -- duration in seconds pod needs to terminate gracefully - terminationGracePeriodSeconds: 30 - # @schema {"name": "gateway.podPriority", "type": "object", "anchor": "podPriority"} - podPriority: - # @schema {"name": "gateway.podPriority.enabled", "type": "boolean"} - # gateway.podPriority.enabled -- gateway pod PriorityClass enabled + # @schema {"name": "gateway.vald", "type": "object"} + vald: + # @schema {"name": "gateway.vald.enabled", "type": "boolean"} + # gateway.vald.enabled -- gateway enabled enabled: true - # @schema {"name": "gateway.podPriority.value", "type": "integer"} - # gateway.podPriority.value -- gateway pod PriorityClass value - value: 1000000 - # @schema {"name": "gateway.annotations", "type": "object"} - # gateway.annotations -- deployment annotations - annotations: {} - # @schema {"name": "gateway.podAnnotations", "type": "object"} - # gateway.podAnnotations -- pod annotations - podAnnotations: {} - # @schema {"name": "gateway.service", "type": "object", "anchor": "service"} - service: - # @schema {"name": "gateway.service.annotations", "type": "object"} - # gateway.service.annotations -- service annotations + # @schema {"name": "gateway.vald.version", "type": "string", "pattern": "^v[0-9]+\\.[0-9]+\\.[0-9]$", "anchor": "version"} + # gateway.vald.version -- version of gateway config + version: v0.0.0 + # @schema {"name": "gateway.vald.time_zone", "type": "string"} + # gateway.vald.time_zone -- Time zone + time_zone: "" + # @schema {"name": "gateway.vald.logging", "alias": "logging"} + # gateway.vald.logging -- logging config (overrides defaults.logging) + logging: {} + # @schema {"name": "gateway.vald.name", "type": "string"} + # gateway.vald.name -- name of gateway deployment + name: vald-gateway + # @schema {"name": "gateway.vald.kind", "type": "string", "enum": ["Deployment", "DaemonSet"]} + # gateway.vald.kind -- deployment kind: Deployment or DaemonSet + kind: Deployment + # @schema {"name": "gateway.vald.serviceType", "type": "string", "enum": ["ClusterIP", "LoadBalancer", "NodePort"]} + # gateway.vald.serviceType -- service type: ClusterIP, LoadBalancer or NodePort + serviceType: ClusterIP + # @schema {"name": "gateway.vald.externalTrafficPolicy", "type": "string"} + # gateway.vald.externalTrafficPolicy -- external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local + externalTrafficPolicy: "" + # @schema {"name": "gateway.vald.progressDeadlineSeconds", "type": "integer"} + # gateway.vald.progressDeadlineSeconds -- progress deadline seconds + progressDeadlineSeconds: 600 + # @schema {"name": "gateway.vald.minReplicas", "type": "integer", "minimum": 0} + # gateway.vald.minReplicas -- minimum number of replicas. + # if HPA is disabled, the replicas will be set to this value + minReplicas: 3 + # @schema {"name": "gateway.vald.maxReplicas", "type": "integer", "minimum": 0} + # gateway.vald.maxReplicas -- maximum number of replicas. + # if HPA is disabled, this value will be ignored. 
+ maxReplicas: 9 + # @schema {"name": "gateway.vald.maxUnavailable", "type": "string"} + # gateway.vald.maxUnavailable -- maximum number of unavailable replicas + maxUnavailable: 50% + # @schema {"name": "gateway.vald.revisionHistoryLimit", "type": "integer", "minimum": 0} + # gateway.vald.revisionHistoryLimit -- number of old history to retain to allow rollback + revisionHistoryLimit: 2 + # @schema {"name": "gateway.vald.terminationGracePeriodSeconds", "type": "integer", "minimum": 0} + # gateway.vald.terminationGracePeriodSeconds -- duration in seconds pod needs to terminate gracefully + terminationGracePeriodSeconds: 30 + # @schema {"name": "gateway.vald.podPriority", "type": "object", "anchor": "podPriority"} + podPriority: + # @schema {"name": "gateway.vald.podPriority.enabled", "type": "boolean"} + # gateway.vald.podPriority.enabled -- gateway pod PriorityClass enabled + enabled: true + # @schema {"name": "gateway.vald.podPriority.value", "type": "integer"} + # gateway.vald.podPriority.value -- gateway pod PriorityClass value + value: 1000000 + # @schema {"name": "gateway.vald.annotations", "type": "object"} + # gateway.vald.annotations -- deployment annotations annotations: {} - # @schema {"name": "gateway.service.labels", "type": "object"} - # gateway.service.labels -- service labels - labels: {} - # @schema {"name": "gateway.hpa", "type": "object", "anchor": "hpa"} - hpa: - # @schema {"name": "gateway.hpa.enabled", "type": "boolean"} - # gateway.hpa.enabled -- HPA enabled - enabled: true - # @schema {"name": "gateway.hpa.targetCPUUtilizationPercentage", "type": "integer"} - # gateway.hpa.targetCPUUtilizationPercentage -- HPA CPU utilization percentage - targetCPUUtilizationPercentage: 80 - # @schema {"name": "gateway.image", "type": "object", "anchor": "image"} - image: - # @schema {"name": "gateway.image.repository", "type": "string"} - # gateway.image.repository -- image repository - repository: vdaas/vald-gateway - # @schema {"name": "gateway.image.tag", "type": "string"} - # gateway.image.tag -- image tag (overrides defaults.image.tag) - tag: "" - # @schema {"name": "gateway.image.pullPolicy", "type": "string", "enum": ["Always", "Never", "IfNotPresent"]} - # gateway.image.pullPolicy -- image pull policy - pullPolicy: Always - # @schema {"name": "gateway.rollingUpdate", "type": "object", "anchor": "rollingUpdate"} - rollingUpdate: - # @schema {"name": "gateway.rollingUpdate.maxSurge", "type": "string"} - # gateway.rollingUpdate.maxSurge -- max surge of rolling update - maxSurge: 25% - # @schema {"name": "gateway.rollingUpdate.maxUnavailable", "type": "string"} - # gateway.rollingUpdate.maxUnavailable -- max unavailable of rolling update - maxUnavailable: 25% - # @schema {"name": "gateway.initContainers", "type": "array", "items": {"type": "object"}, "anchor": "initContainers"} - # gateway.initContainers -- init containers - initContainers: - - type: wait-for - name: wait-for-manager-compressor - target: compressor - image: busybox - sleepDuration: 2 - - type: wait-for - name: wait-for-meta - target: meta - image: busybox - sleepDuration: 2 - - type: wait-for - name: wait-for-discoverer - target: discoverer - image: busybox - sleepDuration: 2 - - type: wait-for - name: wait-for-agent - target: agent - image: busybox - sleepDuration: 2 - # @schema {"name": "gateway.env", "type": "array", "items": {"type": "object"}, "anchor": "env"} - # gateway.env -- environment variables - env: - # - name: MY_NODE_NAME - # valueFrom: - # fieldRef: - # fieldPath: spec.nodeName - - name: 
MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - # @schema {"name": "gateway.volumeMounts", "type": "array", "items": {"type": "object"}, "anchor": "volumeMounts"} - # gateway.volumeMounts -- volume mounts - volumeMounts: [] - # @schema {"name": "gateway.volumes", "type": "array", "items": {"type": "object"}, "anchor": "volumes"} - # gateway.volumes -- volumes - volumes: [] - # @schema {"name": "gateway.nodeName", "type": "string"} - # gateway.nodeName -- node name - nodeName: "" - # @schema {"name": "gateway.nodeSelector", "type": "object", "anchor": "nodeSelector"} - # gateway.nodeSelector -- node selector - nodeSelector: {} - # @schema {"name": "gateway.tolerations", "type": "array", "items": {"type": "object"}, "anchor": "tolerations"} - # gateway.tolerations -- tolerations - tolerations: [] - # @schema {"name": "gateway.affinity", "type": "object", "anchor": "affinity"} - affinity: - # @schema {"name": "gateway.affinity.nodeAffinity", "type": "object"} - nodeAffinity: - # @schema {"name": "gateway.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} - # gateway.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms - preferredDuringSchedulingIgnoredDuringExecution: [] - # @schema {"name": "gateway.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "object"} - requiredDuringSchedulingIgnoredDuringExecution: - # @schema {"name": "gateway.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms", "type": "array", "items": {"type": "object"}} - # gateway.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors - nodeSelectorTerms: [] - # @schema {"name": "gateway.affinity.podAffinity", "type": "object"} - podAffinity: - # @schema {"name": "gateway.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} - # gateway.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms - preferredDuringSchedulingIgnoredDuringExecution: [] - # @schema {"name": "gateway.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} - # gateway.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms - requiredDuringSchedulingIgnoredDuringExecution: [] - # @schema {"name": "gateway.affinity.podAntiAffinity", "type": "object"} - podAntiAffinity: - # @schema {"name": "gateway.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} - # gateway.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - topologyKey: kubernetes.io/hostname - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vald-gateway - # @schema {"name": "gateway.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} - # gateway.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms - requiredDuringSchedulingIgnoredDuringExecution: [] - # @schema {"name": 
"gateway.topologySpreadConstraints", "type": "array", "items": {"type": "object"}, "anchor": "topologySpreadConstraints"} - # gateway.topologySpreadConstraints -- topology spread constraints of gateway pods - topologySpreadConstraints: [] - # @schema {"name": "gateway.server_config", "alias": "server_config"} - # gateway.server_config -- server config (overrides defaults.server_config) - server_config: - servers: - rest: {} - grpc: {} - healths: - liveness: {} - readiness: {} - metrics: - pprof: {} - prometheus: {} - # @schema {"name": "gateway.observability", "alias": "observability"} - # gateway.observability -- observability config (overrides defaults.observability) - observability: - jaeger: - service_name: vald-gateway - stackdriver: - profiler: - service: vald-gateway + # @schema {"name": "gateway.vald.podAnnotations", "type": "object"} + # gateway.vald.podAnnotations -- pod annotations + podAnnotations: {} + # @schema {"name": "gateway.vald.service", "type": "object", "anchor": "service"} + service: + # @schema {"name": "gateway.vald.service.annotations", "type": "object"} + # gateway.vald.service.annotations -- service annotations + annotations: {} + # @schema {"name": "gateway.vald.service.labels", "type": "object"} + # gateway.vald.service.labels -- service labels + labels: {} + # @schema {"name": "gateway.vald.hpa", "type": "object", "anchor": "hpa"} + hpa: + # @schema {"name": "gateway.vald.hpa.enabled", "type": "boolean"} + # gateway.vald.hpa.enabled -- HPA enabled + enabled: true + # @schema {"name": "gateway.vald.hpa.targetCPUUtilizationPercentage", "type": "integer"} + # gateway.vald.hpa.targetCPUUtilizationPercentage -- HPA CPU utilization percentage + targetCPUUtilizationPercentage: 80 + # @schema {"name": "gateway.vald.image", "type": "object", "anchor": "image"} + image: + # @schema {"name": "gateway.vald.image.repository", "type": "string"} + # gateway.vald.image.repository -- image repository + repository: vdaas/vald-gateway + # @schema {"name": "gateway.vald.image.tag", "type": "string"} + # gateway.vald.image.tag -- image tag (overrides defaults.image.tag) + tag: "" + # @schema {"name": "gateway.vald.image.pullPolicy", "type": "string", "enum": ["Always", "Never", "IfNotPresent"]} + # gateway.vald.image.pullPolicy -- image pull policy + pullPolicy: Always + # @schema {"name": "gateway.vald.rollingUpdate", "type": "object", "anchor": "rollingUpdate"} + rollingUpdate: + # @schema {"name": "gateway.vald.rollingUpdate.maxSurge", "type": "string"} + # gateway.vald.rollingUpdate.maxSurge -- max surge of rolling update + maxSurge: 25% + # @schema {"name": "gateway.vald.rollingUpdate.maxUnavailable", "type": "string"} + # gateway.vald.rollingUpdate.maxUnavailable -- max unavailable of rolling update + maxUnavailable: 25% + # @schema {"name": "gateway.vald.initContainers", "type": "array", "items": {"type": "object"}, "anchor": "initContainers"} + # gateway.vald.initContainers -- init containers + initContainers: + - type: wait-for + name: wait-for-manager-compressor + target: compressor + image: busybox + sleepDuration: 2 + - type: wait-for + name: wait-for-meta + target: meta + image: busybox + sleepDuration: 2 + - type: wait-for + name: wait-for-discoverer + target: discoverer + image: busybox + sleepDuration: 2 + - type: wait-for + name: wait-for-agent + target: agent + image: busybox + sleepDuration: 2 + # @schema {"name": "gateway.vald.env", "type": "array", "items": {"type": "object"}, "anchor": "env"} + # gateway.vald.env -- environment variables + env: + # - name: 
MY_NODE_NAME + # valueFrom: + # fieldRef: + # fieldPath: spec.nodeName + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # @schema {"name": "gateway.vald.volumeMounts", "type": "array", "items": {"type": "object"}, "anchor": "volumeMounts"} + # gateway.vald.volumeMounts -- volume mounts + volumeMounts: [] + # @schema {"name": "gateway.vald.volumes", "type": "array", "items": {"type": "object"}, "anchor": "volumes"} + # gateway.vald.volumes -- volumes + volumes: [] + # @schema {"name": "gateway.vald.nodeName", "type": "string"} + # gateway.vald.nodeName -- node name + nodeName: "" + # @schema {"name": "gateway.vald.nodeSelector", "type": "object", "anchor": "nodeSelector"} + # gateway.vald.nodeSelector -- node selector + nodeSelector: {} + # @schema {"name": "gateway.vald.tolerations", "type": "array", "items": {"type": "object"}, "anchor": "tolerations"} + # gateway.vald.tolerations -- tolerations + tolerations: [] + # @schema {"name": "gateway.vald.affinity", "type": "object", "anchor": "affinity"} + affinity: + # @schema {"name": "gateway.vald.affinity.nodeAffinity", "type": "object"} + nodeAffinity: + # @schema {"name": "gateway.vald.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.vald.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.vald.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "object"} + requiredDuringSchedulingIgnoredDuringExecution: + # @schema {"name": "gateway.vald.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms", "type": "array", "items": {"type": "object"}} + # gateway.vald.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors + nodeSelectorTerms: [] + # @schema {"name": "gateway.vald.affinity.podAffinity", "type": "object"} + podAffinity: + # @schema {"name": "gateway.vald.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.vald.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.vald.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.vald.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.vald.affinity.podAntiAffinity", "type": "object"} + podAntiAffinity: + # @schema {"name": "gateway.vald.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.vald.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vald-gateway + # @schema {"name": "gateway.vald.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # 
gateway.vald.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.vald.topologySpreadConstraints", "type": "array", "items": {"type": "object"}, "anchor": "topologySpreadConstraints"} + # gateway.vald.topologySpreadConstraints -- topology spread constraints of gateway pods + topologySpreadConstraints: [] + # @schema {"name": "gateway.vald.server_config", "alias": "server_config"} + # gateway.vald.server_config -- server config (overrides defaults.server_config) + server_config: + servers: + rest: {} + grpc: {} + healths: + liveness: {} + readiness: {} + metrics: + pprof: {} + prometheus: {} + # @schema {"name": "gateway.vald.observability", "alias": "observability"} + # gateway.vald.observability -- observability config (overrides defaults.observability) + observability: + jaeger: + service_name: vald-gateway + stackdriver: + profiler: + service: vald-gateway + # @schema {"name": "gateway.vald.ingress", "type": "object"} + ingress: + # @schema {"name": "gateway.vald.ingress.enabled", "type": "boolean"} + # gateway.vald.ingress.enabled -- gateway ingress enabled + enabled: true + # @schema {"name": "gateway.vald.ingress.annotations", "type": "object"} + # gateway.vald.ingress.annotations -- annotations for ingress + annotations: + nginx.ingress.kubernetes.io/grpc-backend: "true" + # @schema {"name": "gateway.vald.ingress.host", "type": "string"} + # gateway.vald.ingress.host -- ingress hostname + host: vald.gateway.vald.vdaas.org + # @schema {"name": "gateway.vald.ingress.servicePort", "type": "string"} + # gateway.vald.ingress.servicePort -- service port to be exposed by ingress + servicePort: grpc + # @schema {"name": "gateway.vald.resources", "type": "object", "anchor": "resources"} + # gateway.vald.resources -- compute resources + resources: + # @schema {"name": "gateway.vald.resources.requests", "type": "object"} + requests: + cpu: 200m + memory: 150Mi + # @schema {"name": "gateway.vald.resources.limits", "type": "object"} + limits: + cpu: 2000m + memory: 700Mi + # @schema {"name": "gateway.vald.gateway_config", "type": "object"} + gateway_config: + # @schema {"name": "gateway.vald.gateway_config.agent_namespace", "type": "string"} + # gateway.vald.gateway_config.agent_namespace -- agent namespace + agent_namespace: _MY_POD_NAMESPACE_ + # @schema {"name": "gateway.vald.gateway_config.node_name", "type": "string"} + # gateway.vald.gateway_config.node_name -- node name + node_name: "" # _MY_NODE_NAME_ + # @schema {"name": "gateway.vald.gateway_config.index_replica", "type": "integer", "minimum": 1} + # gateway.vald.gateway_config.index_replica -- number of index replica + index_replica: 5 + # @schema {"name": "gateway.vald.gateway_config.discoverer", "type": "object"} + discoverer: + # @schema {"name": "gateway.vald.gateway_config.discoverer.duration", "type": "string"} + # gateway.vald.gateway_config.discoverer.duration -- discoverer duration + duration: 200ms + # @schema {"name": "gateway.vald.gateway_config.discoverer.discover_client", "alias": "grpc.client"} + # gateway.vald.gateway_config.discoverer.discover_client -- gRPC client for discoverer (overrides defaults.grpc.client) + discover_client: {} + # @schema {"name": "gateway.vald.gateway_config.discoverer.agent_client", "alias": "grpc.client"} + # gateway.vald.gateway_config.discoverer.agent_client -- gRPC client for agents (overrides defaults.grpc.client) + agent_client: {} + # 
@schema {"name": "gateway.vald.gateway_config.meta", "type": "object"} + meta: + # @schema {"name": "gateway.vald.gateway_config.meta.client", "alias": "grpc.client"} + # gateway.vald.gateway_config.meta.client -- gRPC client for meta (overrides defaults.grpc.client) + client: {} + # @schema {"name": "gateway.vald.gateway_config.meta.enable_cache", "type": "boolean"} + # gateway.vald.gateway_config.meta.enable_cache -- meta cache enabled + enable_cache: true + # @schema {"name": "gateway.vald.gateway_config.meta.cache_expiration", "type": "string"} + # gateway.vald.gateway_config.meta.cache_expiration -- meta cache expire duration + cache_expiration: "30m" + # @schema {"name": "gateway.vald.gateway_config.meta.expired_cache_check_duration", "type": "string"} + # gateway.vald.gateway_config.meta.expired_cache_check_duration -- meta cache expired check duration + expired_cache_check_duration: "3m" + # @schema {"name": "gateway.vald.gateway_config.backup", "type": "object"} + backup: + # @schema {"name": "gateway.vald.gateway_config.backup.client", "alias": "grpc.client"} + # gateway.vald.gateway_config.backup.client -- gRPC client for backup (overrides defaults.grpc.client) + client: {} + # @schema {"name": "gateway.backup", "type": "object"} + backup: + # @schema {"name": "gateway.backup.enabled", "type": "boolean"} + # gateway.backup.enabled -- gateway enabled + enabled: false + # @schema {"name": "gateway.backup.version", "type": "string", "pattern": "^v[0-9]+\\.[0-9]+\\.[0-9]$", "anchor": "version"} + # gateway.backup.version -- version of gateway config + version: v0.0.0 + # @schema {"name": "gateway.backup.time_zone", "type": "string"} + # gateway.backup.time_zone -- Time zone + time_zone: "" + # @schema {"name": "gateway.backup.logging", "alias": "logging"} + # gateway.backup.logging -- logging config (overrides defaults.logging) + logging: {} + # @schema {"name": "gateway.backup.name", "type": "string"} + # gateway.backup.name -- name of backup gateway deployment + name: vald-gateway-backup + # @schema {"name": "gateway.backup.kind", "type": "string", "enum": ["Deployment", "DaemonSet"]} + # gateway.backup.kind -- deployment kind: Deployment or DaemonSet + kind: Deployment + # @schema {"name": "gateway.backup.serviceType", "type": "string", "enum": ["ClusterIP", "LoadBalancer", "NodePort"]} + # gateway.backup.serviceType -- service type: ClusterIP, LoadBalancer or NodePort + serviceType: ClusterIP + # @schema {"name": "gateway.backup.externalTrafficPolicy", "type": "string"} + # gateway.backup.externalTrafficPolicy -- external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local + externalTrafficPolicy: "" + # @schema {"name": "gateway.backup.progressDeadlineSeconds", "type": "integer"} + # gateway.backup.progressDeadlineSeconds -- progress deadline seconds + progressDeadlineSeconds: 600 + # @schema {"name": "gateway.backup.minReplicas", "type": "integer", "minimum": 0} + # gateway.backup.minReplicas -- minimum number of replicas. + # if HPA is disabled, the replicas will be set to this value + minReplicas: 3 + # @schema {"name": "gateway.backup.maxReplicas", "type": "integer", "minimum": 0} + # gateway.backup.maxReplicas -- maximum number of replicas. + # if HPA is disabled, this value will be ignored. 
+ maxReplicas: 9 + # @schema {"name": "gateway.backup.maxUnavailable", "type": "string"} + # gateway.backup.maxUnavailable -- maximum number of unavailable replicas + maxUnavailable: 50% + # @schema {"name": "gateway.backup.revisionHistoryLimit", "type": "integer", "minimum": 0} + # gateway.backup.revisionHistoryLimit -- number of old history to retain to allow rollback + revisionHistoryLimit: 2 + # @schema {"name": "gateway.backup.terminationGracePeriodSeconds", "type": "integer", "minimum": 0} + # gateway.backup.terminationGracePeriodSeconds -- duration in seconds pod needs to terminate gracefully + terminationGracePeriodSeconds: 30 + # @schema {"name": "gateway.backup.podPriority", "type": "object", "anchor": "podPriority"} + podPriority: + # @schema {"name": "gateway.backup.podPriority.enabled", "type": "boolean"} + # gateway.backup.podPriority.enabled -- gateway pod PriorityClass enabled + enabled: true + # @schema {"name": "gateway.backup.podPriority.value", "type": "integer"} + # gateway.backup.podPriority.value -- gateway pod PriorityClass value + value: 1000000 + # @schema {"name": "gateway.backup.annotations", "type": "object"} + # gateway.backup.annotations -- deployment annotations + annotations: {} + # @schema {"name": "gateway.backup.podAnnotations", "type": "object"} + # gateway.backup.podAnnotations -- pod annotations + podAnnotations: {} + # @schema {"name": "gateway.backup.service", "type": "object", "anchor": "service"} + service: + # @schema {"name": "gateway.backup.service.annotations", "type": "object"} + # gateway.backup.service.annotations -- service annotations + annotations: {} + # @schema {"name": "gateway.backup.service.labels", "type": "object"} + # gateway.backup.service.labels -- service labels + labels: {} + # @schema {"name": "gateway.backup.hpa", "type": "object", "anchor": "hpa"} + hpa: + # @schema {"name": "gateway.backup.hpa.enabled", "type": "boolean"} + # gateway.backup.hpa.enabled -- HPA enabled + enabled: true + # @schema {"name": "gateway.backup.hpa.targetCPUUtilizationPercentage", "type": "integer"} + # gateway.backup.hpa.targetCPUUtilizationPercentage -- HPA CPU utilization percentage + targetCPUUtilizationPercentage: 80 + # @schema {"name": "gateway.backup.image", "type": "object", "anchor": "image"} + image: + # @schema {"name": "gateway.backup.image.repository", "type": "string"} + # gateway.backup.image.repository -- image repository + repository: vdaas/vald-gateway-backup + # @schema {"name": "gateway.backup.image.tag", "type": "string"} + # gateway.backup.image.tag -- image tag (overrides defaults.image.tag) + tag: "" + # @schema {"name": "gateway.backup.image.pullPolicy", "type": "string", "enum": ["Always", "Never", "IfNotPresent"]} + # gateway.backup.image.pullPolicy -- image pull policy + pullPolicy: Always + # @schema {"name": "gateway.backup.rollingUpdate", "type": "object", "anchor": "rollingUpdate"} + rollingUpdate: + # @schema {"name": "gateway.backup.rollingUpdate.maxSurge", "type": "string"} + # gateway.backup.rollingUpdate.maxSurge -- max surge of rolling update + maxSurge: 25% + # @schema {"name": "gateway.backup.rollingUpdate.maxUnavailable", "type": "string"} + # gateway.backup.rollingUpdate.maxUnavailable -- max unavailable of rolling update + maxUnavailable: 25% + # @schema {"name": "gateway.backup.initContainers", "type": "array", "items": {"type": "object"}, "anchor": "initContainers"} + # gateway.backup.initContainers -- init containers + initContainers: + - type: wait-for + name: wait-for-manager-compressor + target: 
compressor + image: busybox + sleepDuration: 2 + - type: wait-for + name: wait-for-gateway-lb + target: gateway-lb + image: busybox + sleepDuration: 2 + # @schema {"name": "gateway.backup.env", "type": "array", "items": {"type": "object"}, "anchor": "env"} + # gateway.backup.env -- environment variables + env: [] + # @schema {"name": "gateway.backup.volumeMounts", "type": "array", "items": {"type": "object"}, "anchor": "volumeMounts"} + # gateway.backup.volumeMounts -- volume mounts + volumeMounts: [] + # @schema {"name": "gateway.backup.volumes", "type": "array", "items": {"type": "object"}, "anchor": "volumes"} + # gateway.backup.volumes -- volumes + volumes: [] + # @schema {"name": "gateway.backup.nodeName", "type": "string"} + # gateway.backup.nodeName -- node name + nodeName: "" + # @schema {"name": "gateway.backup.nodeSelector", "type": "object", "anchor": "nodeSelector"} + # gateway.backup.nodeSelector -- node selector + nodeSelector: {} + # @schema {"name": "gateway.backup.tolerations", "type": "array", "items": {"type": "object"}, "anchor": "tolerations"} + # gateway.backup.tolerations -- tolerations + tolerations: [] + # @schema {"name": "gateway.backup.affinity", "type": "object", "anchor": "affinity"} + affinity: + # @schema {"name": "gateway.backup.affinity.nodeAffinity", "type": "object"} + nodeAffinity: + # @schema {"name": "gateway.backup.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.backup.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.backup.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "object"} + requiredDuringSchedulingIgnoredDuringExecution: + # @schema {"name": "gateway.backup.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms", "type": "array", "items": {"type": "object"}} + # gateway.backup.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors + nodeSelectorTerms: [] + # @schema {"name": "gateway.backup.affinity.podAffinity", "type": "object"} + podAffinity: + # @schema {"name": "gateway.backup.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.backup.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.backup.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.backup.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.backup.affinity.podAntiAffinity", "type": "object"} + podAntiAffinity: + # @schema {"name": "gateway.backup.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.backup.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchExpressions: + - key: app + operator: 
In + values: + - vald-gateway-backup + # @schema {"name": "gateway.backup.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.backup.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.backup.topologySpreadConstraints", "type": "array", "items": {"type": "object"}, "anchor": "topologySpreadConstraints"} + # gateway.backup.topologySpreadConstraints -- topology spread constraints of gateway pods + topologySpreadConstraints: [] + # @schema {"name": "gateway.backup.server_config", "alias": "server_config"} + # gateway.backup.server_config -- server config (overrides defaults.server_config) + server_config: + servers: + rest: {} + grpc: {} + healths: + liveness: {} + readiness: {} + metrics: + pprof: {} + prometheus: {} + # @schema {"name": "gateway.backup.observability", "alias": "observability"} + # gateway.backup.observability -- observability config (overrides defaults.observability) + observability: + jaeger: + service_name: vald-gateway-backup + stackdriver: + profiler: + service: vald-gateway-backup + # @schema {"name": "gateway.backup.ingress", "type": "object"} + ingress: + # @schema {"name": "gateway.backup.ingress.enabled", "type": "boolean"} + # gateway.backup.ingress.enabled -- gateway ingress enabled + enabled: false + # @schema {"name": "gateway.backup.ingress.annotations", "type": "object"} + # gateway.backup.ingress.annotations -- annotations for ingress + annotations: + nginx.ingress.kubernetes.io/grpc-backend: "true" + # @schema {"name": "gateway.backup.ingress.host", "type": "string"} + # gateway.backup.ingress.host -- ingress hostname + host: backup.gateway.vald.vdaas.org + # @schema {"name": "gateway.backup.ingress.servicePort", "type": "string"} + # gateway.backup.ingress.servicePort -- service port to be exposed by ingress + servicePort: grpc + # @schema {"name": "gateway.backup.resources", "type": "object", "anchor": "resources"} + # gateway.backup.resources -- compute resources + resources: + # @schema {"name": "gateway.backup.resources.requests", "type": "object"} + requests: + cpu: 200m + memory: 150Mi + # @schema {"name": "gateway.backup.resources.limits", "type": "object"} + limits: + cpu: 2000m + memory: 700Mi + # @schema {"name": "gateway.backup.gateway_config", "type": "object"} + gateway_config: + # @schema {"name": "gateway.backup.gateway_config.client", "alias": "grpc.client"} + # gateway.backup.gateway_config.client -- gRPC client for next gateway (overrides defaults.grpc.client) + client: + addrs: + - "vald-gateway-lb.vald.svc.cluster.local" + # @schema {"name": "gateway.backup.gateway_config.backup", "type": "object"} + backup: + # @schema {"name": "gateway.backup.gateway_config.backup.client", "alias": "grpc.client"} + # gateway.backup.gateway_config.backup.client -- gRPC client for backup (overrides defaults.grpc.client) + client: {} # @schema {"name": "gateway.filter", "type": "object"} filter: - # @schema {"name": "gateway.filter.egress", "type": "array", "items": {"type": "string"}} - # gateway.filter.egress -- egress filters - egress: - - "" - # @schema {"name": "gateway.filter.ingress", "type": "array", "items": {"type": "string"}} - # gateway.filter.ingress -- ingress filters + # @schema {"name": "gateway.filter.enabled", "type": "boolean"} + # gateway.filter.enabled -- gateway enabled + enabled: false + # @schema {"name": 
"gateway.filter.version", "type": "string", "pattern": "^v[0-9]+\\.[0-9]+\\.[0-9]$", "anchor": "version"} + # gateway.filter.version -- version of gateway config + version: v0.0.0 + # @schema {"name": "gateway.filter.time_zone", "type": "string"} + # gateway.filter.time_zone -- Time zone + time_zone: "" + # @schema {"name": "gateway.filter.logging", "alias": "logging"} + # gateway.filter.logging -- logging config (overrides defaults.logging) + logging: {} + # @schema {"name": "gateway.filter.name", "type": "string"} + # gateway.filter.name -- name of filter gateway deployment + name: vald-gateway-filter + # @schema {"name": "gateway.filter.kind", "type": "string", "enum": ["Deployment", "DaemonSet"]} + # gateway.filter.kind -- deployment kind: Deployment or DaemonSet + kind: Deployment + # @schema {"name": "gateway.filter.serviceType", "type": "string", "enum": ["ClusterIP", "LoadBalancer", "NodePort"]} + # gateway.filter.serviceType -- service type: ClusterIP, LoadBalancer or NodePort + serviceType: ClusterIP + # @schema {"name": "gateway.filter.externalTrafficPolicy", "type": "string"} + # gateway.filter.externalTrafficPolicy -- external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local + externalTrafficPolicy: "" + # @schema {"name": "gateway.filter.progressDeadlineSeconds", "type": "integer"} + # gateway.filter.progressDeadlineSeconds -- progress deadline seconds + progressDeadlineSeconds: 600 + # @schema {"name": "gateway.filter.minReplicas", "type": "integer", "minimum": 0} + # gateway.filter.minReplicas -- minimum number of replicas. + # if HPA is disabled, the replicas will be set to this value + minReplicas: 3 + # @schema {"name": "gateway.filter.maxReplicas", "type": "integer", "minimum": 0} + # gateway.filter.maxReplicas -- maximum number of replicas. + # if HPA is disabled, this value will be ignored. 
+ maxReplicas: 9 + # @schema {"name": "gateway.filter.maxUnavailable", "type": "string"} + # gateway.filter.maxUnavailable -- maximum number of unavailable replicas + maxUnavailable: 50% + # @schema {"name": "gateway.filter.revisionHistoryLimit", "type": "integer", "minimum": 0} + # gateway.filter.revisionHistoryLimit -- number of old history to retain to allow rollback + revisionHistoryLimit: 2 + # @schema {"name": "gateway.filter.terminationGracePeriodSeconds", "type": "integer", "minimum": 0} + # gateway.filter.terminationGracePeriodSeconds -- duration in seconds pod needs to terminate gracefully + terminationGracePeriodSeconds: 30 + # @schema {"name": "gateway.filter.podPriority", "type": "object", "anchor": "podPriority"} + podPriority: + # @schema {"name": "gateway.filter.podPriority.enabled", "type": "boolean"} + # gateway.filter.podPriority.enabled -- gateway pod PriorityClass enabled + enabled: true + # @schema {"name": "gateway.filter.podPriority.value", "type": "integer"} + # gateway.filter.podPriority.value -- gateway pod PriorityClass value + value: 1000000 + # @schema {"name": "gateway.filter.annotations", "type": "object"} + # gateway.filter.annotations -- deployment annotations + annotations: {} + # @schema {"name": "gateway.filter.podAnnotations", "type": "object"} + # gateway.filter.podAnnotations -- pod annotations + podAnnotations: {} + # @schema {"name": "gateway.filter.service", "type": "object", "anchor": "service"} + service: + # @schema {"name": "gateway.filter.service.annotations", "type": "object"} + # gateway.filter.service.annotations -- service annotations + annotations: {} + # @schema {"name": "gateway.filter.service.labels", "type": "object"} + # gateway.filter.service.labels -- service labels + labels: {} + # @schema {"name": "gateway.filter.hpa", "type": "object", "anchor": "hpa"} + hpa: + # @schema {"name": "gateway.filter.hpa.enabled", "type": "boolean"} + # gateway.filter.hpa.enabled -- HPA enabled + enabled: true + # @schema {"name": "gateway.filter.hpa.targetCPUUtilizationPercentage", "type": "integer"} + # gateway.filter.hpa.targetCPUUtilizationPercentage -- HPA CPU utilization percentage + targetCPUUtilizationPercentage: 80 + # @schema {"name": "gateway.filter.image", "type": "object", "anchor": "image"} + image: + # @schema {"name": "gateway.filter.image.repository", "type": "string"} + # gateway.filter.image.repository -- image repository + repository: vdaas/vald-gateway-filter + # @schema {"name": "gateway.filter.image.tag", "type": "string"} + # gateway.filter.image.tag -- image tag (overrides defaults.image.tag) + tag: "" + # @schema {"name": "gateway.filter.image.pullPolicy", "type": "string", "enum": ["Always", "Never", "IfNotPresent"]} + # gateway.filter.image.pullPolicy -- image pull policy + pullPolicy: Always + # @schema {"name": "gateway.filter.rollingUpdate", "type": "object", "anchor": "rollingUpdate"} + rollingUpdate: + # @schema {"name": "gateway.filter.rollingUpdate.maxSurge", "type": "string"} + # gateway.filter.rollingUpdate.maxSurge -- max surge of rolling update + maxSurge: 25% + # @schema {"name": "gateway.filter.rollingUpdate.maxUnavailable", "type": "string"} + # gateway.filter.rollingUpdate.maxUnavailable -- max unavailable of rolling update + maxUnavailable: 25% + # @schema {"name": "gateway.filter.initContainers", "type": "array", "items": {"type": "object"}, "anchor": "initContainers"} + # gateway.filter.initContainers -- init containers + initContainers: + - type: wait-for + name: wait-for-gateway-meta + target: 
gateway-meta + image: busybox + sleepDuration: 2 + # @schema {"name": "gateway.filter.env", "type": "array", "items": {"type": "object"}, "anchor": "env"} + # gateway.filter.env -- environment variables + env: [] + # @schema {"name": "gateway.filter.volumeMounts", "type": "array", "items": {"type": "object"}, "anchor": "volumeMounts"} + # gateway.filter.volumeMounts -- volume mounts + volumeMounts: [] + # @schema {"name": "gateway.filter.volumes", "type": "array", "items": {"type": "object"}, "anchor": "volumes"} + # gateway.filter.volumes -- volumes + volumes: [] + # @schema {"name": "gateway.filter.nodeName", "type": "string"} + # gateway.filter.nodeName -- node name + nodeName: "" + # @schema {"name": "gateway.filter.nodeSelector", "type": "object", "anchor": "nodeSelector"} + # gateway.filter.nodeSelector -- node selector + nodeSelector: {} + # @schema {"name": "gateway.filter.tolerations", "type": "array", "items": {"type": "object"}, "anchor": "tolerations"} + # gateway.filter.tolerations -- tolerations + tolerations: [] + # @schema {"name": "gateway.filter.affinity", "type": "object", "anchor": "affinity"} + affinity: + # @schema {"name": "gateway.filter.affinity.nodeAffinity", "type": "object"} + nodeAffinity: + # @schema {"name": "gateway.filter.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.filter.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.filter.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "object"} + requiredDuringSchedulingIgnoredDuringExecution: + # @schema {"name": "gateway.filter.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms", "type": "array", "items": {"type": "object"}} + # gateway.filter.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors + nodeSelectorTerms: [] + # @schema {"name": "gateway.filter.affinity.podAffinity", "type": "object"} + podAffinity: + # @schema {"name": "gateway.filter.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.filter.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.filter.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.filter.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.filter.affinity.podAntiAffinity", "type": "object"} + podAntiAffinity: + # @schema {"name": "gateway.filter.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.filter.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vald-gateway-filter + # @schema {"name": 
"gateway.filter.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.filter.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.filter.topologySpreadConstraints", "type": "array", "items": {"type": "object"}, "anchor": "topologySpreadConstraints"} + # gateway.filter.topologySpreadConstraints -- topology spread constraints of gateway pods + topologySpreadConstraints: [] + # @schema {"name": "gateway.filter.server_config", "alias": "server_config"} + # gateway.filter.server_config -- server config (overrides defaults.server_config) + server_config: + servers: + rest: {} + grpc: {} + healths: + liveness: {} + readiness: {} + metrics: + pprof: {} + prometheus: {} + # @schema {"name": "gateway.filter.observability", "alias": "observability"} + # gateway.filter.observability -- observability config (overrides defaults.observability) + observability: + jaeger: + service_name: vald-gateway-filter + stackdriver: + profiler: + service: vald-gateway-filter + # @schema {"name": "gateway.filter.ingress", "type": "object"} + ingress: + # @schema {"name": "gateway.filter.ingress.enabled", "type": "boolean"} + # gateway.filter.ingress.enabled -- gateway ingress enabled + enabled: false + # @schema {"name": "gateway.filter.ingress.annotations", "type": "object"} + # gateway.filter.ingress.annotations -- annotations for ingress + annotations: + nginx.ingress.kubernetes.io/grpc-backend: "true" + # @schema {"name": "gateway.filter.ingress.host", "type": "string"} + # gateway.filter.ingress.host -- ingress hostname + host: filter.gateway.vald.vdaas.org + # @schema {"name": "gateway.filter.ingress.servicePort", "type": "string"} + # gateway.filter.ingress.servicePort -- service port to be exposed by ingress + servicePort: grpc + # @schema {"name": "gateway.filter.resources", "type": "object", "anchor": "resources"} + # gateway.filter.resources -- compute resources + resources: + # @schema {"name": "gateway.filter.resources.requests", "type": "object"} + requests: + cpu: 200m + memory: 150Mi + # @schema {"name": "gateway.filter.resources.limits", "type": "object"} + limits: + cpu: 2000m + memory: 700Mi + # @schema {"name": "gateway.filter.gateway_config", "type": "object"} + gateway_config: + # @schema {"name": "gateway.filter.gateway_config.client", "alias": "grpc.client"} + # gateway.filter.gateway_config.client -- gRPC client for next gateway (overrides defaults.grpc.client) + client: + addrs: + - "vald-gateway-meta.vald.svc.cluster.local" + # @schema {"name": "gateway.filter.gateway_config.ingress_filter", "type": "object"} + ingress_filter: + # @schema {"name": "gateway.filter.gateway_config.ingress_filter.client", "alias": "grpc.client"} + # gateway.filter.gateway_config.ingress_filter.client -- gRPC client for ingress filters (overrides defaults.grpc.client) + client: {} + # @schema {"name": "gateway.filter.gateway_config.ingress_filter.search", "type": "array", "items": {"type": "string"}} + # gateway.filter.gateway_config.ingress_filter.search -- ingress filters for search operation + search: [] + # @schema {"name": "gateway.filter.gateway_config.ingress_filter.insert", "type": "array", "items": {"type": "string"}} + # gateway.filter.gateway_config.ingress_filter.insert -- ingress filters for insert operation + insert: [] + # @schema {"name": 
"gateway.filter.gateway_config.ingress_filter.update", "type": "array", "items": {"type": "string"}} + # gateway.filter.gateway_config.ingress_filter.update -- ingress filters for update operation + update: [] + # @schema {"name": "gateway.filter.gateway_config.ingress_filter.upsert", "type": "array", "items": {"type": "string"}} + # gateway.filter.gateway_config.ingress_filter.upsert -- ingress filters for upsert operation + upsert: [] + # @schema {"name": "gateway.filter.gateway_config.egress_filter", "type": "object"} + egress_filter: + # @schema {"name": "gateway.filter.gateway_config.egress_filter.client", "alias": "grpc.client"} + # gateway.filter.gateway_config.egress_filter.client -- gRPC client for egress filter (overrides defaults.grpc.client) + client: {} + # @schema {"name": "gateway.lb", "type": "object"} + lb: + # @schema {"name": "gateway.lb.enabled", "type": "boolean"} + # gateway.lb.enabled -- gateway enabled + enabled: false + # @schema {"name": "gateway.lb.version", "type": "string", "pattern": "^v[0-9]+\\.[0-9]+\\.[0-9]$", "anchor": "version"} + # gateway.lb.version -- version of gateway config + version: v0.0.0 + # @schema {"name": "gateway.lb.time_zone", "type": "string"} + # gateway.lb.time_zone -- Time zone + time_zone: "" + # @schema {"name": "gateway.lb.logging", "alias": "logging"} + # gateway.lb.logging -- logging config (overrides defaults.logging) + logging: {} + # @schema {"name": "gateway.lb.name", "type": "string"} + # gateway.lb.name -- name of gateway deployment + name: vald-gateway-lb + # @schema {"name": "gateway.lb.kind", "type": "string", "enum": ["Deployment", "DaemonSet"]} + # gateway.lb.kind -- deployment kind: Deployment or DaemonSet + kind: Deployment + # @schema {"name": "gateway.lb.serviceType", "type": "string", "enum": ["ClusterIP", "LoadBalancer", "NodePort"]} + # gateway.lb.serviceType -- service type: ClusterIP, LoadBalancer or NodePort + serviceType: ClusterIP + # @schema {"name": "gateway.lb.externalTrafficPolicy", "type": "string"} + # gateway.lb.externalTrafficPolicy -- external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local + externalTrafficPolicy: "" + # @schema {"name": "gateway.lb.progressDeadlineSeconds", "type": "integer"} + # gateway.lb.progressDeadlineSeconds -- progress deadline seconds + progressDeadlineSeconds: 600 + # @schema {"name": "gateway.lb.minReplicas", "type": "integer", "minimum": 0} + # gateway.lb.minReplicas -- minimum number of replicas. + # if HPA is disabled, the replicas will be set to this value + minReplicas: 3 + # @schema {"name": "gateway.lb.maxReplicas", "type": "integer", "minimum": 0} + # gateway.lb.maxReplicas -- maximum number of replicas. + # if HPA is disabled, this value will be ignored. 
+ maxReplicas: 9 + # @schema {"name": "gateway.lb.maxUnavailable", "type": "string"} + # gateway.lb.maxUnavailable -- maximum number of unavailable replicas + maxUnavailable: 50% + # @schema {"name": "gateway.lb.revisionHistoryLimit", "type": "integer", "minimum": 0} + # gateway.lb.revisionHistoryLimit -- number of old history to retain to allow rollback + revisionHistoryLimit: 2 + # @schema {"name": "gateway.lb.terminationGracePeriodSeconds", "type": "integer", "minimum": 0} + # gateway.lb.terminationGracePeriodSeconds -- duration in seconds pod needs to terminate gracefully + terminationGracePeriodSeconds: 30 + # @schema {"name": "gateway.lb.podPriority", "type": "object", "anchor": "podPriority"} + podPriority: + # @schema {"name": "gateway.lb.podPriority.enabled", "type": "boolean"} + # gateway.lb.podPriority.enabled -- gateway pod PriorityClass enabled + enabled: true + # @schema {"name": "gateway.lb.podPriority.value", "type": "integer"} + # gateway.lb.podPriority.value -- gateway pod PriorityClass value + value: 1000000 + # @schema {"name": "gateway.lb.annotations", "type": "object"} + # gateway.lb.annotations -- deployment annotations + annotations: {} + # @schema {"name": "gateway.lb.podAnnotations", "type": "object"} + # gateway.lb.podAnnotations -- pod annotations + podAnnotations: {} + # @schema {"name": "gateway.lb.service", "type": "object", "anchor": "service"} + service: + # @schema {"name": "gateway.lb.service.annotations", "type": "object"} + # gateway.lb.service.annotations -- service annotations + annotations: {} + # @schema {"name": "gateway.lb.service.labels", "type": "object"} + # gateway.lb.service.labels -- service labels + labels: {} + # @schema {"name": "gateway.lb.hpa", "type": "object", "anchor": "hpa"} + hpa: + # @schema {"name": "gateway.lb.hpa.enabled", "type": "boolean"} + # gateway.lb.hpa.enabled -- HPA enabled + enabled: true + # @schema {"name": "gateway.lb.hpa.targetCPUUtilizationPercentage", "type": "integer"} + # gateway.lb.hpa.targetCPUUtilizationPercentage -- HPA CPU utilization percentage + targetCPUUtilizationPercentage: 80 + # @schema {"name": "gateway.lb.image", "type": "object", "anchor": "image"} + image: + # @schema {"name": "gateway.lb.image.repository", "type": "string"} + # gateway.lb.image.repository -- image repository + repository: vdaas/vald-gateway-lb + # @schema {"name": "gateway.lb.image.tag", "type": "string"} + # gateway.lb.image.tag -- image tag (overrides defaults.image.tag) + tag: "" + # @schema {"name": "gateway.lb.image.pullPolicy", "type": "string", "enum": ["Always", "Never", "IfNotPresent"]} + # gateway.lb.image.pullPolicy -- image pull policy + pullPolicy: Always + # @schema {"name": "gateway.lb.rollingUpdate", "type": "object", "anchor": "rollingUpdate"} + rollingUpdate: + # @schema {"name": "gateway.lb.rollingUpdate.maxSurge", "type": "string"} + # gateway.lb.rollingUpdate.maxSurge -- max surge of rolling update + maxSurge: 25% + # @schema {"name": "gateway.lb.rollingUpdate.maxUnavailable", "type": "string"} + # gateway.lb.rollingUpdate.maxUnavailable -- max unavailable of rolling update + maxUnavailable: 25% + # @schema {"name": "gateway.lb.initContainers", "type": "array", "items": {"type": "object"}, "anchor": "initContainers"} + # gateway.lb.initContainers -- init containers + initContainers: + - type: wait-for + name: wait-for-discoverer + target: discoverer + image: busybox + sleepDuration: 2 + - type: wait-for + name: wait-for-agent + target: agent + image: busybox + sleepDuration: 2 + # @schema {"name": 
"gateway.lb.env", "type": "array", "items": {"type": "object"}, "anchor": "env"} + # gateway.lb.env -- environment variables + env: + # - name: MY_NODE_NAME + # valueFrom: + # fieldRef: + # fieldPath: spec.nodeName + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # @schema {"name": "gateway.lb.volumeMounts", "type": "array", "items": {"type": "object"}, "anchor": "volumeMounts"} + # gateway.lb.volumeMounts -- volume mounts + volumeMounts: [] + # @schema {"name": "gateway.lb.volumes", "type": "array", "items": {"type": "object"}, "anchor": "volumes"} + # gateway.lb.volumes -- volumes + volumes: [] + # @schema {"name": "gateway.lb.nodeName", "type": "string"} + # gateway.lb.nodeName -- node name + nodeName: "" + # @schema {"name": "gateway.lb.nodeSelector", "type": "object", "anchor": "nodeSelector"} + # gateway.lb.nodeSelector -- node selector + nodeSelector: {} + # @schema {"name": "gateway.lb.tolerations", "type": "array", "items": {"type": "object"}, "anchor": "tolerations"} + # gateway.lb.tolerations -- tolerations + tolerations: [] + # @schema {"name": "gateway.lb.affinity", "type": "object", "anchor": "affinity"} + affinity: + # @schema {"name": "gateway.lb.affinity.nodeAffinity", "type": "object"} + nodeAffinity: + # @schema {"name": "gateway.lb.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.lb.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.lb.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "object"} + requiredDuringSchedulingIgnoredDuringExecution: + # @schema {"name": "gateway.lb.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms", "type": "array", "items": {"type": "object"}} + # gateway.lb.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors + nodeSelectorTerms: [] + # @schema {"name": "gateway.lb.affinity.podAffinity", "type": "object"} + podAffinity: + # @schema {"name": "gateway.lb.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.lb.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.lb.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.lb.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.lb.affinity.podAntiAffinity", "type": "object"} + podAntiAffinity: + # @schema {"name": "gateway.lb.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.lb.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vald-gateway-lb + # @schema {"name": 
"gateway.lb.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.lb.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.lb.topologySpreadConstraints", "type": "array", "items": {"type": "object"}, "anchor": "topologySpreadConstraints"} + # gateway.lb.topologySpreadConstraints -- topology spread constraints of gateway pods + topologySpreadConstraints: [] + # @schema {"name": "gateway.lb.server_config", "alias": "server_config"} + # gateway.lb.server_config -- server config (overrides defaults.server_config) + server_config: + servers: + rest: {} + grpc: {} + healths: + liveness: {} + readiness: {} + metrics: + pprof: {} + prometheus: {} + # @schema {"name": "gateway.lb.observability", "alias": "observability"} + # gateway.lb.observability -- observability config (overrides defaults.observability) + observability: + jaeger: + service_name: vald-gateway-lb + stackdriver: + profiler: + service: vald-gateway-lb + # @schema {"name": "gateway.lb.ingress", "type": "object"} ingress: - - "" - # @schema {"name": "gateway.ingress", "type": "object"} - ingress: - # @schema {"name": "gateway.ingress.enabled", "type": "boolean"} - # gateway.ingress.enabled -- gateway ingress enabled + # @schema {"name": "gateway.lb.ingress.enabled", "type": "boolean"} + # gateway.lb.ingress.enabled -- gateway ingress enabled + enabled: false + # @schema {"name": "gateway.lb.ingress.annotations", "type": "object"} + # gateway.lb.ingress.annotations -- annotations for ingress + annotations: + nginx.ingress.kubernetes.io/grpc-backend: "true" + # @schema {"name": "gateway.lb.ingress.host", "type": "string"} + # gateway.lb.ingress.host -- ingress hostname + host: lb.gateway.vald.vdaas.org + # @schema {"name": "gateway.lb.ingress.servicePort", "type": "string"} + # gateway.lb.ingress.servicePort -- service port to be exposed by ingress + servicePort: grpc + # @schema {"name": "gateway.lb.resources", "type": "object", "anchor": "resources"} + # gateway.lb.resources -- compute resources + resources: + # @schema {"name": "gateway.lb.resources.requests", "type": "object"} + requests: + cpu: 200m + memory: 150Mi + # @schema {"name": "gateway.lb.resources.limits", "type": "object"} + limits: + cpu: 2000m + memory: 700Mi + # @schema {"name": "gateway.lb.gateway_config", "type": "object"} + gateway_config: + # @schema {"name": "gateway.lb.gateway_config.agent_namespace", "type": "string"} + # gateway.lb.gateway_config.agent_namespace -- agent namespace + agent_namespace: _MY_POD_NAMESPACE_ + # @schema {"name": "gateway.lb.gateway_config.node_namespace", "type": "string"} + # gateway.lb.gateway_config.node_name -- node name + node_name: "" # _MY_NODE_NAME_ + # @schema {"name": "gateway.lb.gateway_config.index_replica", "type": "integer", "minimum": 1} + # gateway.lb.gateway_config.index_replica -- number of index replica + index_replica: 5 + # @schema {"name": "gateway.lb.gateway_config.discoverer", "type": "object"} + discoverer: + # @schema {"name": "gateway.lb.gateway_config.discoverer.duration", "type": "string"} + # gateway.lb.discoverer.duration -- discoverer duration + duration: 200ms + # @schema {"name": "gateway.lb.gateway_config.discoverer.discover_client", "alias": "grpc.client"} + # gateway.lb.gateway_config.discoverer.discover_client -- gRPC client for discoverer (overrides defaults.grpc.client) + 
discover_client: {} + # @schema {"name": "gateway.lb.gateway_config.discoverer.agent_client", "alias": "grpc.client"} + # gateway.lb.gateway_config.discoverer.agent_client -- gRPC client for agents (overrides defaults.grpc.client) + agent_client: {} + # @schema {"name": "gateway.meta", "type": "object"} + meta: + # @schema {"name": "gateway.meta.enabled", "type": "boolean"} + # gateway.meta.enabled -- gateway enabled enabled: true - # @schema {"name": "gateway.ingress.annotations", "type": "object"} - # gateway.ingress.annotations -- annotations for ingress - annotations: - nginx.ingress.kubernetes.io/grpc-backend: "true" - # @schema {"name": "gateway.ingress.host", "type": "string"} - # gateway.ingress.host -- ingress hostname - host: vald.gateway.vdaas.org - # @schema {"name": "gateway.ingress.servicePort", "type": "string"} - # gateway.ingress.servicePort -- service port to be exposed by ingress - servicePort: grpc - # @schema {"name": "gateway.resources", "type": "object", "anchor": "resources"} - # gateway.resources -- compute resources - resources: - # @schema {"name": "gateway.resources.requests", "type": "object"} - requests: - cpu: 200m - memory: 150Mi - # @schema {"name": "gateway.resources.limits", "type": "object"} - limits: - cpu: 2000m - memory: 700Mi - # @schema {"name": "gateway.gateway_config", "type": "object"} - gateway_config: - # @schema {"name": "gateway.gateway_config.agent_namespace", "type": "string"} - # gateway.gateway_config.agent_namespace -- agent namespace - agent_namespace: _MY_POD_NAMESPACE_ - # @schema {"name": "gateway.gateway_config.node_namespace", "type": "string"} - # gateway.gateway_config.node_name -- node name - node_name: "" # _MY_NODE_NAME_ - # @schema {"name": "gateway.gateway_config.index_replica", "type": "integer", "minimum": 1} - # gateway.gateway_config.index_replica -- number of index replica - index_replica: 5 - # @schema {"name": "gateway.gateway_config.discoverer", "type": "object"} - discoverer: - # @schema {"name": "gateway.gateway_config.discoverer.duration", "type": "string"} - # gateway.gateway_config.discoverer.duration -- discoverer duration - duration: 200ms - # @schema {"name": "gateway.gateway_config.discoverer.discover_client", "alias": "grpc.client"} - # gateway.gateway_config.discoverer.discover_client -- gRPC client for discoverer (overrides defaults.grpc.client) - discover_client: {} - # @schema {"name": "gateway.gateway_config.discoverer.agent_client", "alias": "grpc.client"} - # gateway.gateway_config.discoverer.agent_client -- gRPC client for agents (overrides defaults.grpc.client) - agent_client: {} - # @schema {"name": "gateway.gateway_config.meta", "type": "object"} - meta: - # @schema {"name": "gateway.gateway_config.meta.client", "alias": "grpc.client"} - # gateway.gateway_config.meta.client -- gRPC client for meta (overrides defaults.grpc.client) - client: {} - # @schema {"name": "gateway.gateway_config.meta.enable_cache", "type": "boolean"} - # gateway.gateway_config.meta.enable_cache -- meta cache enabled - enable_cache: true - # @schema {"name": "gateway.gateway_config.meta.cache_expiration", "type": "string"} - # gateway.gateway_config.meta.cache_expiration -- meta cache expire duration - cache_expiration: "30m" - # @schema {"name": "gateway.gateway_config.meta.expired_cache_check_duration", "type": "string"} - # gateway.gateway_config.meta.expired_cache_check_duration -- meta cache expired check duration - expired_cache_check_duration: "3m" - # @schema {"name": "gateway.gateway_config.backup", "type": 
"object"} - backup: - # @schema {"name": "gateway.gateway_config.backup.client", "alias": "grpc.client"} - # gateway.gateway_config.backup.client -- gRPC client for backup (overrides defaults.grpc.client) - client: {} + # @schema {"name": "gateway.meta.version", "type": "string", "pattern": "^v[0-9]+\\.[0-9]+\\.[0-9]$", "anchor": "version"} + # gateway.meta.version -- version of gateway config + version: v0.0.0 + # @schema {"name": "gateway.meta.time_zone", "type": "string"} + # gateway.meta.time_zone -- Time zone + time_zone: "" + # @schema {"name": "gateway.meta.logging", "alias": "logging"} + # gateway.meta.logging -- logging config (overrides defaults.logging) + logging: {} + # @schema {"name": "gateway.meta.name", "type": "string"} + # gateway.meta.name -- name of gateway deployment + name: vald-gateway-meta + # @schema {"name": "gateway.meta.kind", "type": "string", "enum": ["Deployment", "DaemonSet"]} + # gateway.meta.kind -- deployment kind: Deployment or DaemonSet + kind: Deployment + # @schema {"name": "gateway.meta.serviceType", "type": "string", "enum": ["ClusterIP", "LoadBalancer", "NodePort"]} + # gateway.meta.serviceType -- service type: ClusterIP, LoadBalancer or NodePort + serviceType: ClusterIP + # @schema {"name": "gateway.meta.externalTrafficPolicy", "type": "string"} + # gateway.meta.externalTrafficPolicy -- external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local + externalTrafficPolicy: "" + # @schema {"name": "gateway.meta.progressDeadlineSeconds", "type": "integer"} + # gateway.meta.progressDeadlineSeconds -- progress deadline seconds + progressDeadlineSeconds: 600 + # @schema {"name": "gateway.meta.minReplicas", "type": "integer", "minimum": 0} + # gateway.meta.minReplicas -- minimum number of replicas. + # if HPA is disabled, the replicas will be set to this value + minReplicas: 3 + # @schema {"name": "gateway.meta.maxReplicas", "type": "integer", "minimum": 0} + # gateway.meta.maxReplicas -- maximum number of replicas. + # if HPA is disabled, this value will be ignored. 
+ maxReplicas: 9 + # @schema {"name": "gateway.meta.maxUnavailable", "type": "string"} + # gateway.meta.maxUnavailable -- maximum number of unavailable replicas + maxUnavailable: 50% + # @schema {"name": "gateway.meta.revisionHistoryLimit", "type": "integer", "minimum": 0} + # gateway.meta.revisionHistoryLimit -- number of old history to retain to allow rollback + revisionHistoryLimit: 2 + # @schema {"name": "gateway.meta.terminationGracePeriodSeconds", "type": "integer", "minimum": 0} + # gateway.meta.terminationGracePeriodSeconds -- duration in seconds pod needs to terminate gracefully + terminationGracePeriodSeconds: 30 + # @schema {"name": "gateway.meta.podPriority", "type": "object", "anchor": "podPriority"} + podPriority: + # @schema {"name": "gateway.meta.podPriority.enabled", "type": "boolean"} + # gateway.meta.podPriority.enabled -- gateway pod PriorityClass enabled + enabled: true + # @schema {"name": "gateway.meta.podPriority.value", "type": "integer"} + # gateway.meta.podPriority.value -- gateway pod PriorityClass value + value: 1000000 + # @schema {"name": "gateway.meta.annotations", "type": "object"} + # gateway.meta.annotations -- deployment annotations + annotations: {} + # @schema {"name": "gateway.meta.podAnnotations", "type": "object"} + # gateway.meta.podAnnotations -- pod annotations + podAnnotations: {} + # @schema {"name": "gateway.meta.service", "type": "object", "anchor": "service"} + service: + # @schema {"name": "gateway.meta.service.annotations", "type": "object"} + # gateway.meta.service.annotations -- service annotations + annotations: {} + # @schema {"name": "gateway.meta.service.labels", "type": "object"} + # gateway.meta.service.labels -- service labels + labels: {} + # @schema {"name": "gateway.meta.hpa", "type": "object", "anchor": "hpa"} + hpa: + # @schema {"name": "gateway.meta.hpa.enabled", "type": "boolean"} + # gateway.meta.hpa.enabled -- HPA enabled + enabled: true + # @schema {"name": "gateway.meta.hpa.targetCPUUtilizationPercentage", "type": "integer"} + # gateway.meta.hpa.targetCPUUtilizationPercentage -- HPA CPU utilization percentage + targetCPUUtilizationPercentage: 80 + # @schema {"name": "gateway.meta.image", "type": "object", "anchor": "image"} + image: + # @schema {"name": "gateway.meta.image.repository", "type": "string"} + # gateway.meta.image.repository -- image repository + repository: vdaas/vald-gateway-meta + # @schema {"name": "gateway.meta.image.tag", "type": "string"} + # gateway.meta.image.tag -- image tag (overrides defaults.image.tag) + tag: "" + # @schema {"name": "gateway.meta.image.pullPolicy", "type": "string", "enum": ["Always", "Never", "IfNotPresent"]} + # gateway.meta.image.pullPolicy -- image pull policy + pullPolicy: Always + # @schema {"name": "gateway.meta.rollingUpdate", "type": "object", "anchor": "rollingUpdate"} + rollingUpdate: + # @schema {"name": "gateway.meta.rollingUpdate.maxSurge", "type": "string"} + # gateway.meta.rollingUpdate.maxSurge -- max surge of rolling update + maxSurge: 25% + # @schema {"name": "gateway.meta.rollingUpdate.maxUnavailable", "type": "string"} + # gateway.meta.rollingUpdate.maxUnavailable -- max unavailable of rolling update + maxUnavailable: 25% + # @schema {"name": "gateway.meta.initContainers", "type": "array", "items": {"type": "object"}, "anchor": "initContainers"} + # gateway.meta.initContainers -- init containers + initContainers: + - type: wait-for + name: wait-for-meta + target: meta + image: busybox + sleepDuration: 2 + - type: wait-for + name: wait-for-gateway-backup + 
target: gateway-backup + image: busybox + sleepDuration: 2 + # @schema {"name": "gateway.meta.env", "type": "array", "items": {"type": "object"}, "anchor": "env"} + # gateway.meta.env -- environment variables + env: [] + # @schema {"name": "gateway.meta.volumeMounts", "type": "array", "items": {"type": "object"}, "anchor": "volumeMounts"} + # gateway.meta.volumeMounts -- volume mounts + volumeMounts: [] + # @schema {"name": "gateway.meta.volumes", "type": "array", "items": {"type": "object"}, "anchor": "volumes"} + # gateway.meta.volumes -- volumes + volumes: [] + # @schema {"name": "gateway.meta.nodeName", "type": "string"} + # gateway.meta.nodeName -- node name + nodeName: "" + # @schema {"name": "gateway.meta.nodeSelector", "type": "object", "anchor": "nodeSelector"} + # gateway.meta.nodeSelector -- node selector + nodeSelector: {} + # @schema {"name": "gateway.meta.tolerations", "type": "array", "items": {"type": "object"}, "anchor": "tolerations"} + # gateway.meta.tolerations -- tolerations + tolerations: [] + # @schema {"name": "gateway.meta.affinity", "type": "object", "anchor": "affinity"} + affinity: + # @schema {"name": "gateway.meta.affinity.nodeAffinity", "type": "object"} + nodeAffinity: + # @schema {"name": "gateway.meta.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.meta.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.meta.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "object"} + requiredDuringSchedulingIgnoredDuringExecution: + # @schema {"name": "gateway.meta.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms", "type": "array", "items": {"type": "object"}} + # gateway.meta.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors + nodeSelectorTerms: [] + # @schema {"name": "gateway.meta.affinity.podAffinity", "type": "object"} + podAffinity: + # @schema {"name": "gateway.meta.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.meta.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.meta.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.meta.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.meta.affinity.podAntiAffinity", "type": "object"} + podAntiAffinity: + # @schema {"name": "gateway.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution", "type": "array", "items": {"type": "object"}} + # gateway.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vald-gateway-meta + # @schema {"name": "gateway.meta.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution", 
"type": "array", "items": {"type": "object"}} + # gateway.meta.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "gateway.meta.topologySpreadConstraints", "type": "array", "items": {"type": "object"}, "anchor": "topologySpreadConstraints"} + # gateway.meta.topologySpreadConstraints -- topology spread constraints of gateway pods + topologySpreadConstraints: [] + # @schema {"name": "gateway.meta.server_config", "alias": "server_config"} + # gateway.meta.server_config -- server config (overrides defaults.server_config) + server_config: + servers: + rest: {} + grpc: {} + healths: + liveness: {} + readiness: {} + metrics: + pprof: {} + prometheus: {} + # @schema {"name": "gateway.meta.observability", "alias": "observability"} + # gateway.meta.observability -- observability config (overrides defaults.observability) + observability: + jaeger: + service_name: vald-gateway-meta + stackdriver: + profiler: + service: vald-gateway-meta + # @schema {"name": "gateway.meta.ingress", "type": "object"} + ingress: + # @schema {"name": "gateway.meta.ingress.enabled", "type": "boolean"} + # gateway.meta.ingress.enabled -- gateway ingress enabled + enabled: false + # @schema {"name": "gateway.meta.ingress.annotations", "type": "object"} + # gateway.meta.ingress.annotations -- annotations for ingress + annotations: + nginx.ingress.kubernetes.io/grpc-backend: "true" + # @schema {"name": "gateway.meta.ingress.host", "type": "string"} + # gateway.meta.ingress.host -- ingress hostname + host: meta.gateway.vald.vdaas.org + # @schema {"name": "gateway.meta.ingress.servicePort", "type": "string"} + # gateway.meta.ingress.servicePort -- service port to be exposed by ingress + servicePort: grpc + # @schema {"name": "gateway.meta.resources", "type": "object", "anchor": "resources"} + # gateway.meta.resources -- compute resources + resources: + # @schema {"name": "gateway.meta.resources.requests", "type": "object"} + requests: + cpu: 200m + memory: 150Mi + # @schema {"name": "gateway.meta.resources.limits", "type": "object"} + limits: + cpu: 2000m + memory: 700Mi + # @schema {"name": "gateway.meta.gateway_config", "type": "object"} + gateway_config: + # @schema {"name": "gateway.meta.gateway_config.client", "alias": "grpc.client"} + # gateway.meta.gateway_config.client -- gRPC client for next gateway (overrides defaults.grpc.client) + client: + addrs: + - "vald-gateway-backup.vald.svc.cluster.local" + # @schema {"name": "gateway.meta.gateway_config.meta", "type": "object"} + meta: + # @schema {"name": "gateway.meta.gateway_config.meta.client", "alias": "grpc.client"} + # gateway.meta.gateway_config.meta.client -- gRPC client for meta (overrides defaults.grpc.client) + client: {} + # @schema {"name": "gateway.meta.gateway_config.meta.enable_cache", "type": "boolean"} + # gateway.meta.gateway_config.meta.enable_cache -- meta cache enabled + enable_cache: true + # @schema {"name": "gateway.meta.gateway_config.meta.cache_expiration", "type": "string"} + # gateway.meta.gateway_config.meta.cache_expiration -- meta cache expire duration + cache_expiration: "30m" + # @schema {"name": "gateway.meta.gateway_config.meta.expired_cache_check_duration", "type": "string"} + # gateway.meta.gateway_config.meta.expired_cache_check_duration -- meta cache expired check duration + expired_cache_check_duration: "3m" # @schema {"name": "agent", "type": "object"} agent: @@ -1238,6 +2165,9 @@ agent: # 
@schema {"name": "agent.ngt.auto_save_index_duration", "type": "string"} # agent.ngt.auto_save_index_duration -- duration of automatic save index auto_save_index_duration: 35m + # @schema {"name": "agent.ngt.auto_create_index_pool_size", "type": "integer"} + # agent.ngt.auto_create_index_pool_size -- batch process pool size of automatic create index operation + auto_create_index_pool_size: 10000 # @schema {"name": "agent.ngt.initial_delay_max_duration", "type": "string"} # agent.ngt.initial_delay_max_duration -- maximum duration for initial delay initial_delay_max_duration: 3m @@ -1251,7 +2181,7 @@ agent: # agent.ngt.distance_type -- distance type. # it should be `l1`, `l2`, `angle`, `hamming`, `cosine`, `normalizedangle`, `normalizedcosine` or `jaccard`. # for further details about NGT libraries supported distance is https://github.com/yahoojapan/NGT/wiki/Command-Quick-Reference - # and vald agent's supported NGT distance type is https://pkg.go.dev/github.com/vdaas/vald/internal/core/ngt#pkg-constants + # and vald agent's supported NGT distance type is https://pkg.go.dev/github.com/vdaas/vald/internal/core/algorithm/ngt#pkg-constants distance_type: l2 # @schema {"name": "agent.ngt.object_type", "type": "string", "enum": ["float", "uint8"]} # agent.ngt.object_type -- object type. @@ -1794,861 +2724,864 @@ discoverer: # discoverer.serviceAccount.name -- name of service account name: vald -# @schema {"name": "compressor", "type": "object"} -compressor: - # @schema {"name": "compressor.enabled", "type": "boolean"} - # compressor.enabled -- compressor enabled - enabled: true - # @schema {"name": "compressor.version", "alias": "version"} - # compressor.version -- version of compressor config - version: v0.0.0 - # @schema {"name": "compressor.time_zone", "type": "string"} - # compressor.time_zone -- Time zone - time_zone: "" - # @schema {"name": "compressor.logging", "alias": "logging"} - # compressor.logging -- logging config (overrides defaults.logging) - logging: {} - # @schema {"name": "compressor.name", "type": "string"} - # compressor.name -- name of compressor deployment - name: vald-manager-compressor - # @schema {"name": "compressor.kind", "type": "string", "enum": ["Deployment", "DaemonSet"]} - # compressor.kind -- deployment kind: Deployment or DaemonSet - kind: Deployment - # @schema {"name": "compressor.serviceType", "type": "string", "enum": ["ClusterIP", "LoadBalancer", "NodePort"]} - # compressor.serviceType -- service type: ClusterIP, LoadBalancer or NodePort - serviceType: ClusterIP - # @schema {"name": "compressor.externalTrafficPolicy", "type": "string"} - # compressor.externalTrafficPolicy -- external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local - externalTrafficPolicy: "" - # @schema {"name": "compressor.progressDeadlineSeconds", "type": "integer"} - # compressor.progressDeadlineSeconds -- progress deadline seconds - progressDeadlineSeconds: 600 - # @schema {"name": "compressor.minReplicas", "type": "integer", "minimum": 0} - # compressor.minReplicas -- minimum number of replicas. - # if HPA is disabled, the replicas will be set to this value - minReplicas: 3 - # @schema {"name": "compressor.maxReplicas", "type": "integer", "minimum": 0} - # compressor.maxReplicas -- maximum number of replicas. - # if HPA is disabled, this value will be ignored. 
- maxReplicas: 15 - # @schema {"name": "compressor.maxUnavailable", "type": "string"} - # compressor.maxUnavailable -- maximum number of unavailable replicas - maxUnavailable: "1" - # @schema {"name": "compressor.revisionHistoryLimit", "type": "integer", "minimum": 0} - # compressor.revisionHistoryLimit -- number of old history to retain to allow rollback - revisionHistoryLimit: 2 - # @schema {"name": "compressor.terminationGracePeriodSeconds", "type": "integer", "minimum": 0} - # compressor.terminationGracePeriodSeconds -- duration in seconds pod needs to terminate gracefully - terminationGracePeriodSeconds: 120 - # @schema {"name": "compressor.podPriority", "alias": "podPriority"} - podPriority: - # compressor.podPriority.enabled -- compressor pod PriorityClass enabled +# @schema {"name": "manager", "type": "object"} +manager: + + # @schema {"name": "manager.compressor", "type": "object"} + compressor: + # @schema {"name": "manager.compressor.enabled", "type": "boolean"} + # manager.compressor.enabled -- compressor enabled enabled: true - # compressor.podPriority.value -- compressor pod PriorityClass value - value: 100000000 - # @schema {"name": "compressor.annotations", "type": "object"} - # compressor.annotations -- deployment annotations - annotations: {} - # @schema {"name": "compressor.podAnnotations", "type": "object"} - # compressor.podAnnotations -- pod annotations - podAnnotations: {} - # @schema {"name": "compressor.service", "alias": "service"} - service: - # compressor.service.annotations -- service annotations + # @schema {"name": "manager.compressor.version", "alias": "version"} + # manager.compressor.version -- version of compressor config + version: v0.0.0 + # @schema {"name": "manager.compressor.time_zone", "type": "string"} + # manager.compressor.time_zone -- Time zone + time_zone: "" + # @schema {"name": "manager.compressor.logging", "alias": "logging"} + # manager.compressor.logging -- logging config (overrides defaults.logging) + logging: {} + # @schema {"name": "manager.compressor.name", "type": "string"} + # manager.compressor.name -- name of compressor deployment + name: vald-manager-compressor + # @schema {"name": "manager.compressor.kind", "type": "string", "enum": ["Deployment", "DaemonSet"]} + # manager.compressor.kind -- deployment kind: Deployment or DaemonSet + kind: Deployment + # @schema {"name": "manager.compressor.serviceType", "type": "string", "enum": ["ClusterIP", "LoadBalancer", "NodePort"]} + # manager.compressor.serviceType -- service type: ClusterIP, LoadBalancer or NodePort + serviceType: ClusterIP + # @schema {"name": "manager.compressor.externalTrafficPolicy", "type": "string"} + # manager.compressor.externalTrafficPolicy -- external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local + externalTrafficPolicy: "" + # @schema {"name": "manager.compressor.progressDeadlineSeconds", "type": "integer"} + # manager.compressor.progressDeadlineSeconds -- progress deadline seconds + progressDeadlineSeconds: 600 + # @schema {"name": "manager.compressor.minReplicas", "type": "integer", "minimum": 0} + # manager.compressor.minReplicas -- minimum number of replicas. + # if HPA is disabled, the replicas will be set to this value + minReplicas: 3 + # @schema {"name": "manager.compressor.maxReplicas", "type": "integer", "minimum": 0} + # manager.compressor.maxReplicas -- maximum number of replicas. + # if HPA is disabled, this value will be ignored. 
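
This hunk moves the former top-level compressor values (and, further below, the former backupManager values) under a new manager block, so existing override files would need to be re-nested. A before/after sketch, where the keys come from this patch but the surrounding override file is hypothetical:

  # before (old values layout)
  compressor:
    hpa:
      enabled: true
  backupManager:
    maxUnavailable: 50%

  # after (new values layout introduced by this patch)
  manager:
    compressor:
      hpa:
        enabled: true
    backup:
      maxUnavailable: 50%
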
+ maxReplicas: 15 + # @schema {"name": "manager.compressor.maxUnavailable", "type": "string"} + # manager.compressor.maxUnavailable -- maximum number of unavailable replicas + maxUnavailable: "1" + # @schema {"name": "manager.compressor.revisionHistoryLimit", "type": "integer", "minimum": 0} + # manager.compressor.revisionHistoryLimit -- number of old history to retain to allow rollback + revisionHistoryLimit: 2 + # @schema {"name": "manager.compressor.terminationGracePeriodSeconds", "type": "integer", "minimum": 0} + # manager.compressor.terminationGracePeriodSeconds -- duration in seconds pod needs to terminate gracefully + terminationGracePeriodSeconds: 120 + # @schema {"name": "manager.compressor.podPriority", "alias": "podPriority"} + podPriority: + # manager.compressor.podPriority.enabled -- compressor pod PriorityClass enabled + enabled: true + # manager.compressor.podPriority.value -- compressor pod PriorityClass value + value: 100000000 + # @schema {"name": "manager.compressor.annotations", "type": "object"} + # manager.compressor.annotations -- deployment annotations annotations: {} - # compressor.service.labels -- service labels - labels: {} - # @schema {"name": "compressor.hpa", "alias": "hpa"} - hpa: - # compressor.hpa.enabled -- HPA enabled + # @schema {"name": "manager.compressor.podAnnotations", "type": "object"} + # manager.compressor.podAnnotations -- pod annotations + podAnnotations: {} + # @schema {"name": "manager.compressor.service", "alias": "service"} + service: + # manager.compressor.service.annotations -- service annotations + annotations: {} + # manager.compressor.service.labels -- service labels + labels: {} + # @schema {"name": "manager.compressor.hpa", "alias": "hpa"} + hpa: + # manager.compressor.hpa.enabled -- HPA enabled + enabled: true + # manager.compressor.hpa.targetCPUUtilizationPercentage -- HPA CPU utilization percentage + targetCPUUtilizationPercentage: 80 + # @schema {"name": "manager.compressor.image", "alias": "image"} + image: + # manager.compressor.image.repository -- image repository + repository: vdaas/vald-manager-compressor + # manager.compressor.image.tag -- image tag (overrides defaults.image.tag) + tag: "" + # manager.compressor.image.pullPolicy -- image pull policy + pullPolicy: Always + # @schema {"name": "manager.compressor.rollingUpdate", "alias": "rollingUpdate"} + rollingUpdate: + # manager.compressor.rollingUpdate.maxSurge -- max surge of rolling update + maxSurge: 25% + # manager.compressor.rollingUpdate.maxUnavailable -- max unavailable of rolling update + maxUnavailable: 25% + # @schema {"name": "manager.compressor.initContainers", "alias": "initContainers"} + # manager.compressor.initContainers -- init containers + initContainers: + - type: wait-for + name: wait-for-manager-backup + target: manager-backup + image: busybox + sleepDuration: 2 + # @schema {"name": "manager.compressor.env", "alias": "env"} + # manager.compressor.env -- environment variables + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + # @schema {"name": "manager.compressor.volumeMounts", "alias": "volumeMounts"} + # manager.compressor.volumeMounts -- volume mounts + volumeMounts: [] + # @schema {"name": "manager.compressor.volumes", "alias": "volumes"} + # manager.compressor.volumes -- volumes + volumes: [] + # @schema {"name": "manager.compressor.nodeName", "type": "string"} + # manager.compressor.nodeName -- node name + nodeName: "" + # @schema {"name": "manager.compressor.nodeSelector", "alias": "nodeSelector"} + # 
manager.compressor.nodeSelector -- node selector + nodeSelector: {} + # @schema {"name": "manager.compressor.tolerations", "alias": "tolerations"} + # manager.compressor.tolerations -- tolerations + tolerations: [] + # @schema {"name": "manager.compressor.affinity", "alias": "affinity"} + affinity: + nodeAffinity: + # manager.compressor.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + requiredDuringSchedulingIgnoredDuringExecution: + # manager.compressor.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors + nodeSelectorTerms: [] + podAffinity: + # manager.compressor.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # manager.compressor.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + podAntiAffinity: + # manager.compressor.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # manager.compressor.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "manager.compressor.topologySpreadConstraints", "alias": "topologySpreadConstraints"} + # manager.compressor.topologySpreadConstraints -- topology spread constraints of compressor pods + topologySpreadConstraints: [] + # @schema {"name": "manager.compressor.server_config", "alias": "server_config"} + # manager.compressor.server_config -- server config (overrides defaults.server_config) + server_config: + servers: + rest: {} + grpc: {} + healths: + liveness: + server: + http: + shutdown_duration: 2m + readiness: {} + metrics: + pprof: {} + prometheus: {} + # @schema {"name": "manager.compressor.observability", "alias": "observability"} + # manager.compressor.observability -- observability config (overrides defaults.observability) + observability: + jaeger: + service_name: vald-manager-compressor + stackdriver: + profiler: + service: vald-manager-compressor + # @schema {"name": "manager.compressor.resources", "alias": "resources"} + # manager.compressor.resources -- compute resources + resources: + requests: + cpu: 300m + memory: 50Mi + limits: + cpu: 800m + memory: 500Mi + # @schema {"name": "manager.compressor.backup", "type": "object"} + backup: + # @schema {"name": "manager.compressor.backup.client", "alias": "grpc.client"} + # manager.compressor.backup.client -- grpc client for backup (overrides defaults.grpc.client) + client: {} + # @schema {"name": "manager.compressor.compress", "type": "object"} + compress: + # @schema {"name": "manager.compressor.compress.compress_algorithm", "type": "string", "enum": ["gob", "gzip", "lz4", "zstd"]} + # manager.compressor.compress.compress_algorithm -- compression algorithm. + # must be `gob`, `gzip`, `lz4` or `zstd` + compress_algorithm: zstd + # @schema {"name": "manager.compressor.compress.compression_level", "type": "integer"} + # manager.compressor.compress.compression_level -- compression level. + # value range relies on which algorithm is used. + # `gob`: level will be ignored. 
+ # `gzip`: -1 (default compression), 0 (no compression), or 1 (best speed) to 9 (best compression). + # `lz4`: >= 0, higher is better compression. + # `zstd`: 1 (fastest) to 22 (best), however implementation relies on klauspost/compress. + compression_level: 3 + # @schema {"name": "manager.compressor.compress.concurrent_limit", "type": "integer"} + # manager.compressor.compress.concurrent_limit -- concurrency limit for compress/decompress processes + concurrent_limit: 10 + # @schema {"name": "manager.compressor.compress.queue_check_duration", "type": "string"} + # manager.compressor.compress.queue_check_duration represents duration of queue daemon block + queue_check_duration: 200ms + # @schema {"name": "manager.compressor.registerer", "type": "object"} + registerer: + # @schema {"name": "manager.compressor.registerer.concurrent_limit", "type": "integer"} + # manager.compressor.registerer.concurrent_limit -- concurrency limit for registering vector processes + concurrent_limit: 10 + # @schema {"name": "manager.compressor.registerer.queue_check_duration", "type": "string"} + # manager.compressor.registerer.queue_check_duration represents duration of queue daemon block + queue_check_duration: 200ms + # @schema {"name": "manager.compressor.registerer.compressor", "type": "object"} + compressor: + # @schema {"name": "manager.compressor.registerer.compressor.client", "alias": "grpc.client"} + # manager.compressor.registerer.compressor.client -- gRPC client for compressor (overrides defaults.grpc.client) + client: {} + + # @schema {"name": "manager.backup", "type": "object"} + backup: + # @schema {"name": "manager.backup.enabled", "type": "boolean"} + # manager.backup.enabled -- backup manager enabled enabled: true - # compressor.hpa.targetCPUUtilizationPercentage -- HPA CPU utilization percentage - targetCPUUtilizationPercentage: 80 - # @schema {"name": "compressor.image", "alias": "image"} - image: - # compressor.image.repository -- image repository - repository: vdaas/vald-manager-compressor - # compressor.image.tag -- image tag (overrides defaults.image.tag) - tag: "" - # compressor.image.pullPolicy -- image pull policy - pullPolicy: Always - # @schema {"name": "compressor.rollingUpdate", "alias": "rollingUpdate"} - rollingUpdate: - # compressor.rollingUpdate.maxSurge -- max surge of rolling update - maxSurge: 25% - # compressor.rollingUpdate.maxUnavailable -- max unavailable of rolling update - maxUnavailable: 25% - # @schema {"name": "compressor.initContainers", "alias": "initContainers"} - # compressor.initContainers -- init containers - initContainers: - - type: wait-for - name: wait-for-manager-backup - target: manager-backup - image: busybox - sleepDuration: 2 - # @schema {"name": "compressor.env", "alias": "env"} - # compressor.env -- environment variables - env: - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - # @schema {"name": "compressor.volumeMounts", "alias": "volumeMounts"} - # compressor.volumeMounts -- volume mounts - volumeMounts: [] - # @schema {"name": "compressor.volumes", "alias": "volumes"} - # compressor.volumes -- volumes - volumes: [] - # @schema {"name": "compressor.nodeName", "type": "string"} - # compressor.nodeName -- node name - nodeName: "" - # @schema {"name": "compressor.nodeSelector", "alias": "nodeSelector"} - # compressor.nodeSelector -- node selector - nodeSelector: {} - # @schema {"name": "compressor.tolerations", "alias": "tolerations"} - # compressor.tolerations -- tolerations - tolerations: [] - # @schema {"name": 
"compressor.affinity", "alias": "affinity"} - affinity: - nodeAffinity: - # compressor.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms - preferredDuringSchedulingIgnoredDuringExecution: [] - requiredDuringSchedulingIgnoredDuringExecution: - # compressor.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors - nodeSelectorTerms: [] - podAffinity: - # compressor.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms - preferredDuringSchedulingIgnoredDuringExecution: [] - # compressor.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms - requiredDuringSchedulingIgnoredDuringExecution: [] - podAntiAffinity: - # compressor.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms - preferredDuringSchedulingIgnoredDuringExecution: [] - # compressor.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms - requiredDuringSchedulingIgnoredDuringExecution: [] - # @schema {"name": "compressor.topologySpreadConstraints", "alias": "topologySpreadConstraints"} - # compressor.topologySpreadConstraints -- topology spread constraints of compressor pods - topologySpreadConstraints: [] - # @schema {"name": "compressor.server_config", "alias": "server_config"} - # compressor.server_config -- server config (overrides defaults.server_config) - server_config: - servers: - rest: {} - grpc: {} - healths: - liveness: - server: - http: - shutdown_duration: 2m - readiness: {} - metrics: - pprof: {} - prometheus: {} - # @schema {"name": "compressor.observability", "alias": "observability"} - # compressor.observability -- observability config (overrides defaults.observability) - observability: - jaeger: - service_name: vald-manager-compressor - stackdriver: - profiler: - service: vald-manager-compressor - # @schema {"name": "compressor.resources", "alias": "resources"} - # compressor.resources -- compute resources - resources: - requests: - cpu: 300m - memory: 50Mi - limits: - cpu: 800m - memory: 500Mi - # @schema {"name": "compressor.backup", "type": "object"} - backup: - # @schema {"name": "compressor.backup.client", "alias": "grpc.client"} - # compressor.backup.client -- grpc client for backup (overrides defaults.grpc.client) - client: {} - # @schema {"name": "compressor.compress", "type": "object"} - compress: - # @schema {"name": "compressor.compress.compress_algorithm", "type": "string", "enum": ["gob", "gzip", "lz4", "zstd"]} - # compressor.compress.compress_algorithm -- compression algorithm. - # must be `gob`, `gzip`, `lz4` or `zstd` - compress_algorithm: zstd - # @schema {"name": "compressor.compress.compression_level", "type": "integer"} - # compressor.compress.compression_level -- compression level. - # value range relies on which algorithm is used. - # `gob`: level will be ignored. - # `gzip`: -1 (default compression), 0 (no compression), or 1 (best speed) to 9 (best compression). - # `lz4`: >= 0, higher is better compression. - # `zstd`: 1 (fastest) to 22 (best), however implementation relies on klauspost/compress. 
- compression_level: 3 - # @schema {"name": "compressor.compress.concurrent_limit", "type": "integer"} - # compressor.compress.concurrent_limit -- concurrency limit for compress/decompress processes - concurrent_limit: 10 - # @schema {"name": "compressor.compress.queue_check_duration", "type": "string"} - # compressor.compress.queue_check_duration represents duration of queue daemon block - queue_check_duration: 200ms - # @schema {"name": "compressor.registerer", "type": "object"} - registerer: - # @schema {"name": "compressor.registerer.concurrent_limit", "type": "integer"} - # compressor.registerer.concurrent_limit -- concurrency limit for registering vector processes - concurrent_limit: 10 - # @schema {"name": "compressor.registerer.queue_check_duration", "type": "string"} - # compressor.registerer.queue_check_duration represents duration of queue daemon block - queue_check_duration: 200ms - # @schema {"name": "compressor.registerer.compressor", "type": "object"} - compressor: - # @schema {"name": "compressor.registerer.compressor.client", "alias": "grpc.client"} - # compressor.registerer.compressor.client -- gRPC client for compressor (overrides defaults.grpc.client) - client: {} - -# @schema {"name": "backupManager", "type": "object"} -backupManager: - # @schema {"name": "backupManager.enabled", "type": "boolean"} - # backupManager.enabled -- backup manager enabled - enabled: true - # @schema {"name": "backupManager.version", "alias": "version"} - # backupManager.version -- version of backup manager config - version: v0.0.0 - # @schema {"name": "backupManager.time_zone", "type": "string"} - # backupManager.time_zone -- Time zone - time_zone: "" - # @schema {"name": "backupManager.logging", "alias": "logging"} - # backupManager.logging -- logging config (overrides defaults.logging) - logging: {} - # @schema {"name": "backupManager.name", "type": "string"} - # backupManager.name -- name of backup manager deployment - name: vald-manager-backup - # @schema {"name": "backupManager.kind", "type": "string", "enum": ["Deployment", "DaemonSet"]} - # backupManager.kind -- deployment kind: Deployment or DaemonSet - kind: Deployment - # @schema {"name": "backupManager.serviceType", "type": "string", "enum": ["ClusterIP", "LoadBalancer", "NodePort"]} - # backupManager.serviceType -- service type: ClusterIP, LoadBalancer or NodePort - serviceType: ClusterIP - # @schema {"name": "backupManager.externalTrafficPolicy", "type": "string"} - # backupManager.externalTrafficPolicy -- external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local - externalTrafficPolicy: "" - # @schema {"name": "backupManager.progressDeadlineSeconds", "type": "integer"} - # backupManager.progressDeadlineSeconds -- progress deadline seconds - progressDeadlineSeconds: 600 - # @schema {"name": "backupManager.minReplicas", "type": "integer", "minimum": 0} - # backupManager.minReplicas -- minimum number of replicas. - # if HPA is disabled, the replicas will be set to this value - minReplicas: 3 - # @schema {"name": "backupManager.maxReplicas", "type": "integer", "minimum": 0} - # backupManager.maxReplicas -- maximum number of replicas. - # if HPA is disabled, this value will be ignored. 
- maxReplicas: 15 - # @schema {"name": "backupManager.maxUnavailable", "type": "string"} - # backupManager.maxUnavailable -- maximum number of unavailable replicas - maxUnavailable: 50% - # @schema {"name": "backupManager.revisionHistoryLimit", "type": "integer", "minimum": 0} - # backupManager.revisionHistoryLimit -- number of old history to retain to allow rollback - revisionHistoryLimit: 2 - # @schema {"name": "backupManager.terminationGracePeriodSeconds", "type": "integer", "minimum": 0} - # backupManager.terminationGracePeriodSeconds -- duration in seconds pod needs to terminate gracefully - terminationGracePeriodSeconds: 30 - # @schema {"name": "backupManager.podPriority", "alias": "podPriority"} - podPriority: - # backupManager.podPriority.enabled -- backup manager pod PriorityClass enabled - enabled: true - # backupManager.podPriority.value -- backup manager pod PriorityClass value - value: 1000000 - # @schema {"name": "backupManager.annotations", "type": "object"} - # backupManager.annotations -- deployment annotations - annotations: {} - # @schema {"name": "backupManager.podAnnotations", "type": "object"} - # backupManager.podAnnotations -- pod annotations - podAnnotations: {} - # @schema {"name": "backupManager.service", "alias": "service"} - service: - # backupManager.service.annotations -- service annotations + # @schema {"name": "manager.backup.version", "alias": "version"} + # manager.backup.version -- version of backup manager config + version: v0.0.0 + # @schema {"name": "manager.backup.time_zone", "type": "string"} + # manager.backup.time_zone -- Time zone + time_zone: "" + # @schema {"name": "manager.backup.logging", "alias": "logging"} + # manager.backup.logging -- logging config (overrides defaults.logging) + logging: {} + # @schema {"name": "manager.backup.name", "type": "string"} + # manager.backup.name -- name of backup manager deployment + name: vald-manager-backup + # @schema {"name": "manager.backup.kind", "type": "string", "enum": ["Deployment", "DaemonSet"]} + # manager.backup.kind -- deployment kind: Deployment or DaemonSet + kind: Deployment + # @schema {"name": "manager.backup.serviceType", "type": "string", "enum": ["ClusterIP", "LoadBalancer", "NodePort"]} + # manager.backup.serviceType -- service type: ClusterIP, LoadBalancer or NodePort + serviceType: ClusterIP + # @schema {"name": "manager.backup.externalTrafficPolicy", "type": "string"} + # manager.backup.externalTrafficPolicy -- external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local + externalTrafficPolicy: "" + # @schema {"name": "manager.backup.progressDeadlineSeconds", "type": "integer"} + # manager.backup.progressDeadlineSeconds -- progress deadline seconds + progressDeadlineSeconds: 600 + # @schema {"name": "manager.backup.minReplicas", "type": "integer", "minimum": 0} + # manager.backup.minReplicas -- minimum number of replicas. + # if HPA is disabled, the replicas will be set to this value + minReplicas: 3 + # @schema {"name": "manager.backup.maxReplicas", "type": "integer", "minimum": 0} + # manager.backup.maxReplicas -- maximum number of replicas. + # if HPA is disabled, this value will be ignored. 
+ maxReplicas: 15 + # @schema {"name": "manager.backup.maxUnavailable", "type": "string"} + # manager.backup.maxUnavailable -- maximum number of unavailable replicas + maxUnavailable: 50% + # @schema {"name": "manager.backup.revisionHistoryLimit", "type": "integer", "minimum": 0} + # manager.backup.revisionHistoryLimit -- number of old history to retain to allow rollback + revisionHistoryLimit: 2 + # @schema {"name": "manager.backup.terminationGracePeriodSeconds", "type": "integer", "minimum": 0} + # manager.backup.terminationGracePeriodSeconds -- duration in seconds pod needs to terminate gracefully + terminationGracePeriodSeconds: 30 + # @schema {"name": "manager.backup.podPriority", "alias": "podPriority"} + podPriority: + # manager.backup.podPriority.enabled -- backup manager pod PriorityClass enabled + enabled: true + # manager.backup.podPriority.value -- backup manager pod PriorityClass value + value: 1000000 + # @schema {"name": "manager.backup.annotations", "type": "object"} + # manager.backup.annotations -- deployment annotations annotations: {} - # backupManager.service.labels -- service labels - labels: {} - # @schema {"name": "backupManager.hpa", "alias": "hpa"} - hpa: - # backupManager.hpa.enabled -- HPA enabled - enabled: true - # backupManager.hpa.targetCPUUtilizationPercentage -- HPA CPU utilization percentage - targetCPUUtilizationPercentage: 80 - # @schema {"name": "backupManager.image", "alias": "image"} - image: - # backupManager.image.repository -- image repository - repository: vdaas/vald-manager-backup-mysql - # repository: vdaas/vald-manager-backup-cassandra - # backupManager.image.tag -- image tag (overrides defaults.image.tag) - tag: "" - # backupManager.image.pullPolicy -- image pull policy - pullPolicy: Always - # @schema {"name": "backupManager.rollingUpdate", "alias": "rollingUpdate"} - rollingUpdate: - # backupManager.rollingUpdate.maxSurge -- max surge of rolling update - maxSurge: 25% - # backupManager.rollingUpdate.maxUnavailable -- max unavailable of rolling update - maxUnavailable: 25% - # @schema {"name": "backupManager.initContainers", "alias": "initContainers"} - # backupManager.initContainers -- init containers - initContainers: - - type: wait-for-mysql - name: wait-for-mysql - image: mysql:latest - mysql: + # @schema {"name": "manager.backup.podAnnotations", "type": "object"} + # manager.backup.podAnnotations -- pod annotations + podAnnotations: {} + # @schema {"name": "manager.backup.service", "alias": "service"} + service: + # manager.backup.service.annotations -- service annotations + annotations: {} + # manager.backup.service.labels -- service labels + labels: {} + # @schema {"name": "manager.backup.hpa", "alias": "hpa"} + hpa: + # manager.backup.hpa.enabled -- HPA enabled + enabled: true + # manager.backup.hpa.targetCPUUtilizationPercentage -- HPA CPU utilization percentage + targetCPUUtilizationPercentage: 80 + # @schema {"name": "manager.backup.image", "alias": "image"} + image: + # manager.backup.image.repository -- image repository + repository: vdaas/vald-manager-backup-mysql + # repository: vdaas/vald-manager-backup-cassandra + # manager.backup.image.tag -- image tag (overrides defaults.image.tag) + tag: "" + # manager.backup.image.pullPolicy -- image pull policy + pullPolicy: Always + # @schema {"name": "manager.backup.rollingUpdate", "alias": "rollingUpdate"} + rollingUpdate: + # manager.backup.rollingUpdate.maxSurge -- max surge of rolling update + maxSurge: 25% + # manager.backup.rollingUpdate.maxUnavailable -- max unavailable of 
rolling update + maxUnavailable: 25% + # @schema {"name": "manager.backup.initContainers", "alias": "initContainers"} + # manager.backup.initContainers -- init containers + initContainers: + - type: wait-for-mysql + name: wait-for-mysql + image: mysql:latest + mysql: + hosts: + - mysql.default.svc.cluster.local + options: + - "-uroot" + - "-p${MYSQL_PASSWORD}" + sleepDuration: 2 + env: + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: mysql-secret + key: password + # - type: wait-for-cassandra + # name: wait-for-cassandra + # image: cassandra:latest + # cassandra: + # hosts: + # - cassandra-0.cassandra.default.svc.cluster.local + # - cassandra-1.cassandra.default.svc.cluster.local + # - cassandra-2.cassandra.default.svc.cluster.local + # options: + # - "-uroot" + # - "-p${CASSANDRA_PASSWORD}" + # sleepDuration: 2 + # env: + # - name: CASSANDRA_PASSWORD + # valueFrom: + # secretKeyRef: + # name: cassandra-secret + # key: password + # - type: wait-for-cassandra + # name: wait-for-scylla + # image: cassandra:latest + # cassandra: + # hosts: + # - scylla-0.scylla.default.svc.cluster.local + # - scylla-1.scylla.default.svc.cluster.local + # - scylla-2.scylla.default.svc.cluster.local + # sleepDuration: 2 + # @schema {"name": "manager.backup.env", "alias": "env"} + # manager.backup.env -- environment variables + env: + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: mysql-secret + key: password + # - name: CASSANDRA_PASSWORD + # valueFrom: + # secretKeyRef: + # name: cassandra-secret + # key: password + # @schema {"name": "manager.backup.volumeMounts", "alias": "volumeMounts"} + # manager.backup.volumeMounts -- volume mounts + volumeMounts: [] + # @schema {"name": "manager.backup.volumes", "alias": "volumes"} + # manager.backup.volumes -- volumes + volumes: [] + # @schema {"name": "manager.backup.nodeName", "type": "string"} + # manager.backup.nodeName -- node name + nodeName: "" + # @schema {"name": "manager.backup.nodeSelector", "alias": "nodeSelector"} + # manager.backup.nodeSelector -- node selector + nodeSelector: {} + # @schema {"name": "manager.backup.tolerations", "alias": "tolerations"} + # manager.backup.tolerations -- tolerations + tolerations: [] + # @schema {"name": "manager.backup.affinity", "alias": "affinity"} + affinity: + nodeAffinity: + # manager.backup.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + requiredDuringSchedulingIgnoredDuringExecution: + # manager.backup.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors + nodeSelectorTerms: [] + podAffinity: + # manager.backup.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # manager.backup.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + podAntiAffinity: + # manager.backup.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # manager.backup.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": 
"manager.backup.topologySpreadConstraints", "alias": "topologySpreadConstraints"} + # manager.backup.topologySpreadConstraints -- topology spread constraints of backup manager pods + topologySpreadConstraints: [] + # @schema {"name": "manager.backup.server_config", "alias": "server_config"} + # manager.backup.server_config -- server config (overrides defaults.server_config) + server_config: + servers: + rest: {} + grpc: {} + healths: + liveness: {} + readiness: {} + metrics: + pprof: {} + prometheus: {} + # @schema {"name": "manager.backup.observability", "alias": "observability"} + # manager.backup.observability -- observability config (overrides defaults.observability) + observability: + jaeger: + service_name: vald-manager-backup + stackdriver: + profiler: + service: vald-manager-backup + # @schema {"name": "manager.backup.resources", "alias": "resources"} + # manager.backup.resources -- compute resources + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 500m + memory: 150Mi + # @schema {"name": "manager.backup.mysql", "type": "object"} + mysql: + # @schema {"name": "manager.backup.mysql.enabled", "type": "boolean"} + # manager.backup.mysql.enabled -- mysql config enabled + enabled: true + # @schema {"name": "manager.backup.mysql.config", "type": "object"} + config: + # @schema {"name": "manager.backup.mysql.config.db", "type": "string", "enum": ["mysql", "postgres", "sqlite3"]} + # manager.backup.mysql.config.db -- mysql db: mysql, postgres or sqlite3 + db: mysql + # @schema {"name": "manager.backup.mysql.config.host", "type": "string"} + # manager.backup.mysql.config.host -- mysql hostname + host: mysql.default.svc.cluster.local + # @schema {"name": "manager.backup.mysql.config.port", "type": "integer"} + # manager.backup.mysql.config.port -- mysql port + port: 3306 + # @schema {"name": "manager.backup.mysql.config.user", "type": "string"} + # manager.backup.mysql.config.user -- mysql username + user: root + # @schema {"name": "manager.backup.mysql.config.pass", "type": "string"} + # manager.backup.mysql.config.pass -- mysql password + pass: _MYSQL_PASSWORD_ + # @schema {"name": "manager.backup.mysql.config.name", "type": "string"} + # manager.backup.mysql.config.name -- mysql db name + name: vald + # @schema {"name": "manager.backup.mysql.config.conn_max_life_time", "type": "string"} + # manager.backup.mysql.config.conn_max_life_time -- connection maximum life time + conn_max_life_time: 30s + # @schema {"name": "manager.backup.mysql.config.max_open_conns", "type": "integer"} + # manager.backup.mysql.config.max_open_conns -- maximum number of open connections + max_open_conns: 100 + # @schema {"name": "manager.backup.mysql.config.max_idle_conns", "type": "integer"} + # manager.backup.mysql.config.max_idle_conns -- maximum number of idle connections + max_idle_conns: 100 + # @schema {"name": "manager.backup.mysql.config.tls", "alias": "tls"} + tls: + # manager.backup.mysql.config.tls.enabled -- TLS enabled + enabled: false + # manager.backup.mysql.config.tls.cert -- path to TLS cert + cert: /path/to/cert + # manager.backup.mysql.config.tls.key -- path to TLS key + key: /path/to/key + # manager.backup.mysql.config.tls.ca -- path to TLS ca + ca: /path/to/ca + # @schema {"name": "manager.backup.mysql.config.tls", "alias": "tcp"} + tcp: + dns: + # manager.backup.mysql.config.tcp.dns.cache_enabled -- TCP DNS cache enabled + cache_enabled: true + # manager.backup.mysql.config.tcp.dns.refresh_duration -- TCP DNS cache refresh duration + refresh_duration: 1h + # 
manager.backup.mysql.config.tcp.dns.cache_expiration -- TCP DNS cache expiration + cache_expiration: 24h + dialer: + # manager.backup.mysql.config.tcp.dialer.timeout -- TCP dialer timeout + timeout: 5s + # manager.backup.mysql.config.tcp.dialer.keep_alive -- TCP dialer keep alive + keep_alive: 5m + # manager.backup.mysql.config.tcp.dialer.dual_stack_enabled -- TCP dialer dual stack enabled + dual_stack_enabled: false + tls: + # manager.backup.mysql.config.tcp.tls.enabled -- TCP TLS enabled + enabled: false + # manager.backup.mysql.config.tcp.tls.cert -- path to TCP TLS cert + cert: /path/to/cert + # manager.backup.mysql.config.tcp.tls.key -- path to TCP TLS key + key: /path/to/key + # manager.backup.mysql.config.tcp.tls.ca -- path to TCP TLS ca + ca: /path/to/ca + # @schema {"name": "manager.backup.cassandra", "type": "object", "anchor": "cassandra"} + cassandra: + # @schema {"name": "manager.backup.cassandra.enabled", "type": "boolean"} + # manager.backup.cassandra.enabled -- cassandra config enabled + enabled: false + # @schema {"name": "manager.backup.cassandra.config", "type": "object"} + config: + # @schema {"name": "manager.backup.cassandra.config.hosts", "type": "array", "items": {"type": "string"}} + # manager.backup.cassandra.config.hosts -- cassandra hosts hosts: - - mysql.default.svc.cluster.local - options: - - "-uroot" - - "-p${MYSQL_PASSWORD}" - sleepDuration: 2 - env: - - name: MYSQL_PASSWORD - valueFrom: - secretKeyRef: - name: mysql-secret - key: password - # - type: wait-for-cassandra - # name: wait-for-cassandra - # image: cassandra:latest - # cassandra: - # hosts: - # - cassandra-0.cassandra.default.svc.cluster.local - # - cassandra-1.cassandra.default.svc.cluster.local - # - cassandra-2.cassandra.default.svc.cluster.local - # options: - # - "-uroot" - # - "-p${CASSANDRA_PASSWORD}" - # sleepDuration: 2 - # env: - # - name: CASSANDRA_PASSWORD - # valueFrom: - # secretKeyRef: - # name: cassandra-secret - # key: password - # - type: wait-for-cassandra - # name: wait-for-scylla - # image: cassandra:latest - # cassandra: - # hosts: - # - scylla-0.scylla.default.svc.cluster.local - # - scylla-1.scylla.default.svc.cluster.local - # - scylla-2.scylla.default.svc.cluster.local - # sleepDuration: 2 - # @schema {"name": "backupManager.env", "alias": "env"} - # backupManager.env -- environment variables - env: - - name: MYSQL_PASSWORD - valueFrom: - secretKeyRef: - name: mysql-secret - key: password - # - name: CASSANDRA_PASSWORD - # valueFrom: - # secretKeyRef: - # name: cassandra-secret - # key: password - # @schema {"name": "backupManager.volumeMounts", "alias": "volumeMounts"} - # backupManager.volumeMounts -- volume mounts - volumeMounts: [] - # @schema {"name": "backupManager.volumes", "alias": "volumes"} - # backupManager.volumes -- volumes - volumes: [] - # @schema {"name": "backupManager.nodeName", "type": "string"} - # backupManager.nodeName -- node name - nodeName: "" - # @schema {"name": "backupManager.nodeSelector", "alias": "nodeSelector"} - # backupManager.nodeSelector -- node selector - nodeSelector: {} - # @schema {"name": "backupManager.tolerations", "alias": "tolerations"} - # backupManager.tolerations -- tolerations - tolerations: [] - # @schema {"name": "backupManager.affinity", "alias": "affinity"} - affinity: - nodeAffinity: - # backupManager.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms - preferredDuringSchedulingIgnoredDuringExecution: [] - requiredDuringSchedulingIgnoredDuringExecution: - 
# backupManager.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors - nodeSelectorTerms: [] - podAffinity: - # backupManager.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms - preferredDuringSchedulingIgnoredDuringExecution: [] - # backupManager.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms - requiredDuringSchedulingIgnoredDuringExecution: [] - podAntiAffinity: - # backupManager.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms - preferredDuringSchedulingIgnoredDuringExecution: [] - # backupManager.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms - requiredDuringSchedulingIgnoredDuringExecution: [] - # @schema {"name": "backupManager.topologySpreadConstraints", "alias": "topologySpreadConstraints"} - # backupManager.topologySpreadConstraints -- topology spread constraints of backup manager pods - topologySpreadConstraints: [] - # @schema {"name": "backupManager.server_config", "alias": "server_config"} - # backupManager.server_config -- server config (overrides defaults.server_config) - server_config: - servers: - rest: {} - grpc: {} - healths: - liveness: {} - readiness: {} - metrics: - pprof: {} - prometheus: {} - # @schema {"name": "backupManager.observability", "alias": "observability"} - # backupManager.observability -- observability config (overrides defaults.observability) - observability: - jaeger: - service_name: vald-manager-backup - stackdriver: - profiler: - service: vald-manager-backup - # @schema {"name": "backupManager.resources", "alias": "resources"} - # backupManager.resources -- compute resources - resources: - requests: - cpu: 100m - memory: 50Mi - limits: - cpu: 500m - memory: 150Mi - # @schema {"name": "backupManager.mysql", "type": "object"} - mysql: - # @schema {"name": "backupManager.mysql.enabled", "type": "boolean"} - # backupManager.mysql.enabled -- mysql config enabled - enabled: true - # @schema {"name": "backupManager.mysql.config", "type": "object"} - config: - # @schema {"name": "backupManager.mysql.config.db", "type": "string", "enum": ["mysql", "postgres", "sqlite3"]} - # backupManager.mysql.config.db -- mysql db: mysql, postgres or sqlite3 - db: mysql - # @schema {"name": "backupManager.mysql.config.host", "type": "string"} - # backupManager.mysql.config.host -- mysql hostname - host: mysql.default.svc.cluster.local - # @schema {"name": "backupManager.mysql.config.port", "type": "integer"} - # backupManager.mysql.config.port -- mysql port - port: 3306 - # @schema {"name": "backupManager.mysql.config.user", "type": "string"} - # backupManager.mysql.config.user -- mysql username - user: root - # @schema {"name": "backupManager.mysql.config.pass", "type": "string"} - # backupManager.mysql.config.pass -- mysql password - pass: _MYSQL_PASSWORD_ - # @schema {"name": "backupManager.mysql.config.name", "type": "string"} - # backupManager.mysql.config.name -- mysql db name - name: vald - # @schema {"name": "backupManager.mysql.config.conn_max_life_time", "type": "string"} - # backupManager.mysql.config.conn_max_life_time -- connection maximum life time - conn_max_life_time: 30s - # @schema {"name": "backupManager.mysql.config.max_open_conns", "type": "integer"} - # backupManager.mysql.config.max_open_conns -- maximum number 
of open connections - max_open_conns: 100 - # @schema {"name": "backupManager.mysql.config.max_idle_conns", "type": "integer"} - # backupManager.mysql.config.max_idle_conns -- maximum number of idle connections - max_idle_conns: 100 - # @schema {"name": "backupManager.mysql.config.tls", "alias": "tls"} - tls: - # backupManager.mysql.config.tls.enabled -- TLS enabled - enabled: false - # backupManager.mysql.config.tls.cert -- path to TLS cert - cert: /path/to/cert - # backupManager.mysql.config.tls.key -- path to TLS key - key: /path/to/key - # backupManager.mysql.config.tls.ca -- path to TLS ca - ca: /path/to/ca - # @schema {"name": "backupManager.mysql.config.tls", "alias": "tcp"} - tcp: - dns: - # backupManager.mysql.config.tcp.dns.cache_enabled -- TCP DNS cache enabled - cache_enabled: true - # backupManager.mysql.config.tcp.dns.refresh_duration -- TCP DNS cache refresh duration - refresh_duration: 1h - # backupManager.mysql.config.tcp.dns.cache_expiration -- TCP DNS cache expiration - cache_expiration: 24h - dialer: - # backupManager.mysql.config.tcp.dialer.timeout -- TCP dialer timeout - timeout: 5s - # backupManager.mysql.config.tcp.dialer.keep_alive -- TCP dialer keep alive - keep_alive: 5m - # backupManager.mysql.config.tcp.dialer.dual_stack_enabled -- TCP dialer dual stack enabled - dual_stack_enabled: false + - cassandra-0.cassandra.default.svc.cluster.local + - cassandra-1.cassandra.default.svc.cluster.local + - cassandra-2.cassandra.default.svc.cluster.local + # - scylla-0.scylla.default.svc.cluster.local + # - scylla-1.scylla.default.svc.cluster.local + # - scylla-2.scylla.default.svc.cluster.local + # @schema {"name": "manager.backup.cassandra.config.cql_version", "type": "string"} + # manager.backup.cassandra.config.cql_version -- cassandra CQL version + cql_version: 3.0.0 + # @schema {"name": "manager.backup.cassandra.config.proto_version", "type": "integer"} + # manager.backup.cassandra.config.proto_version -- cassandra proto version + proto_version: 0 + # @schema {"name": "manager.backup.cassandra.config.timeout", "type": "string"} + # manager.backup.cassandra.config.timeout -- timeout + timeout: 600ms + # @schema {"name": "manager.backup.cassandra.config.connect_timeout", "type": "string"} + # manager.backup.cassandra.config.connect_timeout -- connect timeout + connect_timeout: 3s + # @schema {"name": "manager.backup.cassandra.config.port", "type": "integer"} + # manager.backup.cassandra.config.port -- cassandra port + port: 9042 + # @schema {"name": "manager.backup.cassandra.config.keyspace", "type": "string"} + # manager.backup.cassandra.config.keyspace -- cassandra keyspace + keyspace: vald + # @schema {"name": "manager.backup.cassandra.config.num_conns", "type": "integer"} + # manager.backup.cassandra.config.num_conns -- number of connections per hosts + num_conns: 2 + # @schema {"name": "manager.backup.cassandra.config.consistency", "type": "string", "enum": ["any", "one", "two", "three", "all", "quorum", "localquorum", "eachquorum", "localone"]} + # manager.backup.cassandra.config.consistency -- consistency type + consistency: quorum + # @schema {"name": "manager.backup.cassandra.config.serial_consistency", "type": "string", "enum": ["localserial", "serial"]} + # manager.backup.cassandra.config.serial_consistency -- read consistency type + serial_consistency: localserial + # @schema {"name": "manager.backup.cassandra.config.username", "type": "string"} + # manager.backup.cassandra.config.username -- cassandra username + username: root + # @schema {"name": 
"manager.backup.cassandra.config.password", "type": "string"} + # manager.backup.cassandra.config.password -- cassandra password + password: _CASSANDRA_PASSWORD_ + # @schema {"name": "manager.backup.cassandra.config.retry_policy", "type": "object"} + retry_policy: + # @schema {"name": "manager.backup.cassandra.config.retry_policy.num_retries", "type": "integer"} + # manager.backup.cassandra.config.retry_policy.num_retries -- number of retries + num_retries: 3 + # @schema {"name": "manager.backup.cassandra.config.retry_policy.min_duration", "type": "string"} + # manager.backup.cassandra.config.retry_policy.min_duration -- minimum duration to retry + min_duration: 10ms + # @schema {"name": "manager.backup.cassandra.config.retry_policy.max_duration", "type": "string"} + # manager.backup.cassandra.config.retry_policy.max_duration -- maximum duration to retry + max_duration: 1s + # @schema {"name": "manager.backup.cassandra.config.reconnection_policy", "type": "object"} + reconnection_policy: + # @schema {"name": "manager.backup.cassandra.config.reconnection_policy.max_retries", "type": "integer"} + # manager.backup.cassandra.config.reconnection_policy.max_retries -- maximum number of retries to reconnect + max_retries: 3 + # @schema {"name": "manager.backup.cassandra.config.reconnection_policy.initial_interval", "type": "string"} + # manager.backup.cassandra.config.reconnection_policy.initial_interval -- initial interval to reconnect + initial_interval: 100ms + # @schema {"name": "manager.backup.cassandra.config.socket_keepalive", "type": "string"} + # manager.backup.cassandra.config.socket_keepalive -- socket keep alive time + socket_keepalive: 0s + # @schema {"name": "manager.backup.cassandra.config.max_prepared_stmts", "type": "integer"} + # manager.backup.cassandra.config.max_prepared_stmts -- maximum number of prepared statements + max_prepared_stmts: 1000 + # @schema {"name": "manager.backup.cassandra.config.max_routing_key_info", "type": "integer"} + # manager.backup.cassandra.config.max_routing_key_info -- maximum number of routing key info + max_routing_key_info: 1000 + # @schema {"name": "manager.backup.cassandra.config.page_size", "type": "integer"} + # manager.backup.cassandra.config.page_size -- page size + page_size: 5000 + # @schema {"name": "manager.backup.cassandra.config.tls", "alias": "tls"} tls: - # backupManager.mysql.config.tcp.tls.enabled -- TCP TLS enabled + # manager.backup.cassandra.config.tls.enabled -- TLS enabled enabled: false - # backupManager.mysql.config.tcp.tls.cert -- path to TCP TLS cert + # manager.backup.cassandra.config.tls.cert -- path to TLS cert cert: /path/to/cert - # backupManager.mysql.config.tcp.tls.key -- path to TCP TLS key + # manager.backup.cassandra.config.tls.key -- path to TLS key key: /path/to/key - # backupManager.mysql.config.tcp.tls.ca -- path to TCP TLS ca + # manager.backup.cassandra.config.tls.ca -- path to TLS ca ca: /path/to/ca - # @schema {"name": "backupManager.cassandra", "type": "object", "anchor": "cassandra"} - cassandra: - # @schema {"name": "backupManager.cassandra.enabled", "type": "boolean"} - # backupManager.cassandra.enabled -- cassandra config enabled - enabled: false - # @schema {"name": "backupManager.cassandra.config", "type": "object"} - config: - # @schema {"name": "backupManager.cassandra.config.hosts", "type": "array", "items": {"type": "string"}} - # backupManager.cassandra.config.hosts -- cassandra hosts - hosts: - - cassandra-0.cassandra.default.svc.cluster.local - - 
cassandra-1.cassandra.default.svc.cluster.local - - cassandra-2.cassandra.default.svc.cluster.local - # - scylla-0.scylla.default.svc.cluster.local - # - scylla-1.scylla.default.svc.cluster.local - # - scylla-2.scylla.default.svc.cluster.local - # @schema {"name": "backupManager.cassandra.config.cql_version", "type": "string"} - # backupManager.cassandra.config.cql_version -- cassandra CQL version - cql_version: 3.0.0 - # @schema {"name": "backupManager.cassandra.config.proto_version", "type": "integer"} - # backupManager.cassandra.config.proto_version -- cassandra proto version - proto_version: 0 - # @schema {"name": "backupManager.cassandra.config.timeout", "type": "string"} - # backupManager.cassandra.config.timeout -- timeout - timeout: 600ms - # @schema {"name": "backupManager.cassandra.config.connect_timeout", "type": "string"} - # backupManager.cassandra.config.connect_timeout -- connect timeout - connect_timeout: 3s - # @schema {"name": "backupManager.cassandra.config.port", "type": "integer"} - # backupManager.cassandra.config.port -- cassandra port - port: 9042 - # @schema {"name": "backupManager.cassandra.config.keyspace", "type": "string"} - # backupManager.cassandra.config.keyspace -- cassandra keyspace - keyspace: vald - # @schema {"name": "backupManager.cassandra.config.num_conns", "type": "integer"} - # backupManager.cassandra.config.num_conns -- number of connections per hosts - num_conns: 2 - # @schema {"name": "backupManager.cassandra.config.consistency", "type": "string", "enum": ["any", "one", "two", "three", "all", "quorum", "localquorum", "eachquorum", "localone"]} - # backupManager.cassandra.config.consistency -- consistency type - consistency: quorum - # @schema {"name": "backupManager.cassandra.config.serial_consistency", "type": "string", "enum": ["localserial", "serial"]} - # backupManager.cassandra.config.serial_consistency -- read consistency type - serial_consistency: localserial - # @schema {"name": "backupManager.cassandra.config.username", "type": "string"} - # backupManager.cassandra.config.username -- cassandra username - username: root - # @schema {"name": "backupManager.cassandra.config.password", "type": "string"} - # backupManager.cassandra.config.password -- cassandra password - password: _CASSANDRA_PASSWORD_ - # @schema {"name": "backupManager.cassandra.config.retry_policy", "type": "object"} - retry_policy: - # @schema {"name": "backupManager.cassandra.config.retry_policy.num_retries", "type": "integer"} - # backupManager.cassandra.config.retry_policy.num_retries -- number of retries - num_retries: 3 - # @schema {"name": "backupManager.cassandra.config.retry_policy.min_duration", "type": "string"} - # backupManager.cassandra.config.retry_policy.min_duration -- minimum duration to retry - min_duration: 10ms - # @schema {"name": "backupManager.cassandra.config.retry_policy.max_duration", "type": "string"} - # backupManager.cassandra.config.retry_policy.max_duration -- maximum duration to retry - max_duration: 1s - # @schema {"name": "backupManager.cassandra.config.reconnection_policy", "type": "object"} - reconnection_policy: - # @schema {"name": "backupManager.cassandra.config.reconnection_policy.max_retries", "type": "integer"} - # backupManager.cassandra.config.reconnection_policy.max_retries -- maximum number of retries to reconnect - max_retries: 3 - # @schema {"name": "backupManager.cassandra.config.reconnection_policy.initial_interval", "type": "string"} - # backupManager.cassandra.config.reconnection_policy.initial_interval -- initial 
interval to reconnect - initial_interval: 100ms - # @schema {"name": "backupManager.cassandra.config.socket_keepalive", "type": "string"} - # backupManager.cassandra.config.socket_keepalive -- socket keep alive time - socket_keepalive: 0s - # @schema {"name": "backupManager.cassandra.config.max_prepared_stmts", "type": "integer"} - # backupManager.cassandra.config.max_prepared_stmts -- maximum number of prepared statements - max_prepared_stmts: 1000 - # @schema {"name": "backupManager.cassandra.config.max_routing_key_info", "type": "integer"} - # backupManager.cassandra.config.max_routing_key_info -- maximum number of routing key info - max_routing_key_info: 1000 - # @schema {"name": "backupManager.cassandra.config.page_size", "type": "integer"} - # backupManager.cassandra.config.page_size -- page size - page_size: 5000 - # @schema {"name": "backupManager.cassandra.config.tls", "alias": "tls"} - tls: - # backupManager.cassandra.config.tls.enabled -- TLS enabled - enabled: false - # backupManager.cassandra.config.tls.cert -- path to TLS cert - cert: /path/to/cert - # backupManager.cassandra.config.tls.key -- path to TLS key - key: /path/to/key - # backupManager.cassandra.config.tls.ca -- path to TLS ca - ca: /path/to/ca - # @schema {"name": "backupManager.cassandra.config.tcp", "alias": "tcp"} - tcp: - dns: - # backupManager.cassandra.config.tcp.dns.cache_enabled -- TCP DNS cache enabled - cache_enabled: true - # backupManager.cassandra.config.tcp.dns.refresh_duration -- TCP DNS cache refresh duration - refresh_duration: 5m - # backupManager.cassandra.config.tcp.dns.cache_expiration -- TCP DNS cache expiration - cache_expiration: 24h - dialer: - # backupManager.cassandra.config.tcp.dialer.timeout -- TCP dialer timeout - timeout: 30s - # backupManager.cassandra.config.tcp.dialer.keep_alive -- TCP dialer keep alive - keep_alive: 10m - # backupManager.cassandra.config.tcp.dialer.dual_stack_enabled -- TCP dialer dual stack enabled - dual_stack_enabled: false - # @schema {"name": "backupManager.cassandra.config.enable_host_verification", "type": "boolean"} - # backupManager.cassandra.config.enable_host_verification -- host verification enabled - enable_host_verification: false - # @schema {"name": "backupManager.cassandra.config.default_timestamp", "type": "boolean"} - # backupManager.cassandra.config.default_timestamp -- default timestamp enabled - default_timestamp: true - # @schema {"name": "backupManager.cassandra.config.reconnect_interval", "type": "string"} - # backupManager.cassandra.config.reconnect_interval -- interval of reconnection - reconnect_interval: 100ms - # @schema {"name": "backupManager.cassandra.config.max_wait_schema_agreement", "type": "string"} - # backupManager.cassandra.config.max_wait_schema_agreement -- maximum duration to wait for schema agreement - max_wait_schema_agreement: 1m - # @schema {"name": "backupManager.cassandra.config.ignore_peer_addr", "type": "boolean"} - # backupManager.cassandra.config.ignore_peer_addr -- ignore peer addresses - ignore_peer_addr: false - # @schema {"name": "backupManager.cassandra.config.disable_initial_host_lookup", "type": "boolean"} - # backupManager.cassandra.config.disable_initial_host_lookup -- initial host lookup disabled - disable_initial_host_lookup: false - # @schema {"name": "backupManager.cassandra.config.disable_node_status_events", "type": "boolean"} - # backupManager.cassandra.config.disable_node_status_events -- node status events disabled - disable_node_status_events: false - # @schema {"name": 
"backupManager.cassandra.config.disable_topology_events", "type": "boolean"} - # backupManager.cassandra.config.disable_topology_events -- topology events disabled - disable_topology_events: false - # @schema {"name": "backupManager.cassandra.config.disable_skip_metadata", "type": "boolean"} - # backupManager.cassandra.config.disable_skip_metadata -- skip metadata disabled - disable_skip_metadata: false - # @schema {"name": "backupManager.cassandra.config.default_idempotence", "type": "boolean"} - # backupManager.cassandra.config.default_idempotence -- default idempotence enabled - default_idempotence: false - # @schema {"name": "backupManager.cassandra.config.write_coalesce_wait_time", "type": "string"} - # backupManager.cassandra.config.write_coalesce_wait_time -- write coalesce wait time - write_coalesce_wait_time: 200µs - # @schema {"name": "backupManager.cassandra.config.meta_table", "type": "string"} - # backupManager.cassandra.config.meta_table -- table name of backup - meta_table: meta_vector - # @schema {"name": "backupManager.cassandra.config.pool_config", "type": "object"} - pool_config: - # @schema {"name": "backupManager.cassandra.config.pool_config.data_center", "type": "string"} - # backupManager.cassandra.config.pool_config.data_center -- name of data center - data_center: "" - # @schema {"name": "backupManager.cassandra.config.pool_config.dc_aware_routing", "type": "boolean"} - # backupManager.cassandra.config.pool_config.dc_aware_routing -- data center aware routine enabled - dc_aware_routing: false - # @schema {"name": "backupManager.cassandra.config.pool_config.non_local_replicas_fallback", "type": "boolean"} - # backupManager.cassandra.config.pool_config.non_local_replicas_fallback -- non-local replica fallback enabled - non_local_replicas_fallback: false - # @schema {"name": "backupManager.cassandra.config.pool_config.shuffle_replicas", "type": "boolean"} - # backupManager.cassandra.config.pool_config.shuffle_replicas -- shuffle replica enabled - shuffle_replicas: false - # @schema {"name": "backupManager.cassandra.config.pool_config.token_aware_host_policy", "type": "boolean"} - # backupManager.cassandra.config.pool_config.token_aware_host_policy -- token aware host policy enabled - token_aware_host_policy: false - # @schema {"name": "backupManager.cassandra.config.host_filter", "type": "object"} - host_filter: - # @schema {"name": "backupManager.cassandra.config.host_filter.enabled", "type": "boolean"} - # backupManager.cassandra.config.host_filter.enabled -- enables host filtering - enabled: false - # @schema {"name": "backupManager.cassandra.config.host_filter.data_center", "type": "string"} - # backupManager.cassandra.config.host_filter.data_center -- name of data center of filtering target - data_center: "" - # @schema {"name": "backupManager.cassandra.config.host_filter.white_list", "type": "array", "items": {"type": "string"}} - # backupManager.cassandra.config.host_filter.white_list -- list of white_list which allows each connection - white_list: [] + # @schema {"name": "manager.backup.cassandra.config.tcp", "alias": "tcp"} + tcp: + dns: + # manager.backup.cassandra.config.tcp.dns.cache_enabled -- TCP DNS cache enabled + cache_enabled: true + # manager.backup.cassandra.config.tcp.dns.refresh_duration -- TCP DNS cache refresh duration + refresh_duration: 5m + # manager.backup.cassandra.config.tcp.dns.cache_expiration -- TCP DNS cache expiration + cache_expiration: 24h + dialer: + # manager.backup.cassandra.config.tcp.dialer.timeout -- TCP dialer timeout + 
timeout: 30s + # manager.backup.cassandra.config.tcp.dialer.keep_alive -- TCP dialer keep alive + keep_alive: 10m + # manager.backup.cassandra.config.tcp.dialer.dual_stack_enabled -- TCP dialer dual stack enabled + dual_stack_enabled: false + # @schema {"name": "manager.backup.cassandra.config.enable_host_verification", "type": "boolean"} + # manager.backup.cassandra.config.enable_host_verification -- host verification enabled + enable_host_verification: false + # @schema {"name": "manager.backup.cassandra.config.default_timestamp", "type": "boolean"} + # manager.backup.cassandra.config.default_timestamp -- default timestamp enabled + default_timestamp: true + # @schema {"name": "manager.backup.cassandra.config.reconnect_interval", "type": "string"} + # manager.backup.cassandra.config.reconnect_interval -- interval of reconnection + reconnect_interval: 100ms + # @schema {"name": "manager.backup.cassandra.config.max_wait_schema_agreement", "type": "string"} + # manager.backup.cassandra.config.max_wait_schema_agreement -- maximum duration to wait for schema agreement + max_wait_schema_agreement: 1m + # @schema {"name": "manager.backup.cassandra.config.ignore_peer_addr", "type": "boolean"} + # manager.backup.cassandra.config.ignore_peer_addr -- ignore peer addresses + ignore_peer_addr: false + # @schema {"name": "manager.backup.cassandra.config.disable_initial_host_lookup", "type": "boolean"} + # manager.backup.cassandra.config.disable_initial_host_lookup -- initial host lookup disabled + disable_initial_host_lookup: false + # @schema {"name": "manager.backup.cassandra.config.disable_node_status_events", "type": "boolean"} + # manager.backup.cassandra.config.disable_node_status_events -- node status events disabled + disable_node_status_events: false + # @schema {"name": "manager.backup.cassandra.config.disable_topology_events", "type": "boolean"} + # manager.backup.cassandra.config.disable_topology_events -- topology events disabled + disable_topology_events: false + # @schema {"name": "manager.backup.cassandra.config.disable_skip_metadata", "type": "boolean"} + # manager.backup.cassandra.config.disable_skip_metadata -- skip metadata disabled + disable_skip_metadata: false + # @schema {"name": "manager.backup.cassandra.config.default_idempotence", "type": "boolean"} + # manager.backup.cassandra.config.default_idempotence -- default idempotence enabled + default_idempotence: false + # @schema {"name": "manager.backup.cassandra.config.write_coalesce_wait_time", "type": "string"} + # manager.backup.cassandra.config.write_coalesce_wait_time -- write coalesce wait time + write_coalesce_wait_time: 200µs + # @schema {"name": "manager.backup.cassandra.config.vector_backup_table", "type": "string"} + # manager.backup.cassandra.config.vector_backup_table -- table name of backup + vector_backup_table: backup_vector + # @schema {"name": "manager.backup.cassandra.config.pool_config", "type": "object"} + pool_config: + # @schema {"name": "manager.backup.cassandra.config.pool_config.data_center", "type": "string"} + # manager.backup.cassandra.config.pool_config.data_center -- name of data center + data_center: "" + # @schema {"name": "manager.backup.cassandra.config.pool_config.dc_aware_routing", "type": "boolean"} + # manager.backup.cassandra.config.pool_config.dc_aware_routing -- data center aware routine enabled + dc_aware_routing: false + # @schema {"name": "manager.backup.cassandra.config.pool_config.non_local_replicas_fallback", "type": "boolean"} + # 
manager.backup.cassandra.config.pool_config.non_local_replicas_fallback -- non-local replica fallback enabled + non_local_replicas_fallback: false + # @schema {"name": "manager.backup.cassandra.config.pool_config.shuffle_replicas", "type": "boolean"} + # manager.backup.cassandra.config.pool_config.shuffle_replicas -- shuffle replica enabled + shuffle_replicas: false + # @schema {"name": "manager.backup.cassandra.config.pool_config.token_aware_host_policy", "type": "boolean"} + # manager.backup.cassandra.config.pool_config.token_aware_host_policy -- token aware host policy enabled + token_aware_host_policy: false + # @schema {"name": "manager.backup.cassandra.config.host_filter", "type": "object"} + host_filter: + # @schema {"name": "manager.backup.cassandra.config.host_filter.enabled", "type": "boolean"} + # manager.backup.cassandra.config.host_filter.enabled -- enables host filtering + enabled: false + # @schema {"name": "manager.backup.cassandra.config.host_filter.data_center", "type": "string"} + # manager.backup.cassandra.config.host_filter.data_center -- name of data center of filtering target + data_center: "" + # @schema {"name": "manager.backup.cassandra.config.host_filter.white_list", "type": "array", "items": {"type": "string"}} + # manager.backup.cassandra.config.host_filter.white_list -- list of white_list which allows each connection + white_list: [] -# @schema {"name": "indexManager", "type": "object"} -indexManager: - # @schema {"name": "indexManager.enabled", "type": "boolean"} - # indexManager.enabled -- index manager enabled - enabled: true - # @schema {"name": "indexManager.version", "alias": "version"} - # indexManager.version -- version of index manager config - version: v0.0.0 - # @schema {"name": "indexManager.time_zone", "type": "string"} - # indexManager.time_zone -- Time zone - time_zone: "" - # @schema {"name": "indexManager.logging", "alias": "logging"} - # indexManager.logging -- logging config (overrides defaults.logging) - logging: {} - # @schema {"name": "indexManager.name", "type": "string"} - # indexManager.name -- name of index manager deployment - name: vald-manager-index - # @schema {"name": "indexManager.kind", "type": "string", "enum": ["Deployment", "DaemonSet"]} - # indexManager.kind -- deployment kind: Deployment or DaemonSet - kind: Deployment - # @schema {"name": "indexManager.serviceType", "type": "string", "enum": ["ClusterIP", "LoadBalancer", "NodePort"]} - # indexManager.serviceType -- service type: ClusterIP, LoadBalancer or NodePort - serviceType: ClusterIP - # @schema {"name": "indexManager.externalTrafficPolicy", "type": "string"} - # indexManager.externalTrafficPolicy -- external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local - externalTrafficPolicy: "" - # @schema {"name": "indexManager.progressDeadlineSeconds", "type": "integer"} - # indexManager.progressDeadlineSeconds -- progress deadline seconds - progressDeadlineSeconds: 600 - # @schema {"name": "indexManager.replicas", "type": "integer", "minimum": 0} - # indexManager.replicas -- number of replicas - replicas: 1 - # @schema {"name": "indexManager.maxUnavailable", "type": "string"} - # indexManager.maxUnavailable -- maximum number of unavailable replicas - maxUnavailable: 50% - # @schema {"name": "indexManager.revisionHistoryLimit", "type": "integer", "minimum": 0} - # indexManager.revisionHistoryLimit -- number of old history to retain to allow rollback - revisionHistoryLimit: 2 - # @schema {"name": 
"indexManager.terminationGracePeriodSeconds", "type": "integer", "minimum": 0} - # indexManager.terminationGracePeriodSeconds -- duration in seconds pod needs to terminate gracefully - terminationGracePeriodSeconds: 30 - # @schema {"name": "indexManager.podPriority", "alias": "podPriority"} - podPriority: - # indexManager.podPriority.enabled -- index manager pod PriorityClass enabled + # @schema {"name": "manager.index", "type": "object"} + index: + # @schema {"name": "manager.index.enabled", "type": "boolean"} + # manager.index.enabled -- index manager enabled enabled: true - # indexManager.podPriority.value -- index manager pod PriorityClass value - value: 1000000 - # @schema {"name": "indexManager.annotations", "type": "object"} - # indexManager.annotations -- deployment annotations - annotations: {} - # @schema {"name": "indexManager.podAnnotations", "type": "object"} - # indexManager.podAnnotations -- pod annotations - podAnnotations: {} - # @schema {"name": "indexManager.service", "alias": "service"} - service: - # indexManager.service.annotations -- service annotations + # @schema {"name": "manager.index.version", "alias": "version"} + # manager.index.version -- version of index manager config + version: v0.0.0 + # @schema {"name": "manager.index.time_zone", "type": "string"} + # manager.index.time_zone -- Time zone + time_zone: "" + # @schema {"name": "manager.index.logging", "alias": "logging"} + # manager.index.logging -- logging config (overrides defaults.logging) + logging: {} + # @schema {"name": "manager.index.name", "type": "string"} + # manager.index.name -- name of index manager deployment + name: vald-manager-index + # @schema {"name": "manager.index.kind", "type": "string", "enum": ["Deployment", "DaemonSet"]} + # manager.index.kind -- deployment kind: Deployment or DaemonSet + kind: Deployment + # @schema {"name": "manager.index.serviceType", "type": "string", "enum": ["ClusterIP", "LoadBalancer", "NodePort"]} + # manager.index.serviceType -- service type: ClusterIP, LoadBalancer or NodePort + serviceType: ClusterIP + # @schema {"name": "manager.index.externalTrafficPolicy", "type": "string"} + # manager.index.externalTrafficPolicy -- external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local + externalTrafficPolicy: "" + # @schema {"name": "manager.index.progressDeadlineSeconds", "type": "integer"} + # manager.index.progressDeadlineSeconds -- progress deadline seconds + progressDeadlineSeconds: 600 + # @schema {"name": "manager.index.replicas", "type": "integer", "minimum": 0} + # manager.index.replicas -- number of replicas + replicas: 1 + # @schema {"name": "manager.index.maxUnavailable", "type": "string"} + # manager.index.maxUnavailable -- maximum number of unavailable replicas + maxUnavailable: 50% + # @schema {"name": "manager.index.revisionHistoryLimit", "type": "integer", "minimum": 0} + # manager.index.revisionHistoryLimit -- number of old history to retain to allow rollback + revisionHistoryLimit: 2 + # @schema {"name": "manager.index.terminationGracePeriodSeconds", "type": "integer", "minimum": 0} + # manager.index.terminationGracePeriodSeconds -- duration in seconds pod needs to terminate gracefully + terminationGracePeriodSeconds: 30 + # @schema {"name": "manager.index.podPriority", "alias": "podPriority"} + podPriority: + # manager.index.podPriority.enabled -- index manager pod PriorityClass enabled + enabled: true + # manager.index.podPriority.value -- index manager pod PriorityClass value + value: 1000000 
+ # @schema {"name": "manager.index.annotations", "type": "object"} + # manager.index.annotations -- deployment annotations annotations: {} - # indexManager.service.labels -- service labels - labels: {} - # @schema {"name": "indexManager.image", "alias": "image"} - image: - # indexManager.image.repository -- image repository - repository: vdaas/vald-manager-index - # indexManager.image.tag -- image tag (overrides defaults.image.tag) - tag: "" - # indexManager.image.pullPolicy -- image pull policy - pullPolicy: Always - # @schema {"name": "indexManager.rollingUpdate", "alias": "rollingUpdate"} - rollingUpdate: - # indexManager.rollingUpdate.maxSurge -- max surge of rolling update - maxSurge: 25% - # indexManager.rollingUpdate.maxUnavailable -- max unavailable of rolling update - maxUnavailable: 25% - # @schema {"name": "indexManager.initContainers", "alias": "initContainers"} - # indexManager.initContainers -- init containers - initContainers: - - type: wait-for - name: wait-for-agent - target: agent - image: busybox - sleepDuration: 2 - - type: wait-for - name: wait-for-discoverer - target: discoverer - image: busybox - sleepDuration: 2 - # @schema {"name": "indexManager.env", "alias": "env"} - # indexManager.env -- environment variables - env: - # - name: MY_NODE_NAME - # valueFrom: - # fieldRef: - # fieldPath: spec.nodeName - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - # @schema {"name": "indexManager.volumeMounts", "alias": "volumeMounts"} - # indexManager.volumeMounts -- volume mounts - volumeMounts: [] - # @schema {"name": "indexManager.volumes", "alias": "volumes"} - # indexManager.volumes -- volumes - volumes: [] - # @schema {"name": "indexManager.nodeName", "type": "string"} - # indexManager.nodeName -- node name - nodeName: "" - # @schema {"name": "indexManager.nodeSelector", "alias": "nodeSelector"} - # indexManager.nodeSelector -- node selector - nodeSelector: {} - # @schema {"name": "indexManager.tolerations", "alias": "tolerations"} - # indexManager.tolerations -- tolerations - tolerations: [] - # @schema {"name": "indexManager.affinity", "alias": "affinity"} - affinity: - nodeAffinity: - # indexManager.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms - preferredDuringSchedulingIgnoredDuringExecution: [] - requiredDuringSchedulingIgnoredDuringExecution: - # indexManager.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors - nodeSelectorTerms: [] - podAffinity: - # indexManager.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms - preferredDuringSchedulingIgnoredDuringExecution: [] - # indexManager.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms - requiredDuringSchedulingIgnoredDuringExecution: [] - podAntiAffinity: - # indexManager.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms - preferredDuringSchedulingIgnoredDuringExecution: [] - # indexManager.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms - requiredDuringSchedulingIgnoredDuringExecution: [] - # @schema {"name": "indexManager.topologySpreadConstraints", "alias": "topologySpreadConstraints"} - # indexManager.topologySpreadConstraints -- topology spread constraints of index manager 
pods - topologySpreadConstraints: [] - # @schema {"name": "indexManager.server_config", "alias": "server_config"} - # indexManager.server_config -- server config (overrides defaults.server_config) - server_config: - servers: - rest: {} - grpc: {} - healths: - liveness: {} - readiness: {} - metrics: - pprof: {} - prometheus: {} - # @schema {"name": "indexManager.observability", "alias": "observability"} - # indexManager.observability -- observability config (overrides defaults.observability) - observability: - jaeger: - service_name: vald-manager-index - stackdriver: - profiler: - service: vald-manager-index - # @schema {"name": "indexManager.resources", "alias": "resources"} - # indexManager.resources -- compute resources - resources: - requests: - cpu: 200m - memory: 80Mi - limits: - cpu: 1000m - memory: 500Mi - # @schema {"name": "indexManager.indexer", "type": "object"} - indexer: - # @schema {"name": "indexManager.indexer.agent_namespace", "type": "string"} - # indexManager.indexer.agent_namespace -- namespace of agent pods to manage - agent_namespace: _MY_POD_NAMESPACE_ - # @schema {"name": "indexManager.indexer.node_name", "type": "string"} - # indexManager.indexer.node_name -- node name - node_name: "" # _MY_NODE_NAME_ - # @schema {"name": "indexManager.indexer.concurrency", "type": "integer", "minimum": 1} - # indexManager.indexer.concurrency -- concurrency - concurrency: 1 - # @schema {"name": "indexManager.indexer.auto_index_duration_limit", "type": "string"} - # indexManager.indexer.auto_index_duration_limit -- limit duration of automatic indexing - auto_index_duration_limit: 30m - # @schema {"name": "indexManager.indexer.auto_index_check_duration", "type": "string"} - # indexManager.indexer.auto_index_check_duration -- check duration of automatic indexing - auto_index_check_duration: 1m - # @schema {"name": "indexManager.indexer.auto_index_length", "type": "integer"} - # indexManager.indexer.auto_index_length -- number of cache to trigger automatic indexing - auto_index_length: 100 - # @schema {"name": "indexManager.indexer.creation_pool_size", "type": "integer"} - # indexManager.indexer.creation_pool_size -- number of pool size of create index processing - creation_pool_size: 10000 - # @schema {"name": "indexManager.indexer.discoverer", "type": "object"} - discoverer: - # @schema {"name": "indexManager.indexer.discoverer.duration", "type": "string"} - # indexManager.indexer.discoverer.duration -- refresh duration to discover - duration: 500ms - # @schema {"name": "indexManager.indexer.discoverer.discover_client", "alias": "grpc.client"} - # indexManager.indexer.discoverer.discover_client -- gRPC client for discoverer (overrides defaults.grpc.client) - discover_client: {} - # @schema {"name": "indexManager.indexer.discoverer.agent_client", "alias": "grpc.client"} - # indexManager.indexer.discoverer.agent_client -- gRPC client for agents (overrides defaults.grpc.client) - agent_client: - dial_option: - tcp: - dialer: - keep_alive: 15m + # @schema {"name": "manager.index.podAnnotations", "type": "object"} + # manager.index.podAnnotations -- pod annotations + podAnnotations: {} + # @schema {"name": "manager.index.service", "alias": "service"} + service: + # manager.index.service.annotations -- service annotations + annotations: {} + # manager.index.service.labels -- service labels + labels: {} + # @schema {"name": "manager.index.image", "alias": "image"} + image: + # manager.index.image.repository -- image repository + repository: vdaas/vald-manager-index + # 
manager.index.image.tag -- image tag (overrides defaults.image.tag) + tag: "" + # manager.index.image.pullPolicy -- image pull policy + pullPolicy: Always + # @schema {"name": "manager.index.rollingUpdate", "alias": "rollingUpdate"} + rollingUpdate: + # manager.index.rollingUpdate.maxSurge -- max surge of rolling update + maxSurge: 25% + # manager.index.rollingUpdate.maxUnavailable -- max unavailable of rolling update + maxUnavailable: 25% + # @schema {"name": "manager.index.initContainers", "alias": "initContainers"} + # manager.index.initContainers -- init containers + initContainers: + - type: wait-for + name: wait-for-agent + target: agent + image: busybox + sleepDuration: 2 + - type: wait-for + name: wait-for-discoverer + target: discoverer + image: busybox + sleepDuration: 2 + # @schema {"name": "manager.index.env", "alias": "env"} + # manager.index.env -- environment variables + env: + # - name: MY_NODE_NAME + # valueFrom: + # fieldRef: + # fieldPath: spec.nodeName + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # @schema {"name": "manager.index.volumeMounts", "alias": "volumeMounts"} + # manager.index.volumeMounts -- volume mounts + volumeMounts: [] + # @schema {"name": "manager.index.volumes", "alias": "volumes"} + # manager.index.volumes -- volumes + volumes: [] + # @schema {"name": "manager.index.nodeName", "type": "string"} + # manager.index.nodeName -- node name + nodeName: "" + # @schema {"name": "manager.index.nodeSelector", "alias": "nodeSelector"} + # manager.index.nodeSelector -- node selector + nodeSelector: {} + # @schema {"name": "manager.index.tolerations", "alias": "tolerations"} + # manager.index.tolerations -- tolerations + tolerations: [] + # @schema {"name": "manager.index.affinity", "alias": "affinity"} + affinity: + nodeAffinity: + # manager.index.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + requiredDuringSchedulingIgnoredDuringExecution: + # manager.index.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors + nodeSelectorTerms: [] + podAffinity: + # manager.index.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # manager.index.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + podAntiAffinity: + # manager.index.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # manager.index.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + # @schema {"name": "manager.index.topologySpreadConstraints", "alias": "topologySpreadConstraints"} + # manager.index.topologySpreadConstraints -- topology spread constraints of index manager pods + topologySpreadConstraints: [] + # @schema {"name": "manager.index.server_config", "alias": "server_config"} + # manager.index.server_config -- server config (overrides defaults.server_config) + server_config: + servers: + rest: {} + grpc: {} + healths: + liveness: {} + readiness: {} + metrics: + pprof: {} + prometheus: {} + # @schema 
{"name": "manager.index.observability", "alias": "observability"} + # manager.index.observability -- observability config (overrides defaults.observability) + observability: + jaeger: + service_name: vald-manager-index + stackdriver: + profiler: + service: vald-manager-index + # @schema {"name": "manager.index.resources", "alias": "resources"} + # manager.index.resources -- compute resources + resources: + requests: + cpu: 200m + memory: 80Mi + limits: + cpu: 1000m + memory: 500Mi + # @schema {"name": "manager.index.indexer", "type": "object"} + indexer: + # @schema {"name": "manager.index.indexer.agent_namespace", "type": "string"} + # manager.index.indexer.agent_namespace -- namespace of agent pods to manage + agent_namespace: _MY_POD_NAMESPACE_ + # @schema {"name": "manager.index.indexer.node_name", "type": "string"} + # manager.index.indexer.node_name -- node name + node_name: "" # _MY_NODE_NAME_ + # @schema {"name": "manager.index.indexer.concurrency", "type": "integer", "minimum": 1} + # manager.index.indexer.concurrency -- concurrency + concurrency: 1 + # @schema {"name": "manager.index.indexer.auto_index_duration_limit", "type": "string"} + # manager.index.indexer.auto_index_duration_limit -- limit duration of automatic indexing + auto_index_duration_limit: 30m + # @schema {"name": "manager.index.indexer.auto_index_check_duration", "type": "string"} + # manager.index.indexer.auto_index_check_duration -- check duration of automatic indexing + auto_index_check_duration: 1m + # @schema {"name": "manager.index.indexer.auto_index_length", "type": "integer"} + # manager.index.indexer.auto_index_length -- number of cache to trigger automatic indexing + auto_index_length: 100 + # @schema {"name": "manager.index.indexer.creation_pool_size", "type": "integer"} + # manager.index.indexer.creation_pool_size -- number of pool size of create index processing + creation_pool_size: 10000 + # @schema {"name": "manager.index.indexer.discoverer", "type": "object"} + discoverer: + # @schema {"name": "manager.index.indexer.discoverer.duration", "type": "string"} + # manager.index.indexer.discoverer.duration -- refresh duration to discover + duration: 500ms + # @schema {"name": "manager.index.indexer.discoverer.discover_client", "alias": "grpc.client"} + # manager.index.indexer.discoverer.discover_client -- gRPC client for discoverer (overrides defaults.grpc.client) + discover_client: {} + # @schema {"name": "manager.index.indexer.discoverer.agent_client", "alias": "grpc.client"} + # manager.index.indexer.discoverer.agent_client -- gRPC client for agents (overrides defaults.grpc.client) + agent_client: + dial_option: + tcp: + dialer: + keep_alive: 15m # @schema {"name": "meta", "type": "object"} meta: @@ -3071,8 +4004,8 @@ meta: default_idempotence: false # meta.cassandra.config.write_coalesce_wait_time -- write coalesce wait time write_coalesce_wait_time: 200µs - # meta.cassandra.config.meta_table -- table name of backup - meta_table: meta_vector + # meta.cassandra.config.vector_backup_table -- table name of backup + vector_backup_table: backup_vector pool_config: # meta.cassandra.config.pool_config.data_center -- name of data center data_center: "" @@ -3216,7 +4149,7 @@ initializer: # initializer.cassandra.configmap.backup.enabled -- backup table enabled enabled: true # initializer.cassandra.configmap.backup.name -- name of backup table - name: meta_vector + name: backup_vector secret: # initializer.cassandra.secret.enabled -- cassandra secret will be created enabled: false diff --git 
a/cmd/agent/core/ngt/main.go b/cmd/agent/core/ngt/main.go index 9081670efc..98cfcacd03 100644 --- a/cmd/agent/core/ngt/main.go +++ b/cmd/agent/core/ngt/main.go @@ -20,6 +20,7 @@ package main import ( "context" + "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/runner" @@ -43,7 +44,7 @@ func main() { runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { cfg, err := config.NewConfig(path) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") } return cfg, &cfg.GlobalConfig, nil }), diff --git a/cmd/agent/core/ngt/main_test.go b/cmd/agent/core/ngt/main_test.go index 77175e6295..0fd50d7b39 100644 --- a/cmd/agent/core/ngt/main_test.go +++ b/cmd/agent/core/ngt/main_test.go @@ -24,6 +24,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ -58,9 +59,11 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } diff --git a/cmd/agent/sidecar/main.go b/cmd/agent/sidecar/main.go index 69598ab141..9d1b060571 100644 --- a/cmd/agent/sidecar/main.go +++ b/cmd/agent/sidecar/main.go @@ -20,6 +20,7 @@ package main import ( "context" + "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/runner" @@ -43,7 +44,7 @@ func main() { runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { cfg, err := config.NewConfig(path) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") } return cfg, &cfg.GlobalConfig, nil }), diff --git a/cmd/agent/sidecar/main_test.go b/cmd/agent/sidecar/main_test.go index 77175e6295..0fd50d7b39 100644 --- a/cmd/agent/sidecar/main_test.go +++ b/cmd/agent/sidecar/main_test.go @@ -24,6 +24,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ -58,9 +59,11 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } diff --git a/cmd/discoverer/k8s/main.go b/cmd/discoverer/k8s/main.go index 404c9c4e3c..b167188d81 100644 --- a/cmd/discoverer/k8s/main.go +++ b/cmd/discoverer/k8s/main.go @@ -20,6 +20,7 @@ package main import ( "context" + "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/runner" @@ -43,7 +44,7 @@ func main() { runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { cfg, err := config.NewConfig(path) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") } return cfg, &cfg.GlobalConfig, nil }), diff --git a/cmd/discoverer/k8s/main_test.go b/cmd/discoverer/k8s/main_test.go index 77175e6295..0fd50d7b39 100644 --- a/cmd/discoverer/k8s/main_test.go +++ b/cmd/discoverer/k8s/main_test.go @@ -24,6 +24,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ 
-58,9 +59,11 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } diff --git a/cmd/gateway/backup/main.go b/cmd/gateway/backup/main.go new file mode 100644 index 0000000000..51698ff09a --- /dev/null +++ b/cmd/gateway/backup/main.go @@ -0,0 +1,59 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package main provides program main +package main + +import ( + "context" + + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/info" + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/runner" + "github.com/vdaas/vald/internal/safety" + "github.com/vdaas/vald/pkg/gateway/backup/config" + "github.com/vdaas/vald/pkg/gateway/backup/usecase" +) + +const ( + maxVersion = "v0.0.10" + minVersion = "v0.0.0" + name = "gateway backup" +) + +func main() { + if err := safety.RecoverFunc(func() error { + return runner.Do( + context.Background(), + runner.WithName(name), + runner.WithVersion(info.Version, maxVersion, minVersion), + runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { + cfg, err := config.NewConfig(path) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") + } + return cfg, &cfg.GlobalConfig, nil + }), + runner.WithDaemonInitializer(func(cfg interface{}) (runner.Runner, error) { + return usecase.New(cfg.(*config.Data)) + }), + ) + })(); err != nil { + log.Fatal(err, info.Get()) + return + } +} diff --git a/cmd/gateway/backup/main_test.go b/cmd/gateway/backup/main_test.go new file mode 100644 index 0000000000..0fd50d7b39 --- /dev/null +++ b/cmd/gateway/backup/main_test.go @@ -0,0 +1,83 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// Package main provides program main +package main + +import ( + "testing" + + "go.uber.org/goleak" +) + +func Test_main(t *testing.T) { + t.Parallel() + type want struct { + } + type test struct { + name string + want want + checkFunc func(want) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want) error { + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + main() + if err := test.checkFunc(test.want); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/cmd/gateway/lb/main.go b/cmd/gateway/lb/main.go new file mode 100644 index 0000000000..6ee539f5aa --- /dev/null +++ b/cmd/gateway/lb/main.go @@ -0,0 +1,59 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package main provides program main +package main + +import ( + "context" + + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/info" + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/runner" + "github.com/vdaas/vald/internal/safety" + "github.com/vdaas/vald/pkg/gateway/lb/config" + "github.com/vdaas/vald/pkg/gateway/lb/usecase" +) + +const ( + maxVersion = "v0.0.10" + minVersion = "v0.0.0" + name = "gateway lb" +) + +func main() { + if err := safety.RecoverFunc(func() error { + return runner.Do( + context.Background(), + runner.WithName(name), + runner.WithVersion(info.Version, maxVersion, minVersion), + runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { + cfg, err := config.NewConfig(path) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") + } + return cfg, &cfg.GlobalConfig, nil + }), + runner.WithDaemonInitializer(func(cfg interface{}) (runner.Runner, error) { + return usecase.New(cfg.(*config.Data)) + }), + ) + })(); err != nil { + log.Fatal(err, info.Get()) + return + } +} diff --git a/cmd/gateway/lb/main_test.go b/cmd/gateway/lb/main_test.go new file mode 100644 index 0000000000..0fd50d7b39 --- /dev/null +++ b/cmd/gateway/lb/main_test.go @@ -0,0 +1,83 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package main provides program main +package main + +import ( + "testing" + + "go.uber.org/goleak" +) + +func Test_main(t *testing.T) { + t.Parallel() + type want struct { + } + type test struct { + name string + want want + checkFunc func(want) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want) error { + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + main() + if err := test.checkFunc(test.want); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/cmd/gateway/meta/main.go b/cmd/gateway/meta/main.go new file mode 100644 index 0000000000..007754e15d --- /dev/null +++ b/cmd/gateway/meta/main.go @@ -0,0 +1,59 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// Package main provides program main +package main + +import ( + "context" + + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/info" + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/runner" + "github.com/vdaas/vald/internal/safety" + "github.com/vdaas/vald/pkg/gateway/meta/config" + "github.com/vdaas/vald/pkg/gateway/meta/usecase" +) + +const ( + maxVersion = "v0.0.10" + minVersion = "v0.0.0" + name = "gateway meta" +) + +func main() { + if err := safety.RecoverFunc(func() error { + return runner.Do( + context.Background(), + runner.WithName(name), + runner.WithVersion(info.Version, maxVersion, minVersion), + runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { + cfg, err := config.NewConfig(path) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") + } + return cfg, &cfg.GlobalConfig, nil + }), + runner.WithDaemonInitializer(func(cfg interface{}) (runner.Runner, error) { + return usecase.New(cfg.(*config.Data)) + }), + ) + })(); err != nil { + log.Fatal(err, info.Get()) + return + } +} diff --git a/cmd/gateway/meta/main_test.go b/cmd/gateway/meta/main_test.go new file mode 100644 index 0000000000..0fd50d7b39 --- /dev/null +++ b/cmd/gateway/meta/main_test.go @@ -0,0 +1,83 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// Package main provides program main +package main + +import ( + "testing" + + "go.uber.org/goleak" +) + +func Test_main(t *testing.T) { + t.Parallel() + type want struct { + } + type test struct { + name string + want want + checkFunc func(want) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want) error { + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + main() + if err := test.checkFunc(test.want); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/cmd/gateway/vald/main.go b/cmd/gateway/vald/main.go index 3e9cb87429..8cd5c9a745 100644 --- a/cmd/gateway/vald/main.go +++ b/cmd/gateway/vald/main.go @@ -20,6 +20,7 @@ package main import ( "context" + "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/runner" @@ -43,7 +44,7 @@ func main() { runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { cfg, err := config.NewConfig(path) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") } return cfg, &cfg.GlobalConfig, nil }), diff --git a/cmd/gateway/vald/main_test.go b/cmd/gateway/vald/main_test.go index 77175e6295..0fd50d7b39 100644 --- a/cmd/gateway/vald/main_test.go +++ b/cmd/gateway/vald/main_test.go @@ -24,6 +24,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ -58,9 +59,11 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } diff --git a/cmd/manager/backup/cassandra/main.go b/cmd/manager/backup/cassandra/main.go index b2a9504d35..a04ab74f5c 100644 --- a/cmd/manager/backup/cassandra/main.go +++ b/cmd/manager/backup/cassandra/main.go @@ -20,6 +20,7 @@ package main import ( "context" + "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/runner" @@ -43,7 +44,7 @@ func main() { runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { cfg, err := config.NewConfig(path) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") } return cfg, &cfg.GlobalConfig, nil }), diff --git a/cmd/manager/backup/cassandra/main_test.go b/cmd/manager/backup/cassandra/main_test.go index 77175e6295..0fd50d7b39 100644 --- a/cmd/manager/backup/cassandra/main_test.go +++ b/cmd/manager/backup/cassandra/main_test.go @@ -24,6 +24,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ -58,9 +59,11 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc 
t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } diff --git a/cmd/manager/backup/mysql/main.go b/cmd/manager/backup/mysql/main.go index 36d8c12236..fabfe15ad1 100644 --- a/cmd/manager/backup/mysql/main.go +++ b/cmd/manager/backup/mysql/main.go @@ -20,6 +20,7 @@ package main import ( "context" + "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/runner" @@ -43,7 +44,7 @@ func main() { runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { cfg, err := config.NewConfig(path) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") } return cfg, &cfg.GlobalConfig, nil }), diff --git a/cmd/manager/backup/mysql/main_test.go b/cmd/manager/backup/mysql/main_test.go index 77175e6295..0fd50d7b39 100644 --- a/cmd/manager/backup/mysql/main_test.go +++ b/cmd/manager/backup/mysql/main_test.go @@ -24,6 +24,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ -58,9 +59,11 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } diff --git a/cmd/manager/compressor/main.go b/cmd/manager/compressor/main.go index 44a6adb038..5de4a54349 100644 --- a/cmd/manager/compressor/main.go +++ b/cmd/manager/compressor/main.go @@ -20,6 +20,7 @@ package main import ( "context" + "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/runner" @@ -43,7 +44,7 @@ func main() { runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { cfg, err := config.NewConfig(path) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") } return cfg, &cfg.GlobalConfig, nil }), diff --git a/cmd/manager/compressor/main_test.go b/cmd/manager/compressor/main_test.go index 77175e6295..0fd50d7b39 100644 --- a/cmd/manager/compressor/main_test.go +++ b/cmd/manager/compressor/main_test.go @@ -24,6 +24,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ -58,9 +59,11 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } diff --git a/cmd/manager/index/main.go b/cmd/manager/index/main.go index 356115f574..87e83335a7 100644 --- a/cmd/manager/index/main.go +++ b/cmd/manager/index/main.go @@ -20,6 +20,7 @@ package main import ( "context" + "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/runner" @@ -43,7 +44,7 @@ func main() { runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { cfg, err := config.NewConfig(path) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") } return cfg, &cfg.GlobalConfig, nil }), diff --git a/cmd/manager/index/main_test.go 
b/cmd/manager/index/main_test.go index 77175e6295..0fd50d7b39 100644 --- a/cmd/manager/index/main_test.go +++ b/cmd/manager/index/main_test.go @@ -24,6 +24,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ -58,9 +59,11 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } diff --git a/cmd/manager/replication/agent/main.go b/cmd/manager/replication/agent/main.go index c7a137e282..61c7b3f0ee 100644 --- a/cmd/manager/replication/agent/main.go +++ b/cmd/manager/replication/agent/main.go @@ -20,6 +20,7 @@ package main import ( "context" + "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/runner" @@ -43,7 +44,7 @@ func main() { runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { cfg, err := config.NewConfig(path) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") } return cfg, &cfg.GlobalConfig, nil }), diff --git a/cmd/manager/replication/agent/main_test.go b/cmd/manager/replication/agent/main_test.go index 77175e6295..0fd50d7b39 100644 --- a/cmd/manager/replication/agent/main_test.go +++ b/cmd/manager/replication/agent/main_test.go @@ -24,6 +24,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ -58,9 +59,11 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } diff --git a/cmd/manager/replication/controller/main.go b/cmd/manager/replication/controller/main.go index add1336e8e..03df296f0e 100644 --- a/cmd/manager/replication/controller/main.go +++ b/cmd/manager/replication/controller/main.go @@ -20,6 +20,7 @@ package main import ( "context" + "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/runner" @@ -43,7 +44,7 @@ func main() { runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { cfg, err := config.NewConfig(path) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") } return cfg, &cfg.GlobalConfig, nil }), diff --git a/cmd/manager/replication/controller/main_test.go b/cmd/manager/replication/controller/main_test.go index 77175e6295..0fd50d7b39 100644 --- a/cmd/manager/replication/controller/main_test.go +++ b/cmd/manager/replication/controller/main_test.go @@ -24,6 +24,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ -58,9 +59,11 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } diff --git a/cmd/meta/cassandra/main.go b/cmd/meta/cassandra/main.go index ecb6667d81..eac078d562 100644 --- a/cmd/meta/cassandra/main.go +++ b/cmd/meta/cassandra/main.go @@ -20,6 +20,7 @@ package main import ( 
"context" + "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/runner" @@ -43,7 +44,7 @@ func main() { runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { cfg, err := config.NewConfig(path) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") } return cfg, &cfg.GlobalConfig, nil }), diff --git a/cmd/meta/cassandra/main_test.go b/cmd/meta/cassandra/main_test.go index 77175e6295..0fd50d7b39 100644 --- a/cmd/meta/cassandra/main_test.go +++ b/cmd/meta/cassandra/main_test.go @@ -24,6 +24,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ -58,9 +59,11 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } diff --git a/cmd/meta/redis/main.go b/cmd/meta/redis/main.go index 31d746f3a2..4b174c3089 100644 --- a/cmd/meta/redis/main.go +++ b/cmd/meta/redis/main.go @@ -20,6 +20,7 @@ package main import ( "context" + "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/runner" @@ -43,7 +44,7 @@ func main() { runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { cfg, err := config.NewConfig(path) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") } return cfg, &cfg.GlobalConfig, nil }), diff --git a/cmd/meta/redis/main_test.go b/cmd/meta/redis/main_test.go index 77175e6295..0fd50d7b39 100644 --- a/cmd/meta/redis/main_test.go +++ b/cmd/meta/redis/main_test.go @@ -24,6 +24,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ -58,9 +59,11 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } diff --git a/cmd/tools/cli/loadtest/main.go b/cmd/tools/cli/loadtest/main.go index 9d884c19f1..03073027d0 100644 --- a/cmd/tools/cli/loadtest/main.go +++ b/cmd/tools/cli/loadtest/main.go @@ -18,6 +18,7 @@ package main import ( "context" + "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/runner" @@ -41,7 +42,7 @@ func main() { runner.WithConfigLoader(func(path string) (interface{}, *config.GlobalConfig, error) { cfg, err := config.NewConfig(path) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "failed to load "+name+"'s configuration") } return cfg, &cfg.GlobalConfig, nil }), diff --git a/cmd/tools/cli/loadtest/main_test.go b/cmd/tools/cli/loadtest/main_test.go index d8407687c3..9a722ab8dc 100644 --- a/cmd/tools/cli/loadtest/main_test.go +++ b/cmd/tools/cli/loadtest/main_test.go @@ -22,6 +22,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ -56,8 +57,10 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer 
goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() diff --git a/cmd/tools/cli/loadtest/sample.yaml b/cmd/tools/cli/loadtest/sample.yaml index 39178f9d19..844b66d645 100644 --- a/cmd/tools/cli/loadtest/sample.yaml +++ b/cmd/tools/cli/loadtest/sample.yaml @@ -21,7 +21,6 @@ logging: logger: glg level: info format: raw -service: gateway operation: insert dataset: fashion-mnist concurrency: 100 diff --git a/cmd/tools/cli/vdctl/main.go b/cmd/tools/cli/vdctl/main.go index 882e3398dc..f1b1e17077 100644 --- a/cmd/tools/cli/vdctl/main.go +++ b/cmd/tools/cli/vdctl/main.go @@ -17,5 +17,4 @@ package main func main() { - } diff --git a/cmd/tools/cli/vdctl/main_test.go b/cmd/tools/cli/vdctl/main_test.go index 30710c21ff..be3777642a 100644 --- a/cmd/tools/cli/vdctl/main_test.go +++ b/cmd/tools/cli/vdctl/main_test.go @@ -23,6 +23,7 @@ import ( ) func Test_main(t *testing.T) { + t.Parallel() type want struct { } type test struct { @@ -57,8 +58,10 @@ func Test_main(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() diff --git a/design/Vald Future Architecture Overview.svg b/design/Vald Future Architecture Overview.svg deleted file mode 100644 index 03e3fa1009..0000000000 --- a/design/Vald Future Architecture Overview.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
[deleted file content: "Vald Future Architecture Overview" SVG diagram — only its text labels are recoverable. The diagram depicted the Kubernetes deployment: Vald Ingress / Egress Filters, Vald Filter / Meta / Backup / LB Gateways, Agent Discoverer, Vald Meta (Redis / Cassandra / MySQL), Vald Backup Manager (MySQL, Cassandra), Vald Compressor (LZ4, ZSTD, GZIP, GOB), Vald Agent (NGT, SPTAG, Faiss) with the Agent Sidecar backing up index data to a Volume or Object Storage (GCS, S3), Vald Index Manager, Vald CRD, Vald Agent Scheduler, and the Replication Manager Controller / Agent, with HPA-managed pods and a Custom Controller on the master (Kube-APIServer).]
\ No newline at end of file diff --git a/dockers/gateway/backup/Dockerfile b/dockers/gateway/backup/Dockerfile new file mode 100644 index 0000000000..3f2774206e --- /dev/null +++ b/dockers/gateway/backup/Dockerfile @@ -0,0 +1,72 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +ARG GO_VERSION=latest +ARG BASE_TAG=latest +ARG DISTROLESS_IMAGE=gcr.io/distroless/static +ARG DISTROLESS_IMAGE_TAG=nonroot +ARG UPX_OPTIONS=-9 + +FROM golang:${GO_VERSION} AS golang + +FROM vdaas/vald-base:${BASE_TAG} AS builder +ARG UPX_OPTIONS + +ENV ORG vdaas +ENV REPO vald +ENV PKG gateway/backup +ENV APP_NAME backup + +COPY --from=golang /usr/local/go $GOROOT + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/internal +COPY internal . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/apis/grpc +COPY apis/grpc . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/pkg/${PKG} +COPY pkg/${PKG} . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/pkg/gateway/internal +COPY pkg/gateway/internal . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/cmd/${PKG} +COPY cmd/${PKG} . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/versions +COPY versions . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/Makefile.d +COPY Makefile.d . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO} +COPY Makefile . +COPY .git . + +RUN make REPO=${ORG} NAME=${REPO} cmd/${PKG}/${APP_NAME} \ + && upx ${UPX_OPTIONS} -o "/usr/bin/${APP_NAME}" "cmd/${PKG}/${APP_NAME}" + +FROM ${DISTROLESS_IMAGE}:${DISTROLESS_IMAGE_TAG} +LABEL maintainer "Vald team " + +ENV APP_NAME backup + +COPY --from=builder /usr/bin/${APP_NAME} /go/bin/${APP_NAME} + +USER nonroot:nonroot + +ENTRYPOINT ["/go/bin/backup"] diff --git a/dockers/gateway/lb/Dockerfile b/dockers/gateway/lb/Dockerfile new file mode 100644 index 0000000000..a57ee41b97 --- /dev/null +++ b/dockers/gateway/lb/Dockerfile @@ -0,0 +1,72 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +ARG GO_VERSION=latest +ARG BASE_TAG=latest +ARG DISTROLESS_IMAGE=gcr.io/distroless/static +ARG DISTROLESS_IMAGE_TAG=nonroot +ARG UPX_OPTIONS=-9 + +FROM golang:${GO_VERSION} AS golang + +FROM vdaas/vald-base:${BASE_TAG} AS builder +ARG UPX_OPTIONS + +ENV ORG vdaas +ENV REPO vald +ENV PKG gateway/lb +ENV APP_NAME lb + +COPY --from=golang /usr/local/go $GOROOT + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/internal +COPY internal . 
+ +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/apis/grpc +COPY apis/grpc . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/pkg/${PKG} +COPY pkg/${PKG} . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/pkg/gateway/internal +COPY pkg/gateway/internal . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/cmd/${PKG} +COPY cmd/${PKG} . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/versions +COPY versions . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/Makefile.d +COPY Makefile.d . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO} +COPY Makefile . +COPY .git . + +RUN make REPO=${ORG} NAME=${REPO} cmd/${PKG}/${APP_NAME} \ + && upx ${UPX_OPTIONS} -o "/usr/bin/${APP_NAME}" "cmd/${PKG}/${APP_NAME}" + +FROM ${DISTROLESS_IMAGE}:${DISTROLESS_IMAGE_TAG} +LABEL maintainer "Vald team " + +ENV APP_NAME lb + +COPY --from=builder /usr/bin/${APP_NAME} /go/bin/${APP_NAME} + +USER nonroot:nonroot + +ENTRYPOINT ["/go/bin/lb"] diff --git a/dockers/gateway/meta/Dockerfile b/dockers/gateway/meta/Dockerfile new file mode 100644 index 0000000000..8177ce1943 --- /dev/null +++ b/dockers/gateway/meta/Dockerfile @@ -0,0 +1,72 @@ +# +# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +ARG GO_VERSION=latest +ARG BASE_TAG=latest +ARG DISTROLESS_IMAGE=gcr.io/distroless/static +ARG DISTROLESS_IMAGE_TAG=nonroot +ARG UPX_OPTIONS=-9 + +FROM golang:${GO_VERSION} AS golang + +FROM vdaas/vald-base:${BASE_TAG} AS builder +ARG UPX_OPTIONS + +ENV ORG vdaas +ENV REPO vald +ENV PKG gateway/meta +ENV APP_NAME meta + +COPY --from=golang /usr/local/go $GOROOT + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/internal +COPY internal . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/apis/grpc +COPY apis/grpc . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/pkg/${PKG} +COPY pkg/${PKG} . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/pkg/gateway/internal +COPY pkg/gateway/internal . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/cmd/${PKG} +COPY cmd/${PKG} . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/versions +COPY versions . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO}/Makefile.d +COPY Makefile.d . + +WORKDIR ${GOPATH}/src/github.com/${ORG}/${REPO} +COPY Makefile . +COPY .git . 
+ +RUN make REPO=${ORG} NAME=${REPO} cmd/${PKG}/${APP_NAME} \ + && upx ${UPX_OPTIONS} -o "/usr/bin/${APP_NAME}" "cmd/${PKG}/${APP_NAME}" + +FROM ${DISTROLESS_IMAGE}:${DISTROLESS_IMAGE_TAG} +LABEL maintainer "Vald team " + +ENV APP_NAME meta + +COPY --from=builder /usr/bin/${APP_NAME} /go/bin/${APP_NAME} + +USER nonroot:nonroot + +ENTRYPOINT ["/go/bin/meta"] diff --git a/example/client/agent/main.go b/example/client/agent/main.go index 3f97e8e048..2a86ac1839 100644 --- a/example/client/agent/main.go +++ b/example/client/agent/main.go @@ -23,11 +23,13 @@ import ( "github.com/kpango/fuid" "github.com/kpango/glg" - agent "github.com/vdaas/vald-client-go/agent/core" - "github.com/vdaas/vald-client-go/payload" + "google.golang.org/grpc" + + "github.com/vdaas/vald-client-go/v1/vald" + // agent "github.com/vdaas/vald-client-go/v1/agent/core" + "github.com/vdaas/vald-client-go/v1/payload" "gonum.org/v1/hdf5" - "google.golang.org/grpc" ) const ( @@ -72,7 +74,7 @@ func main() { } // Creates Vald Agent client for gRPC. - client := agent.NewAgentClient(conn) + client := vald.NewValdClient(conn) glg.Infof("Start Inserting %d training vector to Vald Agent", insertCount) // Insert 400 example vectors into Vald cluster @@ -82,9 +84,14 @@ func main() { } // Calls `Insert` function of Vald Agent client. // Sends set of vector and id to server via gRPC. - _, err := client.Insert(ctx, &payload.Object_Vector{ - Id: ids[i], - Vector: train[i], + _, err := client.Insert(ctx, &payload.Insert_Request{ + Vector: &payload.Object_Vector{ + Id: ids[i], + Vector: train[i], + }, + Config: &payload.Insert_Config{ + SkipStrictExistCheck: true, + }, }) if err != nil { glg.Fatal(err) @@ -95,7 +102,7 @@ func main() { If you run client.CreateIndex, it costs less time for search **/ // glg.Info("Start Indexing dataset.") - // _, err = client.CreateIndex(ctx, &payload.Control_CreateIndexRequest{ + // _, err = agent.NewAgentClient(conn).CreateIndex(ctx, &payload.Control_CreateIndexRequest{ // PoolSize: uint32(insertCount), // }) // if err != nil { diff --git a/go.mod b/go.mod index d6c4566990..dc31390547 100755 --- a/go.mod +++ b/go.mod @@ -3,28 +3,28 @@ module github.com/vdaas/vald go 1.15 replace ( - cloud.google.com/go => cloud.google.com/go v0.66.0 + cloud.google.com/go => cloud.google.com/go v0.71.0 github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible - github.com/aws/aws-sdk-go => github.com/aws/aws-sdk-go v1.34.25 + github.com/aws/aws-sdk-go => github.com/aws/aws-sdk-go v1.35.24 github.com/boltdb/bolt => github.com/boltdb/bolt v1.3.1 github.com/chzyer/logex => github.com/chzyer/logex v1.1.11-0.20170329064859-445be9e134b2 github.com/coreos/etcd => go.etcd.io/etcd v3.3.25+incompatible github.com/docker/docker => github.com/moby/moby v1.13.1 github.com/envoyproxy/protoc-gen-validate => github.com/envoyproxy/protoc-gen-validate v0.4.1 github.com/go-sql-driver/mysql => github.com/go-sql-driver/mysql v1.5.0 - github.com/gocql/gocql => github.com/gocql/gocql v0.0.0-20200815110948-5378c8f664e9 + github.com/gocql/gocql => github.com/gocql/gocql v0.0.0-20201024154641-5913df4d474e github.com/gogo/googleapis => github.com/gogo/googleapis v1.4.0 github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.1 github.com/google/go-cmp => github.com/google/go-cmp v0.5.2 - github.com/google/pprof => github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7 + github.com/google/pprof => github.com/google/pprof v0.0.0-20201109224723-20978b51388d github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.4.0 - 
github.com/gophercloud/gophercloud => github.com/gophercloud/gophercloud v0.12.0 + github.com/gophercloud/gophercloud => github.com/gophercloud/gophercloud v0.13.0 github.com/gorilla/websocket => github.com/gorilla/websocket v1.4.2 github.com/hailocab/go-hostpool => github.com/monzo/go-hostpool v0.0.0-20200724120130-287edbb29340 - github.com/klauspost/compress => github.com/klauspost/compress v1.11.1-0.20200908135004-a2bf5b1ec3aa - github.com/tensorflow/tensorflow => github.com/tensorflow/tensorflow v2.1.0+incompatible - golang.org/x/crypto => golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a - google.golang.org/grpc => google.golang.org/grpc v1.32.0 + github.com/klauspost/compress => github.com/klauspost/compress v1.11.3-0.20201110090307-d693bc87c5fb + github.com/tensorflow/tensorflow => github.com/tensorflow/tensorflow v2.1.2+incompatible + golang.org/x/crypto => golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 + google.golang.org/grpc => google.golang.org/grpc v1.33.2 google.golang.org/protobuf => google.golang.org/protobuf v1.25.0 k8s.io/api => k8s.io/api v0.18.9 k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.9 @@ -40,9 +40,8 @@ require ( contrib.go.opencensus.io/exporter/jaeger v0.2.1 contrib.go.opencensus.io/exporter/prometheus v0.2.0 contrib.go.opencensus.io/exporter/stackdriver v0.13.4 - github.com/aws/aws-sdk-go v1.23.20 + github.com/aws/aws-sdk-go v1.35.24 github.com/cespare/xxhash/v2 v2.1.1 - github.com/danielvladco/go-proto-gql/pb v0.6.1 github.com/envoyproxy/protoc-gen-validate v0.1.0 github.com/fsnotify/fsnotify v1.4.9 github.com/go-redis/redis/v7 v7.4.0 @@ -66,17 +65,17 @@ require ( github.com/tensorflow/tensorflow v0.0.0-00010101000000-000000000000 github.com/yahoojapan/gongt v0.0.0-20190517050727-966dcc7aa5e8 github.com/yahoojapan/ngtd v0.0.0-20200424071638-9872bbae3700 - go.opencensus.io v0.22.4 + go.opencensus.io v0.22.5 go.uber.org/automaxprocs v1.3.0 go.uber.org/goleak v1.1.10 - golang.org/x/net v0.0.0-20200904194848-62affa334b73 - golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 - golang.org/x/sys v0.0.0-20200916084744-dbad9cb7cb7a + golang.org/x/net v0.0.0-20201110031124-69a78807bb2b + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 + golang.org/x/sys v0.0.0-20201109165425-215b40eba54c gonum.org/v1/hdf5 v0.0.0-20200504100616-496fefe91614 - gonum.org/v1/plot v0.8.0 - google.golang.org/api v0.32.0 - google.golang.org/genproto v0.0.0-20200916143405-f6a2fa72f0c4 - google.golang.org/grpc v1.31.1 + gonum.org/v1/plot v0.8.1 + google.golang.org/api v0.35.0 + google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb + google.golang.org/grpc v1.33.1 gopkg.in/yaml.v2 v2.3.0 k8s.io/api v0.18.9 k8s.io/apimachinery v0.18.9 diff --git a/go.sum b/go.sum index 4f9c53bf85..ecc98d96a5 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -cloud.google.com/go v0.66.0 h1:DZeAkuQGQqnm9Xv36SbMJEU8aFBz4wL04UpMWPWwjzg= -cloud.google.com/go v0.66.0/go.mod h1:dgqGAjKCDxyhGTtC9dAREQGUJpkceNm1yt590Qno0Ko= +cloud.google.com/go v0.71.0 h1:2ha722Z08cmRa0orJrzBaszYQcLbLFcsZHsGSj/kIF4= +cloud.google.com/go v0.71.0/go.mod h1:qZfY4Y7AEIQwG/fQYD3xrxLNkQZ0Xzf3HGeqCkA6LVM= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= @@ -58,8 +58,8 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo 
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.34.25 h1:yHNez503p+NuQ5QdMKjwEIkwTa2u+TeUAPAqCVdFu4I= -github.com/aws/aws-sdk-go v1.34.25/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.35.24 h1:U3GNTg8+7xSM6OAJ8zksiSM4bRqxBWmVwwehvOSNG3A= +github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -91,8 +91,6 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/danielvladco/go-proto-gql/pb v0.6.1 h1:aCcZci9B8bRfAXJST65qNGw2QkoGKDy1m4619JLDOag= -github.com/danielvladco/go-proto-gql/pb v0.6.1/go.mod h1:jX98VVm9haVTbUA3iy8JzyJemHXe/vzEVCkO8ZIX8PY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -190,8 +188,8 @@ github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRf github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gocql/gocql v0.0.0-20200815110948-5378c8f664e9 h1:SBOCi413wRa7i5ZET6dmeg8iqpKO/hE+buwIZ7WhNg4= -github.com/gocql/gocql v0.0.0-20200815110948-5378c8f664e9/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= +github.com/gocql/gocql v0.0.0-20201024154641-5913df4d474e h1:p5NB/+xroUR8OnumV9/cbCav+mmSjrGi2uwYtXNFJG4= +github.com/gocql/gocql v0.0.0-20201024154641-5913df4d474e/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= github.com/gocraft/dbr/v2 v2.7.0 h1:x+UnhSBYPFBBdtikLSMLQ9KPuquSUj4yBijsQAhhNZo= github.com/gocraft/dbr/v2 v2.7.0/go.mod h1:wQdbxPBSloo2OlSedMxfNW0mgk0GXys9O1VFmQiwcx4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= @@ -216,8 +214,9 @@ github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaW github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf 
v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= @@ -229,19 +228,20 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7 h1:k+KkMRk8mGOu1xG38StS7dQ+Z6oW1i9n3dgrAVU9Q/E= -github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20201109224723-20978b51388d h1:l48/ohaaIyJbCld0g0U9nO8mTv4E7QMopIiiOFtJB5Y= +github.com/google/pprof v0.0.0-20201109224723-20978b51388d/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.0 h1:BXDUo8p/DaxC+4FJY/SSx3gvnx9C1VdHNgaUkiEL5mk= github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= +github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= @@ -259,13 +259,15 @@ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -285,8 +287,8 @@ github.com/jung-kurt/gofpdf v1.16.2 h1:jgbatWHfRlPYiK85qgevsZTHviWXKwB1TTiKdz5Pt github.com/jung-kurt/gofpdf v1.16.2/go.mod h1:1hl7y57EsiPAkLbOwzpzqgx1A30nQCk/YmFV8S2vmK0= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.11.1-0.20200908135004-a2bf5b1ec3aa h1:9xEtLQvhsiWZvijuoPGoFVxijpWuacg3KDA+kvlI4+4= -github.com/klauspost/compress v1.11.1-0.20200908135004-a2bf5b1ec3aa/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.3-0.20201110090307-d693bc87c5fb h1:EArM0Sv5momvbrfT6ze947iHS+qVQoCGjupj3D9urZc= +github.com/klauspost/compress v1.11.3-0.20201110090307-d693bc87c5fb/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kpango/fastime v1.0.8/go.mod h1:Y5XY5bLG5yc7g2XmMUzc22XYV1XaH+KgUOHkDvLp4SA= @@ -303,8 +305,9 @@ github.com/kpango/glg v1.5.1 h1:ecOOgdPMt7OdDUYjoUZ9dbnY8MVwUUMc6D5ZN3exLNM= github.com/kpango/glg v1.5.1/go.mod h1:xIbZZSoRgDaYrXYmBK4wccGHkHK3qk61H/pK3R4qyE8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= @@ -333,7 +336,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/moby/moby v1.13.1 h1:mC5WwQwCXt/dYxZ1cIrRsnJAWw7VdtcTZUIGr4tXzOM= github.com/moby/moby v1.13.1/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -439,13 +441,12 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/tensorflow/tensorflow v2.1.0+incompatible h1:piL4AzfPuv67+gbsKo2IhIecCe4ILpN0294O8ZrxneI= -github.com/tensorflow/tensorflow v2.1.0+incompatible/go.mod h1:itOSERT4trABok4UOoG+X4BoKds9F3rIsySdn+Lvu90= +github.com/tensorflow/tensorflow v2.1.2+incompatible h1:RLnKj9KWGJhp22JmzSW/ilEKC1MZb5RN49iAb69Zafg= +github.com/tensorflow/tensorflow v2.1.2+incompatible/go.mod h1:itOSERT4trABok4UOoG+X4BoKds9F3rIsySdn+Lvu90= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= @@ -462,7 +463,6 @@ github.com/yahoojapan/ngtd v0.0.0-20200424071638-9872bbae3700 h1:NPdk9P5cTgD6orW github.com/yahoojapan/ngtd v0.0.0-20200424071638-9872bbae3700/go.mod h1:K4qaOV0B+PP5TQp0/uU1YCASMlVxmGxMNVYAG9rcL4c= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zeebo/xxh3 v0.8.2 h1:8V3CL8YrAiyCw8W9fN8CsZsknxMrbVaLEDva6+ajFUw= github.com/zeebo/xxh3 v0.8.2/go.mod h1:AQY73TOrhF3jNsdiM9zZOb8MThrYbZONHj7ryDBaLpg= @@ -476,8 +476,9 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= 
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= @@ -494,8 +495,8 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.0 h1:/pduUoebOeeJzTDFuoMgC6nRkiasr1sBCIEorly7m4o= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 h1:pLI5jrR7OSLijeIDcmRxNmw2api+jEfxLoykJVice/E= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -553,7 +554,6 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -563,10 +563,11 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -580,8 +581,9 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -621,10 +623,10 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200727154430-2d971f7391a4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200916084744-dbad9cb7cb7a h1:chkwkn8HYWVtTE5DCQNKYlkyptadXYY0+PuyaVdyMo4= -golang.org/x/sys v0.0.0-20200916084744-dbad9cb7cb7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201109165425-215b40eba54c h1:+B+zPA6081G5cEb2triOIJpcvSW4AYzmIyWAqMn2JAc= +golang.org/x/sys v0.0.0-20201109165425-215b40eba54c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -661,7 +663,6 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools 
v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -677,10 +678,9 @@ golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200828161849-5deb26317202/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c h1:AQsh/7arPVFDBraQa8x7GoVnwnGg1kM7J2ySI0kF5WU= -golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201030143252-cf7a54d06671 h1:8ylPbtgKXakJwDQKPjMJ6BSnlEIFViV0tYnu5/1Omk8= +golang.org/x/tools v0.0.0-20201030143252-cf7a54d06671/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -697,8 +697,8 @@ gonum.org/v1/hdf5 v0.0.0-20200504100616-496fefe91614/go.mod h1:BQUWDHIAygjdt1HnU gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -gonum.org/v1/plot v0.8.0 h1:dNgubmltsMoehfn6XgbutHpicbUfbkcGSxkICy1bC4o= -gonum.org/v1/plot v0.8.0/go.mod h1:3GH8dTfoceRTELDnv+4HNwbvM/eMfdDUGHFG2bo3NeE= +gonum.org/v1/plot v0.8.1 h1:1oWyfw7tIDDtKb+t+SbR9RFruMmNJlsKiZUolHdys2I= +gonum.org/v1/plot v0.8.1/go.mod h1:3GH8dTfoceRTELDnv+4HNwbvM/eMfdDUGHFG2bo3NeE= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= @@ -712,9 +712,9 @@ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.31.0/go.mod h1:CL+9IBCa2WWU6gRuBWaKqGWLFFwbEUXkfeMkHLQWYWo= -google.golang.org/api v0.32.0 h1:Le77IccnTqEa8ryp9wIpX5W3zYm7Gf9LhOp9PHcwFts= -google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.34.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.35.0 h1:TBCmTTxUrRDA1iTctnK/fIeitxIZ+TQuaf0j29fmCGo= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine 
v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -742,13 +742,12 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200831141814-d751682dd103/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200914193844-75d14daec038/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200916143405-f6a2fa72f0c4 h1:0FQbRyP6f/LVRsofCaQD4BMMd5kRAXMo/WvispzB940= -google.golang.org/genproto v0.0.0-20200916143405-f6a2fa72f0c4/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/genproto v0.0.0-20201030142918-24207fddd1c3/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb h1:MoNcrN5yaH+35Ge8RUwFbL7ekwq9ED2fiDpgWKrR29w= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= @@ -772,7 +771,6 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/hack/benchmark/assets/x1b/loader.go b/hack/benchmark/assets/x1b/loader.go index 277db20338..7d88dfaf4e 100644 --- a/hack/benchmark/assets/x1b/loader.go +++ b/hack/benchmark/assets/x1b/loader.go @@ -65,9 +65,11 @@ type file struct { type bvecs struct { *file } + type fvecs struct { *file } + type ivecs struct { *file } diff --git a/hack/benchmark/assets/x1b/loader_test.go b/hack/benchmark/assets/x1b/loader_test.go index aae11b84f6..cba78f7954 100644 --- a/hack/benchmark/assets/x1b/loader_test.go +++ b/hack/benchmark/assets/x1b/loader_test.go @@ -24,6 +24,7 @@ import ( ) func Test_open(t *testing.T) { + t.Parallel() type args struct { fname string elementSize int @@ -42,10 +43,10 @@ func Test_open(t *testing.T) { } defaultCheckFunc := func(w want, gotF *file, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, 
want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(gotF, w.wantF) { - return errors.Errorf("got = %v, want %v", gotF, w.wantF) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotF, w.wantF) } return nil } @@ -79,8 +80,10 @@ func Test_open(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -96,12 +99,12 @@ func Test_open(t *testing.T) { if err := test.checkFunc(test.want, gotF, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_file_Close(t *testing.T) { + t.Parallel() type fields struct { mem []byte dim int @@ -121,7 +124,7 @@ func Test_file_Close(t *testing.T) { } defaultCheckFunc := func(w want, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } return nil } @@ -159,8 +162,10 @@ func Test_file_Close(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -182,12 +187,12 @@ func Test_file_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_file_load(t *testing.T) { + t.Parallel() type args struct { i int } @@ -212,10 +217,10 @@ func Test_file_load(t *testing.T) { } defaultCheckFunc := func(w want, got []byte, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -259,8 +264,10 @@ func Test_file_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -282,12 +289,12 @@ func Test_file_load(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_file_Dimension(t *testing.T) { + t.Parallel() type fields struct { mem []byte dim int @@ -307,7 +314,7 @@ func Test_file_Dimension(t *testing.T) { } defaultCheckFunc := func(w want, got int) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -345,8 +352,10 @@ func Test_file_Dimension(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -368,12 +377,12 @@ func Test_file_Dimension(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_file_Size(t *testing.T) { + t.Parallel() type fields struct { mem []byte dim int @@ -393,7 +402,7 @@ func Test_file_Size(t *testing.T) { } defaultCheckFunc := func(w want, got int) error { if 
!reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -431,8 +440,10 @@ func Test_file_Size(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -454,12 +465,12 @@ func Test_file_Size(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_bvecs_LoadUint8(t *testing.T) { + t.Parallel() type args struct { i int } @@ -481,10 +492,10 @@ func Test_bvecs_LoadUint8(t *testing.T) { } defaultCheckFunc := func(w want, got []uint8, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -522,8 +533,10 @@ func Test_bvecs_LoadUint8(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -542,12 +555,12 @@ func Test_bvecs_LoadUint8(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_bvecs_Load(t *testing.T) { + t.Parallel() type args struct { i int } @@ -569,10 +582,10 @@ func Test_bvecs_Load(t *testing.T) { } defaultCheckFunc := func(w want, got interface{}, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -610,8 +623,10 @@ func Test_bvecs_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -630,12 +645,12 @@ func Test_bvecs_Load(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_fvecs_LoadFloat32(t *testing.T) { + t.Parallel() type args struct { i int } @@ -657,10 +672,10 @@ func Test_fvecs_LoadFloat32(t *testing.T) { } defaultCheckFunc := func(w want, got []float32, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -698,8 +713,10 @@ func Test_fvecs_LoadFloat32(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -718,12 +735,12 @@ func Test_fvecs_LoadFloat32(t *testing.T) { if err := 
test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_fvecs_Load(t *testing.T) { + t.Parallel() type args struct { i int } @@ -745,10 +762,10 @@ func Test_fvecs_Load(t *testing.T) { } defaultCheckFunc := func(w want, got interface{}, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -786,8 +803,10 @@ func Test_fvecs_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -806,12 +825,12 @@ func Test_fvecs_Load(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ivecs_LoadInt32(t *testing.T) { + t.Parallel() type args struct { i int } @@ -833,10 +852,10 @@ func Test_ivecs_LoadInt32(t *testing.T) { } defaultCheckFunc := func(w want, got []int32, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -874,8 +893,10 @@ func Test_ivecs_LoadInt32(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -894,12 +915,12 @@ func Test_ivecs_LoadInt32(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ivecs_Load(t *testing.T) { + t.Parallel() type args struct { i int } @@ -921,10 +942,10 @@ func Test_ivecs_Load(t *testing.T) { } defaultCheckFunc := func(w want, got interface{}, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -962,8 +983,10 @@ func Test_ivecs_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -982,12 +1005,12 @@ func Test_ivecs_Load(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func TestNewBVecs(t *testing.T) { +func TestNewUint8Vectors(t *testing.T) { + t.Parallel() type args struct { fname string } @@ -1005,10 +1028,10 @@ func TestNewBVecs(t *testing.T) { } defaultCheckFunc := func(w want, got Uint8Vectors, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if 
!reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -1040,8 +1063,10 @@ func TestNewBVecs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1057,12 +1082,12 @@ func TestNewBVecs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func TestNewFVecs(t *testing.T) { +func TestNewFloatVectors(t *testing.T) { + t.Parallel() type args struct { fname string } @@ -1080,10 +1105,10 @@ func TestNewFVecs(t *testing.T) { } defaultCheckFunc := func(w want, got FloatVectors, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -1115,8 +1140,10 @@ func TestNewFVecs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1132,12 +1159,12 @@ func TestNewFVecs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func TestNewIVecs(t *testing.T) { +func TestNewInt32Vectors(t *testing.T) { + t.Parallel() type args struct { fname string } @@ -1155,10 +1182,10 @@ func TestNewIVecs(t *testing.T) { } defaultCheckFunc := func(w want, got Int32Vectors, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -1190,8 +1217,10 @@ func TestNewIVecs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1207,12 +1236,12 @@ func TestNewIVecs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestOpen(t *testing.T) { + t.Parallel() type args struct { fname string } @@ -1230,10 +1259,10 @@ func TestOpen(t *testing.T) { } defaultCheckFunc := func(w want, got BillionScaleVectors, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -1265,8 +1294,10 @@ func TestOpen(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1282,7 
+1313,6 @@ func TestOpen(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/core/benchmark/benchmark_test.go b/hack/benchmark/core/benchmark/benchmark_test.go index d9dfb119c4..1cc705d829 100644 --- a/hack/benchmark/core/benchmark/benchmark_test.go +++ b/hack/benchmark/core/benchmark/benchmark_test.go @@ -94,7 +94,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/core/benchmark/option.go b/hack/benchmark/core/benchmark/option.go index 897664f623..a36aba9b94 100644 --- a/hack/benchmark/core/benchmark/option.go +++ b/hack/benchmark/core/benchmark/option.go @@ -19,9 +19,7 @@ package benchmark type Option func(*benchmark) -var ( - defaultOptions = []Option{} -) +var defaultOptions = []Option{} func WithName(name string) Option { return func(b *benchmark) { diff --git a/hack/benchmark/core/benchmark/strategy.go b/hack/benchmark/core/benchmark/strategy.go index 0c23198346..87238285f5 100644 --- a/hack/benchmark/core/benchmark/strategy.go +++ b/hack/benchmark/core/benchmark/strategy.go @@ -22,7 +22,7 @@ import ( "testing" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" ) // Strategy is an interface for benchmark. @@ -30,5 +30,5 @@ type Strategy interface { Run(context.Context, *testing.B, assets.Dataset, []uint) Init(context.Context, *testing.B, assets.Dataset) error PreProp(context.Context, *testing.B, assets.Dataset) ([]uint, error) - core.Closer + algorithm.Closer } diff --git a/hack/benchmark/core/benchmark/strategy/bulk_insert.go b/hack/benchmark/core/benchmark/strategy/bulk_insert.go index 8846ea1014..da16b17eb0 100644 --- a/hack/benchmark/core/benchmark/strategy/bulk_insert.go +++ b/hack/benchmark/core/benchmark/strategy/bulk_insert.go @@ -23,7 +23,7 @@ import ( "github.com/vdaas/vald/hack/benchmark/core/benchmark" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" ) const ( @@ -34,7 +34,7 @@ func NewBulkInsert(opts ...StrategyOption) benchmark.Strategy { return newStrategy(append([]StrategyOption{ WithPropName("BulkInsert"), WithProp32( - func(ctx context.Context, b *testing.B, c core.Core32, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit32, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { size := func() int { if maxBulkSize < dataset.TrainSize() { return maxBulkSize @@ -60,7 +60,7 @@ func NewBulkInsert(opts ...StrategyOption) benchmark.Strategy { }, ), WithProp64( - func(ctx context.Context, b *testing.B, c core.Core64, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit64, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { size := func() int { if maxBulkSize < dataset.TrainSize() { return maxBulkSize diff --git a/hack/benchmark/core/benchmark/strategy/bulk_insert_commit.go b/hack/benchmark/core/benchmark/strategy/bulk_insert_commit.go index 80eea2a5b1..b757736024 100644 --- a/hack/benchmark/core/benchmark/strategy/bulk_insert_commit.go +++ b/hack/benchmark/core/benchmark/strategy/bulk_insert_commit.go @@ -23,14 +23,14 @@ import ( 
"github.com/vdaas/vald/hack/benchmark/core/benchmark" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" ) func NewBulkInsertCommit(poolSize uint32, opts ...StrategyOption) benchmark.Strategy { return newStrategy(append([]StrategyOption{ WithPropName("BulkInsertCommit"), WithProp32( - func(ctx context.Context, b *testing.B, c core.Core32, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit32, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { size := func() int { if maxBulkSize < dataset.TrainSize() { return maxBulkSize @@ -56,7 +56,7 @@ func NewBulkInsertCommit(poolSize uint32, opts ...StrategyOption) benchmark.Stra }, ), WithProp64( - func(ctx context.Context, b *testing.B, c core.Core64, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit64, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { size := func() int { if maxBulkSize < dataset.TrainSize() { return maxBulkSize diff --git a/hack/benchmark/core/benchmark/strategy/bulk_insert_commit_test.go b/hack/benchmark/core/benchmark/strategy/bulk_insert_commit_test.go index 29a01fa4f4..be7da7edbd 100644 --- a/hack/benchmark/core/benchmark/strategy/bulk_insert_commit_test.go +++ b/hack/benchmark/core/benchmark/strategy/bulk_insert_commit_test.go @@ -93,7 +93,6 @@ func TestNewBulkInsertCommit(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/core/benchmark/strategy/bulk_insert_test.go b/hack/benchmark/core/benchmark/strategy/bulk_insert_test.go index 1ab449423a..5aa7bdaf40 100644 --- a/hack/benchmark/core/benchmark/strategy/bulk_insert_test.go +++ b/hack/benchmark/core/benchmark/strategy/bulk_insert_test.go @@ -90,7 +90,6 @@ func TestNewBulkInsert(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/core/benchmark/strategy/get_vector.go b/hack/benchmark/core/benchmark/strategy/get_vector.go index 8be24b9d75..9356af1ad3 100644 --- a/hack/benchmark/core/benchmark/strategy/get_vector.go +++ b/hack/benchmark/core/benchmark/strategy/get_vector.go @@ -24,29 +24,29 @@ import ( "github.com/vdaas/vald/hack/benchmark/core/benchmark" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" ) func NewGetVector(opts ...StrategyOption) benchmark.Strategy { return newStrategy(append([]StrategyOption{ WithPropName("GetVector"), WithPreProp32( - func(ctx context.Context, b *testing.B, c core.Core32, dataset assets.Dataset) (ids []uint, err error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit32, dataset assets.Dataset) (ids []uint, err error) { return insertAndCreateIndex32(ctx, c, dataset) }, ), WithProp32( - func(ctx context.Context, b *testing.B, c core.Core32, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit32, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { return c.GetVector(ids[int(atomic.LoadUint64(cnt))%len(ids)]) }, ), WithPreProp64( - func(ctx context.Context, b *testing.B, c core.Core64, dataset assets.Dataset) (ids []uint, err 
error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit64, dataset assets.Dataset) (ids []uint, err error) { return insertAndCreateIndex64(ctx, c, dataset) }, ), WithProp64( - func(ctx context.Context, b *testing.B, c core.Core64, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit64, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { return c.GetVector(ids[int(atomic.LoadUint64(cnt))%len(ids)]) }, ), diff --git a/hack/benchmark/core/benchmark/strategy/get_vector_test.go b/hack/benchmark/core/benchmark/strategy/get_vector_test.go index e9167e496e..2bf3cbce18 100644 --- a/hack/benchmark/core/benchmark/strategy/get_vector_test.go +++ b/hack/benchmark/core/benchmark/strategy/get_vector_test.go @@ -90,7 +90,6 @@ func TestNewGetVector(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/core/benchmark/strategy/insert.go b/hack/benchmark/core/benchmark/strategy/insert.go index 688d4c68cb..00f8d90217 100644 --- a/hack/benchmark/core/benchmark/strategy/insert.go +++ b/hack/benchmark/core/benchmark/strategy/insert.go @@ -24,14 +24,14 @@ import ( "github.com/vdaas/vald/hack/benchmark/core/benchmark" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" ) func NewInsert(opts ...StrategyOption) benchmark.Strategy { return newStrategy(append([]StrategyOption{ WithPropName("Insert"), WithProp32( - func(ctx context.Context, b *testing.B, c core.Core32, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit32, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { v, err := dataset.Train(int(atomic.LoadUint64(cnt)) % dataset.TrainSize()) if err != nil { return nil, err @@ -42,7 +42,7 @@ func NewInsert(opts ...StrategyOption) benchmark.Strategy { }, ), WithProp64( - func(ctx context.Context, b *testing.B, c core.Core64, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit64, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { v, err := dataset.Train(int(atomic.LoadUint64(cnt)) % dataset.TrainSize()) if err != nil { return nil, err diff --git a/hack/benchmark/core/benchmark/strategy/insert_commit.go b/hack/benchmark/core/benchmark/strategy/insert_commit.go index 7b8bb98baa..7ca483c519 100644 --- a/hack/benchmark/core/benchmark/strategy/insert_commit.go +++ b/hack/benchmark/core/benchmark/strategy/insert_commit.go @@ -24,14 +24,14 @@ import ( "github.com/vdaas/vald/hack/benchmark/core/benchmark" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" ) func NewInsertCommit(poolSize uint32, opts ...StrategyOption) benchmark.Strategy { return newStrategy(append([]StrategyOption{ WithPropName("InsertCommit"), WithProp32( - func(ctx context.Context, b *testing.B, c core.Core32, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit32, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { v, err := dataset.Train(int(atomic.LoadUint64(cnt)) % dataset.TrainSize()) if err != nil { return nil, err @@ -42,7 +42,7 
@@ func NewInsertCommit(poolSize uint32, opts ...StrategyOption) benchmark.Strategy }, ), WithProp64( - func(ctx context.Context, b *testing.B, c core.Core64, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit64, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { v, err := dataset.Train(int(atomic.LoadUint64(cnt)) % dataset.TrainSize()) if err != nil { return nil, err diff --git a/hack/benchmark/core/benchmark/strategy/insert_commit_test.go b/hack/benchmark/core/benchmark/strategy/insert_commit_test.go index 0f3ad1a2d1..4a26b84b68 100644 --- a/hack/benchmark/core/benchmark/strategy/insert_commit_test.go +++ b/hack/benchmark/core/benchmark/strategy/insert_commit_test.go @@ -93,7 +93,6 @@ func TestNewInsertCommit(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/core/benchmark/strategy/insert_test.go b/hack/benchmark/core/benchmark/strategy/insert_test.go index 13582a28b8..c9863e31df 100644 --- a/hack/benchmark/core/benchmark/strategy/insert_test.go +++ b/hack/benchmark/core/benchmark/strategy/insert_test.go @@ -90,7 +90,6 @@ func TestNewInsert(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/core/benchmark/strategy/remove.go b/hack/benchmark/core/benchmark/strategy/remove.go index 431b3a3740..0113e06c1b 100644 --- a/hack/benchmark/core/benchmark/strategy/remove.go +++ b/hack/benchmark/core/benchmark/strategy/remove.go @@ -24,30 +24,30 @@ import ( "github.com/vdaas/vald/hack/benchmark/core/benchmark" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" ) func NewRemove(opts ...StrategyOption) benchmark.Strategy { return newStrategy(append([]StrategyOption{ WithPropName("Remove"), WithPreProp32( - func(ctx context.Context, b *testing.B, c core.Core32, dataset assets.Dataset) (ids []uint, err error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit32, dataset assets.Dataset) (ids []uint, err error) { return insertAndCreateIndex32(ctx, c, dataset) }, ), WithProp32( - func(ctx context.Context, b *testing.B, c core.Core32, dataset assets.Dataset, ids []uint, cnt *uint64) (obj interface{}, err error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit32, dataset assets.Dataset, ids []uint, cnt *uint64) (obj interface{}, err error) { err = c.Remove(ids[int(atomic.LoadUint64(cnt))%len(ids)]) return }, ), WithPreProp64( - func(ctx context.Context, b *testing.B, c core.Core64, dataset assets.Dataset) (ids []uint, err error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit64, dataset assets.Dataset) (ids []uint, err error) { return insertAndCreateIndex64(ctx, c, dataset) }, ), WithProp64( - func(ctx context.Context, b *testing.B, c core.Core64, dataset assets.Dataset, ids []uint, cnt *uint64) (obj interface{}, err error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit64, dataset assets.Dataset, ids []uint, cnt *uint64) (obj interface{}, err error) { err = c.Remove(ids[int(atomic.LoadUint64(cnt))%len(ids)]) return }, diff --git a/hack/benchmark/core/benchmark/strategy/remove_test.go b/hack/benchmark/core/benchmark/strategy/remove_test.go index 49e713d4c9..1c35a87b09 100644 --- a/hack/benchmark/core/benchmark/strategy/remove_test.go +++ 
b/hack/benchmark/core/benchmark/strategy/remove_test.go @@ -90,7 +90,6 @@ func TestNewRemove(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/core/benchmark/strategy/search.go b/hack/benchmark/core/benchmark/strategy/search.go index d05d753283..e1f9801443 100644 --- a/hack/benchmark/core/benchmark/strategy/search.go +++ b/hack/benchmark/core/benchmark/strategy/search.go @@ -24,19 +24,19 @@ import ( "github.com/vdaas/vald/hack/benchmark/core/benchmark" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" ) func NewSearch(size int, epsilon, radius float32, opts ...StrategyOption) benchmark.Strategy { return newStrategy(append([]StrategyOption{ WithPropName("Search"), WithPreProp32( - func(ctx context.Context, b *testing.B, c core.Core32, dataset assets.Dataset) (ids []uint, err error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit32, dataset assets.Dataset) (ids []uint, err error) { return insertAndCreateIndex32(ctx, c, dataset) }, ), WithProp32( - func(ctx context.Context, b *testing.B, c core.Core32, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit32, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { v, err := dataset.Query(int(atomic.LoadUint64(cnt)) % dataset.TrainSize()) if err != nil { return nil, err @@ -47,12 +47,12 @@ func NewSearch(size int, epsilon, radius float32, opts ...StrategyOption) benchm }, ), WithPreProp64( - func(ctx context.Context, b *testing.B, c core.Core64, dataset assets.Dataset) (ids []uint, err error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit64, dataset assets.Dataset) (ids []uint, err error) { return insertAndCreateIndex64(ctx, c, dataset) }, ), WithProp64( - func(ctx context.Context, b *testing.B, c core.Core64, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { + func(ctx context.Context, b *testing.B, c algorithm.Bit64, dataset assets.Dataset, ids []uint, cnt *uint64) (interface{}, error) { v, err := dataset.Train(int(atomic.LoadUint64(cnt)) % dataset.TrainSize()) if err != nil { return nil, err diff --git a/hack/benchmark/core/benchmark/strategy/search_test.go b/hack/benchmark/core/benchmark/strategy/search_test.go index e634a6c126..9dbf2a3dfa 100644 --- a/hack/benchmark/core/benchmark/strategy/search_test.go +++ b/hack/benchmark/core/benchmark/strategy/search_test.go @@ -99,7 +99,6 @@ func TestNewSearch(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/core/benchmark/strategy/strategy.go b/hack/benchmark/core/benchmark/strategy/strategy.go index cc1a513fec..c32fe38bc6 100644 --- a/hack/benchmark/core/benchmark/strategy/strategy.go +++ b/hack/benchmark/core/benchmark/strategy/strategy.go @@ -24,29 +24,29 @@ import ( "github.com/vdaas/vald/hack/benchmark/core/benchmark" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" "github.com/vdaas/vald/internal/errors" ) type strategy struct { - core32 core.Core32 - core64 core.Core64 - initCore32 func(context.Context, *testing.B, assets.Dataset) (core.Core32, core.Closer, error) - initCore64 func(context.Context, *testing.B, assets.Dataset) 
(core.Core64, core.Closer, error) - closer core.Closer - propName string - preProp32 func(context.Context, *testing.B, core.Core32, assets.Dataset) ([]uint, error) - preProp64 func(context.Context, *testing.B, core.Core64, assets.Dataset) ([]uint, error) - mode core.Mode - prop32 func(context.Context, *testing.B, core.Core32, assets.Dataset, []uint, *uint64) (interface{}, error) - prop64 func(context.Context, *testing.B, core.Core64, assets.Dataset, []uint, *uint64) (interface{}, error) - parallel bool + core32 algorithm.Bit32 + core64 algorithm.Bit64 + initBit32 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit32, algorithm.Closer, error) + initBit64 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit64, algorithm.Closer, error) + closer algorithm.Closer + propName string + preProp32 func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset) ([]uint, error) + preProp64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset) ([]uint, error) + mode algorithm.Mode + prop32 func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset, []uint, *uint64) (interface{}, error) + prop64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset, []uint, *uint64) (interface{}, error) + parallel bool } func newStrategy(opts ...StrategyOption) benchmark.Strategy { s := &strategy{ // invalid mode. - mode: core.Mode(100), + mode: algorithm.Mode(100), } for _, opt := range append(defaultStrategyOptions, opts...) { opt(s) @@ -57,15 +57,15 @@ func newStrategy(opts ...StrategyOption) benchmark.Strategy { func (s *strategy) Init(ctx context.Context, b *testing.B, dataset assets.Dataset) error { b.Helper() switch s.mode { - case core.Float32: - core32, closer, err := s.initCore32(ctx, b, dataset) + case algorithm.Float32: + core32, closer, err := s.initBit32(ctx, b, dataset) if err != nil { b.Error(err) return err } s.core32, s.closer = core32, closer - case core.Float64: - core64, closer, err := s.initCore64(ctx, b, dataset) + case algorithm.Float64: + core64, closer, err := s.initBit64(ctx, b, dataset) if err != nil { b.Error(err) return err @@ -82,9 +82,9 @@ func (s *strategy) PreProp(ctx context.Context, b *testing.B, dataset assets.Dat b.Helper() switch s.mode { - case core.Float32: + case algorithm.Float32: return s.preProp32(ctx, b, s.core32, dataset) - case core.Float64: + case algorithm.Float64: return s.preProp64(ctx, b, s.core64, dataset) default: return nil, errors.ErrInvalidCoreMode @@ -103,11 +103,11 @@ func (s *strategy) Run(ctx context.Context, b *testing.B, dataset assets.Dataset defer b.StopTimer() switch s.mode { - case core.Float32: + case algorithm.Float32: b.Run(s.propName, func(bb *testing.B) { s.float32(ctx, bb, dataset, ids, &cnt) }) - case core.Float64: + case algorithm.Float64: b.Run(s.propName, func(bb *testing.B) { s.float64(ctx, bb, dataset, ids, &cnt) }) diff --git a/hack/benchmark/core/benchmark/strategy/strategy_option.go b/hack/benchmark/core/benchmark/strategy/strategy_option.go index 4d3027842d..a22ffe2923 100644 --- a/hack/benchmark/core/benchmark/strategy/strategy_option.go +++ b/hack/benchmark/core/benchmark/strategy/strategy_option.go @@ -22,24 +22,22 @@ import ( "testing" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" ) type StrategyOption func(*strategy) error -var ( - defaultStrategyOptions = []StrategyOption{ - WithPreProp32(func(context.Context, *testing.B, core.Core32, 
assets.Dataset) ([]uint, error) { - return nil, nil - }), - WithPreProp64(func(context.Context, *testing.B, core.Core64, assets.Dataset) ([]uint, error) { - return nil, nil - }), - } -) +var defaultStrategyOptions = []StrategyOption{ + WithPreProp32(func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset) ([]uint, error) { + return nil, nil + }), + WithPreProp64(func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset) ([]uint, error) { + return nil, nil + }), +} func WithPreProp32( - fn func(context.Context, *testing.B, core.Core32, assets.Dataset) ([]uint, error), + fn func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset) ([]uint, error), ) StrategyOption { return func(s *strategy) error { if fn != nil { @@ -50,7 +48,7 @@ func WithPreProp32( } func WithProp32( - fn func(context.Context, *testing.B, core.Core32, assets.Dataset, []uint, *uint64) (interface{}, error), + fn func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset, []uint, *uint64) (interface{}, error), ) StrategyOption { return func(s *strategy) error { if fn != nil { @@ -61,7 +59,7 @@ func WithProp32( } func WithPreProp64( - fn func(context.Context, *testing.B, core.Core64, assets.Dataset) ([]uint, error), + fn func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset) ([]uint, error), ) StrategyOption { return func(s *strategy) error { if fn != nil { @@ -72,7 +70,7 @@ func WithPreProp64( } func WithProp64( - fn func(context.Context, *testing.B, core.Core64, assets.Dataset, []uint, *uint64) (interface{}, error), + fn func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset, []uint, *uint64) (interface{}, error), ) StrategyOption { return func(s *strategy) error { if fn != nil { @@ -91,25 +89,25 @@ func WithPropName(str string) StrategyOption { } } -func WithCore32( - fn func(context.Context, *testing.B, assets.Dataset) (core.Core32, core.Closer, error), +func WithBit32( + fn func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit32, algorithm.Closer, error), ) StrategyOption { return func(s *strategy) (err error) { if fn != nil { - s.mode = core.Float32 - s.initCore32 = fn + s.mode = algorithm.Float32 + s.initBit32 = fn } return nil } } -func WithCore64( - fn func(context.Context, *testing.B, assets.Dataset) (core.Core64, core.Closer, error), +func WithBit64( + fn func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit64, algorithm.Closer, error), ) StrategyOption { return func(s *strategy) error { if fn != nil { - s.mode = core.Float64 - s.initCore64 = fn + s.mode = algorithm.Float64 + s.initBit64 = fn } return nil } diff --git a/hack/benchmark/core/benchmark/strategy/strategy_option_test.go b/hack/benchmark/core/benchmark/strategy/strategy_option_test.go index d92069e421..6097c142f2 100644 --- a/hack/benchmark/core/benchmark/strategy/strategy_option_test.go +++ b/hack/benchmark/core/benchmark/strategy/strategy_option_test.go @@ -22,7 +22,7 @@ import ( "testing" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" "go.uber.org/goleak" ) @@ -30,7 +30,7 @@ import ( func TestWithPreProp32(t *testing.T) { type T = interface{} type args struct { - fn func(context.Context, *testing.B, core.Core32, assets.Dataset) ([]uint, error) + fn func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset) ([]uint, error) } type want struct { obj *T @@ -143,7 +143,7 @@ func TestWithPreProp32(t *testing.T) { func TestWithProp32(t *testing.T) 
{ type T = interface{} type args struct { - fn func(context.Context, *testing.B, core.Core32, assets.Dataset, []uint, *uint64) (interface{}, error) + fn func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset, []uint, *uint64) (interface{}, error) } type want struct { obj *T @@ -256,7 +256,7 @@ func TestWithProp32(t *testing.T) { func TestWithPreProp64(t *testing.T) { type T = interface{} type args struct { - fn func(context.Context, *testing.B, core.Core64, assets.Dataset) ([]uint, error) + fn func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset) ([]uint, error) } type want struct { obj *T @@ -369,7 +369,7 @@ func TestWithPreProp64(t *testing.T) { func TestWithProp64(t *testing.T) { type T = interface{} type args struct { - fn func(context.Context, *testing.B, core.Core64, assets.Dataset, []uint, *uint64) (interface{}, error) + fn func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset, []uint, *uint64) (interface{}, error) } type want struct { obj *T @@ -592,10 +592,10 @@ func TestWithPropName(t *testing.T) { } } -func TestWithCore32(t *testing.T) { +func TestWithBit32(t *testing.T) { type T = interface{} type args struct { - fn func(context.Context, *testing.B, assets.Dataset) (core.Core32, core.Closer, error) + fn func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit32, algorithm.Closer, error) } type want struct { obj *T @@ -682,7 +682,7 @@ func TestWithCore32(t *testing.T) { test.checkFunc = defaultCheckFunc } - got := WithCore32(test.args.fn) + got := WithBit32(test.args.fn) obj := new(T) if err := test.checkFunc(test.want, obj, got(obj)); err != nil { tt.Errorf("error = %v", err) @@ -694,7 +694,7 @@ func TestWithCore32(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - got := WithCore32(test.args.fn) + got := WithBit32(test.args.fn) obj := new(T) got(obj) if err := test.checkFunc(tt.want, obj); err != nil { @@ -705,10 +705,10 @@ func TestWithCore32(t *testing.T) { } } -func TestWithCore64(t *testing.T) { +func TestWithBit64(t *testing.T) { type T = interface{} type args struct { - fn func(context.Context, *testing.B, assets.Dataset) (core.Core64, core.Closer, error) + fn func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit64, algorithm.Closer, error) } type want struct { obj *T @@ -795,7 +795,7 @@ func TestWithCore64(t *testing.T) { test.checkFunc = defaultCheckFunc } - got := WithCore64(test.args.fn) + got := WithBit64(test.args.fn) obj := new(T) if err := test.checkFunc(test.want, obj, got(obj)); err != nil { tt.Errorf("error = %v", err) @@ -807,7 +807,7 @@ func TestWithCore64(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - got := WithCore64(test.args.fn) + got := WithBit64(test.args.fn) obj := new(T) got(obj) if err := test.checkFunc(tt.want, obj); err != nil { diff --git a/hack/benchmark/core/benchmark/strategy/strategy_test.go b/hack/benchmark/core/benchmark/strategy/strategy_test.go index 89eb93cf20..395d9b1230 100644 --- a/hack/benchmark/core/benchmark/strategy/strategy_test.go +++ b/hack/benchmark/core/benchmark/strategy/strategy_test.go @@ -24,7 +24,7 @@ import ( "github.com/vdaas/vald/hack/benchmark/core/benchmark" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" "github.com/vdaas/vald/internal/errors" "go.uber.org/goleak" @@ -96,7 +96,6 @@ func Test_newStrategy(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { 
tt.Errorf("error = %v", err) } - }) } } @@ -108,18 +107,18 @@ func Test_strategy_Init(t *testing.T) { dataset assets.Dataset } type fields struct { - core32 core.Core32 - core64 core.Core64 - initCore32 func(context.Context, *testing.B, assets.Dataset) (core.Core32, core.Closer, error) - initCore64 func(context.Context, *testing.B, assets.Dataset) (core.Core64, core.Closer, error) - closer core.Closer - propName string - preProp32 func(context.Context, *testing.B, core.Core32, assets.Dataset) ([]uint, error) - preProp64 func(context.Context, *testing.B, core.Core64, assets.Dataset) ([]uint, error) - mode core.Mode - prop32 func(context.Context, *testing.B, core.Core32, assets.Dataset, []uint, *uint64) (interface{}, error) - prop64 func(context.Context, *testing.B, core.Core64, assets.Dataset, []uint, *uint64) (interface{}, error) - parallel bool + core32 algorithm.Bit32 + core64 algorithm.Bit64 + initBit32 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit32, algorithm.Closer, error) + initBit64 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit64, algorithm.Closer, error) + closer algorithm.Closer + propName string + preProp32 func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset) ([]uint, error) + preProp64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset) ([]uint, error) + mode algorithm.Mode + prop32 func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset, []uint, *uint64) (interface{}, error) + prop64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset, []uint, *uint64) (interface{}, error) + parallel bool } type want struct { err error @@ -152,8 +151,8 @@ func Test_strategy_Init(t *testing.T) { fields: fields { core32: nil, core64: nil, - initCore32: nil, - initCore64: nil, + initBit32: nil, + initBit64: nil, closer: nil, propName: "", preProp32: nil, @@ -181,8 +180,8 @@ func Test_strategy_Init(t *testing.T) { fields: fields { core32: nil, core64: nil, - initCore32: nil, - initCore64: nil, + initBit32: nil, + initBit64: nil, closer: nil, propName: "", preProp32: nil, @@ -212,25 +211,24 @@ func Test_strategy_Init(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &strategy{ - core32: test.fields.core32, - core64: test.fields.core64, - initCore32: test.fields.initCore32, - initCore64: test.fields.initCore64, - closer: test.fields.closer, - propName: test.fields.propName, - preProp32: test.fields.preProp32, - preProp64: test.fields.preProp64, - mode: test.fields.mode, - prop32: test.fields.prop32, - prop64: test.fields.prop64, - parallel: test.fields.parallel, + core32: test.fields.core32, + core64: test.fields.core64, + initBit32: test.fields.initBit32, + initBit64: test.fields.initBit64, + closer: test.fields.closer, + propName: test.fields.propName, + preProp32: test.fields.preProp32, + preProp64: test.fields.preProp64, + mode: test.fields.mode, + prop32: test.fields.prop32, + prop64: test.fields.prop64, + parallel: test.fields.parallel, } err := s.Init(test.args.ctx, test.args.b, test.args.dataset) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -242,18 +240,18 @@ func Test_strategy_PreProp(t *testing.T) { dataset assets.Dataset } type fields struct { - core32 core.Core32 - core64 core.Core64 - initCore32 func(context.Context, *testing.B, assets.Dataset) (core.Core32, core.Closer, error) - initCore64 func(context.Context, *testing.B, assets.Dataset) (core.Core64, core.Closer, error) - closer core.Closer - propName string - preProp32 
func(context.Context, *testing.B, core.Core32, assets.Dataset) ([]uint, error) - preProp64 func(context.Context, *testing.B, core.Core64, assets.Dataset) ([]uint, error) - mode core.Mode - prop32 func(context.Context, *testing.B, core.Core32, assets.Dataset, []uint, *uint64) (interface{}, error) - prop64 func(context.Context, *testing.B, core.Core64, assets.Dataset, []uint, *uint64) (interface{}, error) - parallel bool + core32 algorithm.Bit32 + core64 algorithm.Bit64 + initBit32 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit32, algorithm.Closer, error) + initBit64 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit64, algorithm.Closer, error) + closer algorithm.Closer + propName string + preProp32 func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset) ([]uint, error) + preProp64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset) ([]uint, error) + mode algorithm.Mode + prop32 func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset, []uint, *uint64) (interface{}, error) + prop64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset, []uint, *uint64) (interface{}, error) + parallel bool } type want struct { want []uint @@ -290,8 +288,8 @@ func Test_strategy_PreProp(t *testing.T) { fields: fields { core32: nil, core64: nil, - initCore32: nil, - initCore64: nil, + initBit32: nil, + initBit64: nil, closer: nil, propName: "", preProp32: nil, @@ -319,8 +317,8 @@ func Test_strategy_PreProp(t *testing.T) { fields: fields { core32: nil, core64: nil, - initCore32: nil, - initCore64: nil, + initBit32: nil, + initBit64: nil, closer: nil, propName: "", preProp32: nil, @@ -350,25 +348,24 @@ func Test_strategy_PreProp(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &strategy{ - core32: test.fields.core32, - core64: test.fields.core64, - initCore32: test.fields.initCore32, - initCore64: test.fields.initCore64, - closer: test.fields.closer, - propName: test.fields.propName, - preProp32: test.fields.preProp32, - preProp64: test.fields.preProp64, - mode: test.fields.mode, - prop32: test.fields.prop32, - prop64: test.fields.prop64, - parallel: test.fields.parallel, + core32: test.fields.core32, + core64: test.fields.core64, + initBit32: test.fields.initBit32, + initBit64: test.fields.initBit64, + closer: test.fields.closer, + propName: test.fields.propName, + preProp32: test.fields.preProp32, + preProp64: test.fields.preProp64, + mode: test.fields.mode, + prop32: test.fields.prop32, + prop64: test.fields.prop64, + parallel: test.fields.parallel, } got, err := s.PreProp(test.args.ctx, test.args.b, test.args.dataset) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -381,18 +378,18 @@ func Test_strategy_Run(t *testing.T) { ids []uint } type fields struct { - core32 core.Core32 - core64 core.Core64 - initCore32 func(context.Context, *testing.B, assets.Dataset) (core.Core32, core.Closer, error) - initCore64 func(context.Context, *testing.B, assets.Dataset) (core.Core64, core.Closer, error) - closer core.Closer - propName string - preProp32 func(context.Context, *testing.B, core.Core32, assets.Dataset) ([]uint, error) - preProp64 func(context.Context, *testing.B, core.Core64, assets.Dataset) ([]uint, error) - mode core.Mode - prop32 func(context.Context, *testing.B, core.Core32, assets.Dataset, []uint, *uint64) (interface{}, error) - prop64 func(context.Context, *testing.B, core.Core64, assets.Dataset, []uint, *uint64) (interface{}, error) - parallel bool + core32 
algorithm.Bit32 + core64 algorithm.Bit64 + initBit32 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit32, algorithm.Closer, error) + initBit64 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit64, algorithm.Closer, error) + closer algorithm.Closer + propName string + preProp32 func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset) ([]uint, error) + preProp64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset) ([]uint, error) + mode algorithm.Mode + prop32 func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset, []uint, *uint64) (interface{}, error) + prop64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset, []uint, *uint64) (interface{}, error) + parallel bool } type want struct { } @@ -422,8 +419,8 @@ func Test_strategy_Run(t *testing.T) { fields: fields { core32: nil, core64: nil, - initCore32: nil, - initCore64: nil, + initBit32: nil, + initBit64: nil, closer: nil, propName: "", preProp32: nil, @@ -452,8 +449,8 @@ func Test_strategy_Run(t *testing.T) { fields: fields { core32: nil, core64: nil, - initCore32: nil, - initCore64: nil, + initBit32: nil, + initBit64: nil, closer: nil, propName: "", preProp32: nil, @@ -483,18 +480,18 @@ func Test_strategy_Run(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &strategy{ - core32: test.fields.core32, - core64: test.fields.core64, - initCore32: test.fields.initCore32, - initCore64: test.fields.initCore64, - closer: test.fields.closer, - propName: test.fields.propName, - preProp32: test.fields.preProp32, - preProp64: test.fields.preProp64, - mode: test.fields.mode, - prop32: test.fields.prop32, - prop64: test.fields.prop64, - parallel: test.fields.parallel, + core32: test.fields.core32, + core64: test.fields.core64, + initBit32: test.fields.initBit32, + initBit64: test.fields.initBit64, + closer: test.fields.closer, + propName: test.fields.propName, + preProp32: test.fields.preProp32, + preProp64: test.fields.preProp64, + mode: test.fields.mode, + prop32: test.fields.prop32, + prop64: test.fields.prop64, + parallel: test.fields.parallel, } s.Run(test.args.ctx, test.args.b, test.args.dataset, test.args.ids) @@ -507,18 +504,18 @@ func Test_strategy_Run(t *testing.T) { func Test_strategy_Close(t *testing.T) { type fields struct { - core32 core.Core32 - core64 core.Core64 - initCore32 func(context.Context, *testing.B, assets.Dataset) (core.Core32, core.Closer, error) - initCore64 func(context.Context, *testing.B, assets.Dataset) (core.Core64, core.Closer, error) - closer core.Closer - propName string - preProp32 func(context.Context, *testing.B, core.Core32, assets.Dataset) ([]uint, error) - preProp64 func(context.Context, *testing.B, core.Core64, assets.Dataset) ([]uint, error) - mode core.Mode - prop32 func(context.Context, *testing.B, core.Core32, assets.Dataset, []uint, *uint64) (interface{}, error) - prop64 func(context.Context, *testing.B, core.Core64, assets.Dataset, []uint, *uint64) (interface{}, error) - parallel bool + core32 algorithm.Bit32 + core64 algorithm.Bit64 + initBit32 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit32, algorithm.Closer, error) + initBit64 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit64, algorithm.Closer, error) + closer algorithm.Closer + propName string + preProp32 func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset) ([]uint, error) + preProp64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset) ([]uint, error) + mode algorithm.Mode + prop32 func(context.Context, 
*testing.B, algorithm.Bit32, assets.Dataset, []uint, *uint64) (interface{}, error) + prop64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset, []uint, *uint64) (interface{}, error) + parallel bool } type want struct { } @@ -541,8 +538,8 @@ func Test_strategy_Close(t *testing.T) { fields: fields { core32: nil, core64: nil, - initCore32: nil, - initCore64: nil, + initBit32: nil, + initBit64: nil, closer: nil, propName: "", preProp32: nil, @@ -565,8 +562,8 @@ func Test_strategy_Close(t *testing.T) { fields: fields { core32: nil, core64: nil, - initCore32: nil, - initCore64: nil, + initBit32: nil, + initBit64: nil, closer: nil, propName: "", preProp32: nil, @@ -596,18 +593,18 @@ func Test_strategy_Close(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &strategy{ - core32: test.fields.core32, - core64: test.fields.core64, - initCore32: test.fields.initCore32, - initCore64: test.fields.initCore64, - closer: test.fields.closer, - propName: test.fields.propName, - preProp32: test.fields.preProp32, - preProp64: test.fields.preProp64, - mode: test.fields.mode, - prop32: test.fields.prop32, - prop64: test.fields.prop64, - parallel: test.fields.parallel, + core32: test.fields.core32, + core64: test.fields.core64, + initBit32: test.fields.initBit32, + initBit64: test.fields.initBit64, + closer: test.fields.closer, + propName: test.fields.propName, + preProp32: test.fields.preProp32, + preProp64: test.fields.preProp64, + mode: test.fields.mode, + prop32: test.fields.prop32, + prop64: test.fields.prop64, + parallel: test.fields.parallel, } s.Close() @@ -627,18 +624,18 @@ func Test_strategy_float32(t *testing.T) { cnt *uint64 } type fields struct { - core32 core.Core32 - core64 core.Core64 - initCore32 func(context.Context, *testing.B, assets.Dataset) (core.Core32, core.Closer, error) - initCore64 func(context.Context, *testing.B, assets.Dataset) (core.Core64, core.Closer, error) - closer core.Closer - propName string - preProp32 func(context.Context, *testing.B, core.Core32, assets.Dataset) ([]uint, error) - preProp64 func(context.Context, *testing.B, core.Core64, assets.Dataset) ([]uint, error) - mode core.Mode - prop32 func(context.Context, *testing.B, core.Core32, assets.Dataset, []uint, *uint64) (interface{}, error) - prop64 func(context.Context, *testing.B, core.Core64, assets.Dataset, []uint, *uint64) (interface{}, error) - parallel bool + core32 algorithm.Bit32 + core64 algorithm.Bit64 + initBit32 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit32, algorithm.Closer, error) + initBit64 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit64, algorithm.Closer, error) + closer algorithm.Closer + propName string + preProp32 func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset) ([]uint, error) + preProp64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset) ([]uint, error) + mode algorithm.Mode + prop32 func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset, []uint, *uint64) (interface{}, error) + prop64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset, []uint, *uint64) (interface{}, error) + parallel bool } type want struct { } @@ -669,8 +666,8 @@ func Test_strategy_float32(t *testing.T) { fields: fields { core32: nil, core64: nil, - initCore32: nil, - initCore64: nil, + initBit32: nil, + initBit64: nil, closer: nil, propName: "", preProp32: nil, @@ -700,8 +697,8 @@ func Test_strategy_float32(t *testing.T) { fields: fields { core32: nil, core64: nil, - initCore32: nil, - initCore64: nil, + 
initBit32: nil, + initBit64: nil, closer: nil, propName: "", preProp32: nil, @@ -731,18 +728,18 @@ func Test_strategy_float32(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &strategy{ - core32: test.fields.core32, - core64: test.fields.core64, - initCore32: test.fields.initCore32, - initCore64: test.fields.initCore64, - closer: test.fields.closer, - propName: test.fields.propName, - preProp32: test.fields.preProp32, - preProp64: test.fields.preProp64, - mode: test.fields.mode, - prop32: test.fields.prop32, - prop64: test.fields.prop64, - parallel: test.fields.parallel, + core32: test.fields.core32, + core64: test.fields.core64, + initBit32: test.fields.initBit32, + initBit64: test.fields.initBit64, + closer: test.fields.closer, + propName: test.fields.propName, + preProp32: test.fields.preProp32, + preProp64: test.fields.preProp64, + mode: test.fields.mode, + prop32: test.fields.prop32, + prop64: test.fields.prop64, + parallel: test.fields.parallel, } s.float32(test.args.ctx, test.args.b, test.args.dataset, test.args.ids, test.args.cnt) @@ -762,18 +759,18 @@ func Test_strategy_float64(t *testing.T) { cnt *uint64 } type fields struct { - core32 core.Core32 - core64 core.Core64 - initCore32 func(context.Context, *testing.B, assets.Dataset) (core.Core32, core.Closer, error) - initCore64 func(context.Context, *testing.B, assets.Dataset) (core.Core64, core.Closer, error) - closer core.Closer - propName string - preProp32 func(context.Context, *testing.B, core.Core32, assets.Dataset) ([]uint, error) - preProp64 func(context.Context, *testing.B, core.Core64, assets.Dataset) ([]uint, error) - mode core.Mode - prop32 func(context.Context, *testing.B, core.Core32, assets.Dataset, []uint, *uint64) (interface{}, error) - prop64 func(context.Context, *testing.B, core.Core64, assets.Dataset, []uint, *uint64) (interface{}, error) - parallel bool + core32 algorithm.Bit32 + core64 algorithm.Bit64 + initBit32 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit32, algorithm.Closer, error) + initBit64 func(context.Context, *testing.B, assets.Dataset) (algorithm.Bit64, algorithm.Closer, error) + closer algorithm.Closer + propName string + preProp32 func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset) ([]uint, error) + preProp64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset) ([]uint, error) + mode algorithm.Mode + prop32 func(context.Context, *testing.B, algorithm.Bit32, assets.Dataset, []uint, *uint64) (interface{}, error) + prop64 func(context.Context, *testing.B, algorithm.Bit64, assets.Dataset, []uint, *uint64) (interface{}, error) + parallel bool } type want struct { } @@ -804,8 +801,8 @@ func Test_strategy_float64(t *testing.T) { fields: fields { core32: nil, core64: nil, - initCore32: nil, - initCore64: nil, + initBit32: nil, + initBit64: nil, closer: nil, propName: "", preProp32: nil, @@ -835,8 +832,8 @@ func Test_strategy_float64(t *testing.T) { fields: fields { core32: nil, core64: nil, - initCore32: nil, - initCore64: nil, + initBit32: nil, + initBit64: nil, closer: nil, propName: "", preProp32: nil, @@ -866,18 +863,18 @@ func Test_strategy_float64(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &strategy{ - core32: test.fields.core32, - core64: test.fields.core64, - initCore32: test.fields.initCore32, - initCore64: test.fields.initCore64, - closer: test.fields.closer, - propName: test.fields.propName, - preProp32: test.fields.preProp32, - preProp64: test.fields.preProp64, - mode: test.fields.mode, - prop32: test.fields.prop32, - 
prop64: test.fields.prop64, - parallel: test.fields.parallel, + core32: test.fields.core32, + core64: test.fields.core64, + initBit32: test.fields.initBit32, + initBit64: test.fields.initBit64, + closer: test.fields.closer, + propName: test.fields.propName, + preProp32: test.fields.preProp32, + preProp64: test.fields.preProp64, + mode: test.fields.mode, + prop32: test.fields.prop32, + prop64: test.fields.prop64, + parallel: test.fields.parallel, } s.float64(test.args.ctx, test.args.b, test.args.dataset, test.args.ids, test.args.cnt) diff --git a/hack/benchmark/core/benchmark/strategy/util.go b/hack/benchmark/core/benchmark/strategy/util.go index a67c8a80ad..e65179ac8c 100644 --- a/hack/benchmark/core/benchmark/strategy/util.go +++ b/hack/benchmark/core/benchmark/strategy/util.go @@ -21,7 +21,7 @@ import ( "context" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" "github.com/vdaas/vald/internal/errors" ) @@ -42,7 +42,7 @@ func wrapErrors(errs []error) (wrapped error) { return } -func insertAndCreateIndex32(ctx context.Context, c core.Core32, dataset assets.Dataset) (ids []uint, err error) { +func insertAndCreateIndex32(ctx context.Context, c algorithm.Bit32, dataset assets.Dataset) (ids []uint, err error) { ids = make([]uint, 0, dataset.TrainSize()*bulkInsertCnt) n := 0 @@ -72,7 +72,7 @@ func insertAndCreateIndex32(ctx context.Context, c core.Core32, dataset assets.D return } -func insertAndCreateIndex64(ctx context.Context, c core.Core64, dataset assets.Dataset) (ids []uint, err error) { +func insertAndCreateIndex64(ctx context.Context, c algorithm.Bit64, dataset assets.Dataset) (ids []uint, err error) { ids = make([]uint, 0, dataset.TrainSize()*bulkInsertCnt) n := 0 diff --git a/hack/benchmark/core/benchmark/strategy/util_test.go b/hack/benchmark/core/benchmark/strategy/util_test.go index 4ab7d38de9..84f445e69d 100644 --- a/hack/benchmark/core/benchmark/strategy/util_test.go +++ b/hack/benchmark/core/benchmark/strategy/util_test.go @@ -23,9 +23,8 @@ import ( "testing" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) @@ -95,7 +94,6 @@ func Test_wrapErrors(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -103,7 +101,7 @@ func Test_wrapErrors(t *testing.T) { func Test_insertAndCreateIndex32(t *testing.T) { type args struct { ctx context.Context - c core.Core32 + c algorithm.Bit32 dataset assets.Dataset } type want struct { @@ -176,7 +174,6 @@ func Test_insertAndCreateIndex32(t *testing.T) { if err := test.checkFunc(test.want, gotIds, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -184,7 +181,7 @@ func Test_insertAndCreateIndex32(t *testing.T) { func Test_insertAndCreateIndex64(t *testing.T) { type args struct { ctx context.Context - c core.Core64 + c algorithm.Bit64 dataset assets.Dataset } type want struct { @@ -257,7 +254,78 @@ func Test_insertAndCreateIndex64(t *testing.T) { if err := test.checkFunc(test.want, gotIds, err); err != nil { tt.Errorf("error = %v", err) } + }) + } +} + +func Test_float32To64(t *testing.T) { + t.Parallel() + type args struct { + x []float32 + } + type want struct { + wantY []float64 + } + type test struct { + name string + args args + want want + checkFunc func(want, 
[]float64) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotY []float64) error { + if !reflect.DeepEqual(gotY, w.wantY) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotY, w.wantY) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + x: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + x: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + gotY := float32To64(test.args.x) + if err := test.checkFunc(test.want, gotY); err != nil { + tt.Errorf("error = %v", err) + } }) } } diff --git a/hack/benchmark/core/gongt/gongt_bench_test.go b/hack/benchmark/core/gongt/gongt_bench_test.go index a56f886175..d14cc68454 100644 --- a/hack/benchmark/core/gongt/gongt_bench_test.go +++ b/hack/benchmark/core/gongt/gongt_bench_test.go @@ -26,8 +26,8 @@ import ( "github.com/vdaas/vald/hack/benchmark/core/benchmark" "github.com/vdaas/vald/hack/benchmark/core/benchmark/strategy" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" - "github.com/vdaas/vald/hack/benchmark/internal/core/gongt" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm/gongt" ) const ( @@ -47,7 +47,7 @@ func init() { targets = strings.Split(strings.TrimSpace(dataset), ",") } -func initCore(ctx context.Context, b *testing.B, dataset assets.Dataset) (core.Core64, core.Closer, error) { +func initCore(ctx context.Context, b *testing.B, dataset assets.Dataset) (algorithm.Bit64, algorithm.Closer, error) { ngt, err := gongt.New( gongt.WithDimension(dataset.Dimension()), gongt.WithObjectType(dataset.ObjectType()), @@ -65,7 +65,7 @@ func BenchmarkGoNGTSequential_Insert(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewInsert( - strategy.WithCore64(initCore), + strategy.WithBit64(initCore), ), ), ).Run(ctx, b) @@ -79,7 +79,7 @@ func BenchmarkGoNGTParallel_Insert(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewInsert( - strategy.WithCore64(initCore), + strategy.WithBit64(initCore), strategy.WithParallel(), ), ), @@ -94,7 +94,7 @@ func BenchmarkGoNGTSequential_BulkInsert(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewBulkInsert( - strategy.WithCore64(initCore), + strategy.WithBit64(initCore), ), ), ).Run(ctx, b) @@ -108,7 +108,7 @@ func BenchmarkGoNGTParallel_BulkInsert(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewBulkInsert( - strategy.WithCore64(initCore), + strategy.WithBit64(initCore), strategy.WithParallel(), ), ), @@ -124,7 +124,7 @@ func BenchmarkGoNGTSequential_InsertCommit(b *testing.B) { benchmark.WithStrategy( strategy.NewInsertCommit( 10, - strategy.WithCore64(initCore), + strategy.WithBit64(initCore), ), ), ).Run(ctx, b) @@ -139,7 +139,7 @@ func BenchmarkGoNGTParallel_InsertCommit(b *testing.B) { benchmark.WithStrategy( strategy.NewInsertCommit( 10, - strategy.WithCore64(initCore), + 
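Note: Test_float32To64 added above exercises a conversion helper whose implementation sits in util.go and is not part of this hunk. A minimal sketch of what that helper presumably does (a plain element-wise widening; hypothetical reconstruction, not the patched source):

func float32To64(x []float32) (y []float64) {
	// widen each element; order and length are preserved
	y = make([]float64, len(x))
	for i, v := range x {
		y[i] = float64(v)
	}
	return y
}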
strategy.WithBit64(initCore), strategy.WithParallel(), ), ), @@ -155,7 +155,7 @@ func BenchmarkGoNGTSequential_Search(b *testing.B) { benchmark.WithStrategy( strategy.NewSearch( size, radius, epsilon, - strategy.WithCore64(initCore), + strategy.WithBit64(initCore), ), ), ).Run(ctx, b) @@ -170,7 +170,7 @@ func BenchmarkGoNGTParallel_Search(b *testing.B) { benchmark.WithStrategy( strategy.NewSearch( size, radius, epsilon, - strategy.WithCore64(initCore), + strategy.WithBit64(initCore), strategy.WithParallel(), ), ), @@ -185,7 +185,7 @@ func BenchmarkGoNGTSequential_Remove(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewRemove( - strategy.WithCore64(initCore), + strategy.WithBit64(initCore), ), ), ).Run(ctx, b) @@ -199,7 +199,7 @@ func BenchmarkGoNGTParallel_Remove(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewRemove( - strategy.WithCore64(initCore), + strategy.WithBit64(initCore), strategy.WithParallel(), ), ), @@ -214,7 +214,7 @@ func BenchmarkGoNGTSequential_GetVector(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewGetVector( - strategy.WithCore64(initCore), + strategy.WithBit64(initCore), ), ), ).Run(ctx, b) @@ -228,7 +228,7 @@ func BenchmarkGoNGTParallel_GetVector(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewGetVector( - strategy.WithCore64(initCore), + strategy.WithBit64(initCore), ), ), ).Run(ctx, b) diff --git a/hack/benchmark/core/ngt/ngt_bench_test.go b/hack/benchmark/core/ngt/ngt_bench_test.go index d5974405b0..c75f80a983 100644 --- a/hack/benchmark/core/ngt/ngt_bench_test.go +++ b/hack/benchmark/core/ngt/ngt_bench_test.go @@ -26,8 +26,8 @@ import ( "github.com/vdaas/vald/hack/benchmark/core/benchmark" "github.com/vdaas/vald/hack/benchmark/core/benchmark/strategy" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/hack/benchmark/internal/core" - "github.com/vdaas/vald/hack/benchmark/internal/core/ngt" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm/ngt" ) const ( @@ -47,7 +47,7 @@ func init() { targets = strings.Split(strings.TrimSpace(dataset), ",") } -func initCore(ctx context.Context, b *testing.B, dataset assets.Dataset) (core.Core32, core.Closer, error) { +func initCore(ctx context.Context, b *testing.B, dataset assets.Dataset) (algorithm.Bit32, algorithm.Closer, error) { ngt, err := ngt.New( ngt.WithDimension(dataset.Dimension()), ngt.WithObjectType(dataset.ObjectType()), @@ -65,7 +65,7 @@ func BenchmarkNGTSequential_Insert(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewInsert( - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), ), ), ).Run(ctx, b) @@ -79,7 +79,7 @@ func BenchmarkNGTParallel_Insert(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewInsert( - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), strategy.WithParallel(), ), ), @@ -94,7 +94,7 @@ func BenchmarkNGTSequential_BulkInsert(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewBulkInsert( - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), ), ), ).Run(ctx, b) @@ -108,7 +108,7 @@ func BenchmarkNGTParallel_BulkInsert(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewBulkInsert( - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), strategy.WithParallel(), ), ), @@ -124,7 +124,7 @@ func 
BenchmarkNGTSequential_InsertCommit(b *testing.B) { benchmark.WithStrategy( strategy.NewInsertCommit( 10, - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), ), ), ).Run(ctx, b) @@ -139,7 +139,7 @@ func BenchmarkNGTParallel_InsertCommit(b *testing.B) { benchmark.WithStrategy( strategy.NewInsertCommit( 10, - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), strategy.WithParallel(), ), ), @@ -155,7 +155,7 @@ func BenchmarkNGTSequential_BulkInsertCommit(b *testing.B) { benchmark.WithStrategy( strategy.NewBulkInsertCommit( 10, - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), ), ), ).Run(ctx, b) @@ -170,7 +170,7 @@ func BenchmarkNGTParallel_BulkInsertCommit(b *testing.B) { benchmark.WithStrategy( strategy.NewBulkInsertCommit( 10, - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), strategy.WithParallel(), ), ), @@ -186,7 +186,7 @@ func BenchmarkNGTSequential_Search(b *testing.B) { benchmark.WithStrategy( strategy.NewSearch( size, radius, epsilon, - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), ), ), ).Run(ctx, b) @@ -201,7 +201,7 @@ func BenchmarkNGTParallel_Search(b *testing.B) { benchmark.WithStrategy( strategy.NewSearch( size, radius, epsilon, - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), strategy.WithParallel(), ), ), @@ -216,7 +216,7 @@ func BenchmarkNGTSequential_Remove(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewRemove( - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), ), ), ).Run(ctx, b) @@ -230,7 +230,7 @@ func BenchmarkNGTParallel_Remove(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewRemove( - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), strategy.WithParallel(), ), ), @@ -245,7 +245,7 @@ func BenchmarkNGTSequential_GetVector(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewGetVector( - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), ), ), ).Run(ctx, b) @@ -259,7 +259,7 @@ func BenchmarkNGTParallel_GetVector(b *testing.B) { benchmark.WithName(target), benchmark.WithStrategy( strategy.NewGetVector( - strategy.WithCore32(initCore), + strategy.WithBit32(initCore), strategy.WithParallel(), ), ), diff --git a/hack/benchmark/e2e/agent/core/ngt/ngt_bench_test.go b/hack/benchmark/e2e/agent/core/ngt/ngt_bench_test.go index f59c70dc2b..224da75346 100644 --- a/hack/benchmark/e2e/agent/core/ngt/ngt_bench_test.go +++ b/hack/benchmark/e2e/agent/core/ngt/ngt_bench_test.go @@ -25,14 +25,11 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/e2e" "github.com/vdaas/vald/hack/benchmark/internal/e2e/strategy" "github.com/vdaas/vald/hack/benchmark/internal/starter/agent/core/ngt" - "github.com/vdaas/vald/internal/client/agent/grpc" - "github.com/vdaas/vald/internal/client/agent/rest" + "github.com/vdaas/vald/internal/client/v1/client/agent/core" "github.com/vdaas/vald/internal/log" ) -var ( - targets []string -) +var targets []string func init() { testing.Init() @@ -46,41 +43,9 @@ func init() { targets = strings.Split(strings.TrimSpace(dataset), ",") } -func BenchmarkAgentNGT_REST_Sequential(b *testing.B) { - ctx := context.Background() - client := rest.New(ctx) - - for _, name := range targets { - bench := e2e.New( - b, - e2e.WithName(name), - e2e.WithServerStarter(func(ctx context.Context, tb testing.TB, d assets.Dataset) func() { - return ngt.New( - ngt.WithDimension(d.Dimension()), - ngt.WithDistanceType(d.DistanceType()), - ngt.WithObjectType(d.ObjectType()), 
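Note: the benchmark option surface is renamed here from WithCore32/WithCore64 (taking core.Core32/core.Core64 and core.Closer) to WithBit32/WithBit64 (taking algorithm.Bit32/algorithm.Bit64 and algorithm.Closer). A minimal sketch of how a caller wires the renamed options, assuming the internal ngt wrapper also satisfies algorithm.Closer and that strategy.NewInsert returns benchmark.Strategy (both inferred from the hunks above, not stated in them):

package example

import (
	"context"
	"testing"

	"github.com/vdaas/vald/hack/benchmark/core/benchmark"
	"github.com/vdaas/vald/hack/benchmark/core/benchmark/strategy"
	"github.com/vdaas/vald/hack/benchmark/internal/assets"
	"github.com/vdaas/vald/hack/benchmark/internal/core/algorithm"
	"github.com/vdaas/vald/hack/benchmark/internal/core/algorithm/ngt"
)

// initBit32 matches the signature now expected by strategy.WithBit32.
func initBit32(ctx context.Context, b *testing.B, d assets.Dataset) (algorithm.Bit32, algorithm.Closer, error) {
	n, err := ngt.New(
		ngt.WithDimension(d.Dimension()),
		ngt.WithObjectType(d.ObjectType()),
	)
	if err != nil {
		return nil, nil, err
	}
	return n, n, nil // assumption: the wrapper implements both Bit32 and Closer
}

// newInsertStrategy builds the insert strategy against the renamed option.
func newInsertStrategy() benchmark.Strategy {
	return strategy.NewInsert(
		strategy.WithBit32(initBit32), // formerly strategy.WithCore32
	)
}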
- ).Run(ctx, tb) - }), - e2e.WithClient(client), - e2e.WithStrategy( - strategy.NewInsert(), - strategy.NewCreateIndex( - strategy.WithCreateIndexClient(client), - ), - strategy.NewSearch(), - ), - ) - bench.Run(ctx, b) - } -} - func BenchmarkAgentNGT_gRPC_Sequential(b *testing.B) { ctx := context.Background() - client, err := grpc.New(ctx) - if err != nil { - b.Fatal(err) - } - + client := core.New() for _, name := range targets { bench := e2e.New( b, @@ -107,10 +72,7 @@ func BenchmarkAgentNGT_gRPC_Sequential(b *testing.B) { func BenchmarkAgentNGT_gRPC_Stream(b *testing.B) { ctx := context.Background() - client, err := grpc.New(ctx) - if err != nil { - b.Fatal(err) - } + client := core.New() for _, name := range targets { bench := e2e.New( diff --git a/hack/benchmark/e2e/external/ngtd/ngtd_bench_test.go b/hack/benchmark/e2e/external/ngtd/ngtd_bench_test.go index 4e675d3e30..73020c9780 100644 --- a/hack/benchmark/e2e/external/ngtd/ngtd_bench_test.go +++ b/hack/benchmark/e2e/external/ngtd/ngtd_bench_test.go @@ -30,9 +30,7 @@ import ( "github.com/vdaas/vald/internal/log" ) -var ( - targets []string -) +var targets []string func init() { testing.Init() diff --git a/hack/benchmark/e2e/gateway/vald/vald_bench_test.go b/hack/benchmark/e2e/gateway/vald/vald_bench_test.go index 27ea43667a..d3deb7f44e 100644 --- a/hack/benchmark/e2e/gateway/vald/vald_bench_test.go +++ b/hack/benchmark/e2e/gateway/vald/vald_bench_test.go @@ -24,14 +24,13 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/e2e" "github.com/vdaas/vald/hack/benchmark/internal/e2e/strategy" - "github.com/vdaas/vald/internal/client/gateway/vald/grpc" - "github.com/vdaas/vald/internal/client/gateway/vald/rest" + "github.com/vdaas/vald/internal/client/v1/client/vald" "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/net/grpc" ) var ( targets []string - restAddr string grpcAddr string wait time.Duration ) @@ -46,7 +45,6 @@ func init() { ) flag.StringVar(&dataset, "dataset", "", "set dataset (choice with comma)") - flag.StringVar(&restAddr, "rest_address", "http://127.0.0.1:8080", "set vald gateway address for REST") flag.StringVar(&grpcAddr, "grpc_address", "127.0.0.1:8081", "set vald gateway address for gRPC") flag.UintVar(&waitSeconds, "wait", 30, "indexing wait time (secs)") flag.Parse() @@ -55,63 +53,17 @@ func init() { wait = time.Duration(time.Duration(waitSeconds) * time.Second) } -func BenchmarkValdGateway_REST_Sequential(b *testing.B) { - ctx := context.Background() - - for _, name := range targets { - bench := e2e.New( - b, - e2e.WithName(name), - e2e.WithClient( - rest.New( - rest.WithAddr( - restAddr, - ), - ), - ), - e2e.WithStrategy( - strategy.NewInsert(), - strategy.NewSearch(), - ), - ) - bench.Run(ctx, b) - } -} - -func BenchmarkValdGateway_REST_Stream(b *testing.B) { - ctx := context.Background() - - for _, name := range targets { - bench := e2e.New( - b, - e2e.WithName(name), - e2e.WithClient( - rest.New( - rest.WithAddr( - restAddr, - ), - ), - ), - e2e.WithStrategy( - strategy.NewStreamInsert(), - strategy.NewStreamSearch(), - ), - ) - bench.Run(ctx, b) - } -} - func BenchmarkValdGateway_gRPC_Sequential(b *testing.B) { - ctx := context.Background() - client, err := grpc.New(ctx, - grpc.WithAddr( + client := vald.New( + vald.WithClient(grpc.New( + grpc.WithAddrs(grpcAddr), + )), + vald.WithAddr( grpcAddr, ), ) - if err != nil { - b.Fatal(err) - } - + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() for _, name := range targets { bench := e2e.New( b, @@ -127,16 +79,17 @@ func 
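Note: with the REST paths dropped, the gateway benchmark now reaches Vald only through the v1 client shown above. A minimal construction sketch, taken from that change (the helper name and the exported interface name vald.Client are assumptions for illustration):

package example

import (
	"github.com/vdaas/vald/internal/client/v1/client/vald"
	"github.com/vdaas/vald/internal/net/grpc"
)

// newGatewayClient wraps an internal/net/grpc client in the v1 vald client,
// replacing the removed internal/client/gateway/vald/{grpc,rest} packages.
func newGatewayClient(grpcAddr string) vald.Client {
	return vald.New(
		vald.WithClient(grpc.New(
			grpc.WithAddrs(grpcAddr),
		)),
		vald.WithAddr(grpcAddr),
	)
}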
BenchmarkValdGateway_gRPC_Sequential(b *testing.B) { } func BenchmarkValdGateway_gRPC_Stream(b *testing.B) { - ctx := context.Background() - client, err := grpc.New(ctx, - grpc.WithAddr( + client := vald.New( + vald.WithClient(grpc.New( + grpc.WithAddrs(grpcAddr), + )), + vald.WithAddr( grpcAddr, ), ) - if err != nil { - b.Fatal(err) - } + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() for _, name := range targets { bench := e2e.New( b, diff --git a/hack/benchmark/internal/assets/dataset_test.go b/hack/benchmark/internal/assets/dataset_test.go index 61703a10fa..810d1125cb 100644 --- a/hack/benchmark/internal/assets/dataset_test.go +++ b/hack/benchmark/internal/assets/dataset_test.go @@ -89,7 +89,6 @@ func TestData(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/internal/client/ngtd/grpc/client.go b/hack/benchmark/internal/client/ngtd/grpc/client.go index a3b3a3663f..59f62fdcad 100644 --- a/hack/benchmark/internal/client/ngtd/grpc/client.go +++ b/hack/benchmark/internal/client/ngtd/grpc/client.go @@ -20,7 +20,9 @@ package grpc import ( "context" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/client/v1/client" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" @@ -35,7 +37,7 @@ type Client interface { type ngtdClient struct { addr string - grpc.Client + c grpc.Client opts []grpc.Option } @@ -46,389 +48,490 @@ func New(ctx context.Context, opts ...Option) (Client, error) { opt(c) } - c.Client = grpc.New(c.opts...) + c.c = grpc.New(c.opts...) - if err := c.Client.Connect(ctx, c.addr); err != nil { + if err := c.c.Connect(ctx, c.addr); err != nil { return nil, err } return c, nil } -func (c *ngtdClient) Exists( - ctx context.Context, - req *client.ObjectID, -) (*client.ObjectID, error) { - return nil, errors.ErrUnsupportedClientMethod +func (c *ngtdClient) Exists(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (oid *payload.Object_ID, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + id, err := proto.NewNGTDClient(conn).GetObject(ctx, &proto.GetObjectRequest{ + Id: []byte(in.GetId()), + }) + if err != nil { + return nil, err + } + if len(id.GetError()) != 0 { + return nil, errors.New(id.GetError()) + } + oid = &payload.Object_ID{ + Id: string(id.GetId()), + } + return oid, nil + }) + if err != nil { + return nil, err + } + return oid, nil } -func (c *ngtdClient) Search( - ctx context.Context, - req *client.SearchRequest, -) (*client.SearchResponse, error) { - res, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - res, err := proto.NewNGTDClient(conn).Search(ctx, searchRequestToNgtdSearchRequest(req), copts...) - if err != nil { - return nil, err - } - - if len(res.GetError()) != 0 { - return nil, errors.New(res.GetError()) - } - return res, nil - }, - ) +func (c *ngtdClient) Search(ctx context.Context, in *payload.Search_Request, opts ...grpc.CallOption) (*payload.Search_Response, error) { + res, err := c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + r, err := proto.NewNGTDClient(conn).Search(ctx, searchRequestToNgtdSearchRequest(in), copts...) 
+ if err != nil { + return nil, err + } + if len(r.GetError()) != 0 { + return nil, errors.New(r.GetError()) + } + return r, nil + }) if err != nil { return nil, err } return ngtdSearchResponseToSearchResponse(res.(*proto.SearchResponse)), nil } -func (c *ngtdClient) SearchByID( - ctx context.Context, - req *client.SearchIDRequest, -) (*client.SearchResponse, error) { - res, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - res, err := proto.NewNGTDClient(conn).SearchByID(ctx, searchIDRequestToNgtdSearchRequest(req), copts...) - if err != nil { - return nil, err - } - - if len(res.GetError()) != 0 { - return nil, errors.New(res.GetError()) - } - return res, nil - }, - ) +func (c *ngtdClient) SearchByID(ctx context.Context, in *payload.Search_IDRequest, opts ...grpc.CallOption) (*payload.Search_Response, error) { + res, err := c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + r, err := proto.NewNGTDClient(conn).SearchByID(ctx, searchIDRequestToNgtdSearchRequest(in), copts...) + if err != nil { + return nil, err + } + if len(r.GetError()) != 0 { + return nil, errors.New(r.GetError()) + } + return r, nil + }) if err != nil { return nil, err } return ngtdSearchResponseToSearchResponse(res.(*proto.SearchResponse)), nil } -func (c *ngtdClient) StreamSearch( - ctx context.Context, - dataProvider func() *client.SearchRequest, - f func(*client.SearchResponse, error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - st, err := proto.NewNGTDClient(conn).StreamSearch(ctx, copts...) - if err != nil { - return nil, err - } +func (c *ngtdClient) StreamSearch(ctx context.Context, opts ...grpc.CallOption) (res vald.Search_StreamSearchClient, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + st, err := proto.NewNGTDClient(conn).StreamSearch(ctx, copts...) + if err != nil { + return nil, err + } + res = NewStreamSearchClient(st) + return res, nil + }) + if err != nil { + return nil, err + } + return res, nil +} - return nil, grpc.BidirectionalStreamClient(st, - func() interface{} { - if d := dataProvider(); d != nil { - return searchRequestToNgtdSearchRequest(d) - } - return nil - }, func() interface{} { - return new(proto.SearchResponse) - }, func(intr interface{}, err error) { - if err != nil { - f(nil, err) - return - } - - res := intr.(*proto.SearchResponse) - if len(res.GetError()) != 0 { - f(nil, errors.New(res.GetError())) - return - } - - f(ngtdSearchResponseToSearchResponse(res), err) - }) - }, - ) - return err +func (c *ngtdClient) StreamSearchByID(ctx context.Context, opts ...grpc.CallOption) (res vald.Search_StreamSearchByIDClient, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + st, err := proto.NewNGTDClient(conn).StreamSearch(ctx, copts...) 
+ if err != nil { + return nil, err + } + res = NewStreamSearchByIDClient(st) + return res, nil + }) + if err != nil { + return nil, err + } + return res, nil } -func (c *ngtdClient) StreamSearchByID( - ctx context.Context, - dataProvider func() *client.SearchIDRequest, - f func(*client.SearchResponse, error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - st, err := proto.NewNGTDClient(conn).StreamSearchByID(ctx, copts...) +func (c *ngtdClient) MultiSearch(ctx context.Context, in *payload.Search_MultiRequest, opts ...grpc.CallOption) (res *payload.Search_Responses, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res = &payload.Search_Responses{ + Responses: make([]*payload.Search_Response, 0, len(in.GetRequests())), + } + for _, req := range in.GetRequests() { + sres, err := c.Search(ctx, req, opts...) if err != nil { return nil, err } - - return nil, grpc.BidirectionalStreamClient(st, - func() interface{} { - if d := dataProvider(); d != nil { - return searchIDRequestToNgtdSearchRequest(d) - } - return nil - }, func() interface{} { - return new(proto.SearchResponse) - }, func(intr interface{}, err error) { - if err != nil { - f(nil, err) - return - } - - res := intr.(*proto.SearchResponse) - if len(res.GetError()) != 0 { - f(nil, errors.New(res.GetError())) - return - } - - f(ngtdSearchResponseToSearchResponse(res), err) - }) - }, - ) - return err + res.Responses = append(res.Responses, sres) + } + return nil, nil + }) + if err != nil { + return nil, err + } + return res, nil } -func (c *ngtdClient) Insert( - ctx context.Context, - req *client.ObjectVector, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - res, err := proto.NewNGTDClient(conn).Insert(ctx, objectVectorToNGTDInsertRequest(req), copts...) +func (c *ngtdClient) MultiSearchByID(ctx context.Context, in *payload.Search_MultiIDRequest, opts ...grpc.CallOption) (res *payload.Search_Responses, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res = &payload.Search_Responses{ + Responses: make([]*payload.Search_Response, 0, len(in.GetRequests())), + } + for _, req := range in.GetRequests() { + sres, err := c.SearchByID(ctx, req, opts...) if err != nil { return nil, err } + res.Responses = append(res.Responses, sres) + } + return nil, nil + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *ngtdClient) Insert(ctx context.Context, in *payload.Insert_Request, opts ...grpc.CallOption) (res *payload.Object_Location, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + data, err := proto.NewNGTDClient(conn).Insert(ctx, &proto.InsertRequest{ + Id: []byte(in.GetVector().GetId()), + Vector: tofloat64(in.GetVector().GetVector()), + }, copts...) 
+ if err != nil { + return nil, err + } + if len(data.GetError()) != 0 { + return nil, errors.New(data.GetError()) + } + return nil, nil + }) + if err != nil { + return nil, err + } + return res, nil +} - if len(res.GetError()) != 0 { - return nil, errors.New(res.GetError()) - } - return res, nil - }, - ) - return err +func (c *ngtdClient) StreamInsert(ctx context.Context, opts ...grpc.CallOption) (res vald.Insert_StreamInsertClient, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + st, err := proto.NewNGTDClient(conn).StreamInsert(ctx, copts...) + if err != nil { + return nil, err + } + res = NewStreamInsertClient(st) + return res, nil + }) + if err != nil { + return nil, err + } + return res, nil } -func (c *ngtdClient) StreamInsert( - ctx context.Context, - dataProvider func() *client.ObjectVector, - f func(error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - st, err := proto.NewNGTDClient(conn).StreamInsert(ctx, copts...) +func (c *ngtdClient) MultiInsert(ctx context.Context, in *payload.Insert_MultiRequest, opts ...grpc.CallOption) (res *payload.Object_Locations, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res = &payload.Object_Locations{ + Locations: make([]*payload.Object_Location, 0, len(in.GetRequests())), + } + for _, req := range in.GetRequests() { + sres, err := c.Insert(ctx, req, opts...) if err != nil { return nil, err } - - return nil, grpc.BidirectionalStreamClient(st, - func() interface{} { - if d := dataProvider(); d != nil { - return objectVectorToNGTDInsertRequest(d) - } - return nil - }, func() interface{} { - return new(proto.InsertResponse) - }, func(intr interface{}, err error) { - if err != nil { - f(err) - return - } - - res := intr.(*proto.InsertResponse) - if len(res.GetError()) != 0 { - f(errors.New(res.GetError())) - return - } - - f(err) - }) - }, - ) - return err -} - -func (c *ngtdClient) MultiInsert( - ctx context.Context, - req *client.ObjectVectors, -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *ngtdClient) Update( - ctx context.Context, - req *client.ObjectVector, -) error { - return errors.ErrUnsupportedClientMethod + res.Locations = append(res.Locations, sres) + } + return nil, nil + }) + if err != nil { + return nil, err + } + return res, nil } -func (c *ngtdClient) StreamUpdate( - ctx context.Context, - dataProvider func() *client.ObjectVector, - f func(error), -) error { - return errors.ErrUnsupportedClientMethod +func (c *ngtdClient) Update(ctx context.Context, in *payload.Update_Request, opts ...grpc.CallOption) (res *payload.Object_Location, err error) { + _, err = c.Remove(ctx, &payload.Remove_Request{ + Id: &payload.Object_ID{ + Id: in.GetVector().GetId(), + }, + }, opts...) + if err != nil { + return nil, err + } + _, err = c.Insert(ctx, &payload.Insert_Request{ + Vector: in.GetVector(), + }, opts...) 
+ if err != nil { + return nil, err + } + return nil, nil } -func (c *ngtdClient) MultiUpdate( - ctx context.Context, - req *client.ObjectVectors, -) error { - return errors.ErrUnsupportedClientMethod +func (c *ngtdClient) StreamUpdate(ctx context.Context, opts ...grpc.CallOption) (res vald.Update_StreamUpdateClient, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + ist, err := proto.NewNGTDClient(conn).StreamInsert(ctx, copts...) + if err != nil { + return nil, err + } + rst, err := proto.NewNGTDClient(conn).StreamRemove(ctx, copts...) + if err != nil { + return nil, err + } + res = NewStreamUpdateClient(ist, rst) + return res, nil + }) + if err != nil { + return nil, err + } + return res, nil } -func (c *ngtdClient) Remove( - ctx context.Context, - req *client.ObjectID, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - res, err := proto.NewNGTDClient(conn).Remove(ctx, objectIDToNGTDRemoveRequest(req), copts...) +func (c *ngtdClient) MultiUpdate(ctx context.Context, in *payload.Update_MultiRequest, opts ...grpc.CallOption) (res *payload.Object_Locations, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res = &payload.Object_Locations{ + Locations: make([]*payload.Object_Location, 0, len(in.GetRequests())), + } + for _, req := range in.GetRequests() { + sres, err := c.Update(ctx, req, opts...) if err != nil { return nil, err } - - if len(res.GetError()) != 0 { - return nil, errors.New(res.GetError()) - } - return res, nil - }, - ) - return err + res.Locations = append(res.Locations, sres) + } + return nil, nil + }) + if err != nil { + return nil, err + } + return res, nil } -func (c *ngtdClient) StreamRemove( - ctx context.Context, - dataProvider func() *client.ObjectID, - f func(error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - st, err := proto.NewNGTDClient(conn).StreamRemove(ctx, copts...) - if err != nil { - return nil, err - } - - return nil, grpc.BidirectionalStreamClient(st, - func() interface{} { - if d := dataProvider(); d != nil { - return objectIDToNGTDRemoveRequest(d) - } - return nil - }, func() interface{} { - return new(proto.RemoveResponse) - }, func(intr interface{}, err error) { - if err != nil { - f(err) - return - } - - res := intr.(*proto.RemoveResponse) - if len(res.GetError()) != 0 { - f(errors.New(res.GetError())) - return - } - - f(err) - }) - }, - ) - return err +func (c *ngtdClient) Upsert(ctx context.Context, in *payload.Upsert_Request, opts ...grpc.CallOption) (res *payload.Object_Location, err error) { + id, err := c.Exists(ctx, &payload.Object_ID{ + Id: in.GetVector().GetId(), + }, opts...) + if err == nil || len(id.GetId()) != 0 { + return c.Update(ctx, &payload.Update_Request{ + Vector: in.GetVector(), + }, opts...) + } + return c.Insert(ctx, &payload.Insert_Request{ + Vector: in.GetVector(), + }, opts...) 
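Note: every method in the rewritten ngtdClient repeats the same convention: NGTD reports failures as a non-empty Error string inside the response body, and the client converts it with errors.New from the internal errors package. The repeated check could be factored into a small helper; the one below is illustrative only and not part of this patch (it assumes the file's existing errors import):

// ngtdError turns NGTD's in-body error string into a Go error (nil when empty).
func ngtdError(msg string) error {
	if len(msg) != 0 {
		return errors.New(msg)
	}
	return nil
}

// usage inside a Do closure, e.g. after GetObject:
//	if err := ngtdError(res.GetError()); err != nil {
//		return nil, err
//	}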
} -func (c *ngtdClient) MultiRemove( - ctx context.Context, - req *client.ObjectIDs, -) error { - return errors.ErrUnsupportedClientMethod +func (c *ngtdClient) StreamUpsert(ctx context.Context, opts ...grpc.CallOption) (res vald.Upsert_StreamUpsertClient, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + st, err := proto.NewNGTDClient(conn).StreamInsert(ctx, copts...) + if err != nil { + return nil, err + } + res = NewStreamUpsertClient(c, st) + return res, nil + }) + if err != nil { + return nil, err + } + return res, nil } -func (c *ngtdClient) GetObject( - ctx context.Context, - req *client.ObjectID, -) (*client.ObjectVector, error) { - resp, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - res, err := proto.NewNGTDClient(conn).GetObject(ctx, objectIDToNGTDGetObjectRequest(req), copts...) +func (c *ngtdClient) MultiUpsert(ctx context.Context, in *payload.Upsert_MultiRequest, opts ...grpc.CallOption) (res *payload.Object_Locations, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res = &payload.Object_Locations{ + Locations: make([]*payload.Object_Location, 0, len(in.GetRequests())), + } + for _, req := range in.GetRequests() { + sres, err := c.Upsert(ctx, req, opts...) if err != nil { return nil, err } + res.Locations = append(res.Locations, sres) + } + return nil, nil + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *ngtdClient) Remove(ctx context.Context, in *payload.Remove_Request, opts ...grpc.CallOption) (res *payload.Object_Location, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err := proto.NewNGTDClient(conn).Remove(ctx, &proto.RemoveRequest{ + Id: []byte(in.GetId().GetId()), + }, copts...) + if err != nil { + return nil, err + } - if len(res.GetError()) != 0 { - return nil, errors.New(res.GetError()) - } - return res, nil - }, - ) + if len(res.GetError()) != 0 { + return nil, errors.New(res.GetError()) + } + return nil, nil + }) if err != nil { return nil, err } - return ngtdGetObjectResponseToObjectVector(resp.(*proto.GetObjectResponse)), err + return res, nil } -func (c *ngtdClient) StreamGetObject( - ctx context.Context, - dataProvider func() *client.ObjectID, - f func(*client.ObjectVector, error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - st, err := proto.NewNGTDClient(conn).StreamGetObject(ctx, copts...) +func (c *ngtdClient) StreamRemove(ctx context.Context, opts ...grpc.CallOption) (res vald.Remove_StreamRemoveClient, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + st, err := proto.NewNGTDClient(conn).StreamRemove(ctx, copts...) 
+ if err != nil { + return nil, err + } + res = NewStreamRemoveClient(st) + return res, nil + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *ngtdClient) MultiRemove(ctx context.Context, in *payload.Remove_MultiRequest, opts ...grpc.CallOption) (res *payload.Object_Locations, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + for _, req := range in.GetRequests() { + id, err := proto.NewNGTDClient(conn).Remove(ctx, &proto.RemoveRequest{ + Id: []byte(req.GetId().GetId()), + }, append(copts, opts...)...) if err != nil { return nil, err } + if len(id.GetError()) != 0 { + return nil, errors.New(id.GetError()) + } - return nil, grpc.BidirectionalStreamClient(st, - func() interface{} { - if d := dataProvider(); d != nil { - return objectIDToNGTDGetObjectRequest(d) - } - return nil - }, func() interface{} { - return new(proto.InsertResponse) - }, func(intr interface{}, err error) { - if err != nil { - f(nil, err) - } - - res := intr.(*proto.GetObjectResponse) - if len(res.GetError()) != 0 { - f(nil, errors.New(res.GetError())) - } - - f(ngtdGetObjectResponseToObjectVector(res), err) - }) - }, - ) - return err + } + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *ngtdClient) GetObject(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Vector, error) { + res, err := c.c.Do(ctx, c.addr, func( + ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err := proto.NewNGTDClient(conn).GetObject(ctx, &proto.GetObjectRequest{ + Id: []byte(in.GetId()), + }, copts...) + if err != nil { + return nil, err + } + + if len(res.GetError()) != 0 { + return nil, errors.New(res.GetError()) + } + return res, nil + }) + if err != nil { + return nil, err + } + r, ok := res.(*proto.GetObjectResponse) + if !ok { + return nil, errors.ErrInvalidAPIConfig + } + if len(r.GetError()) != 0 { + return nil, errors.New(r.GetError()) + } + return &client.ObjectVector{ + Id: string(r.GetId()), + Vector: r.GetVector(), + }, nil +} + +func (c *ngtdClient) StreamGetObject(ctx context.Context, opts ...grpc.CallOption) (res vald.Object_StreamGetObjectClient, err error) { + _, err = c.c.Do(ctx, c.addr, func( + ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + st, err := proto.NewNGTDClient(conn).StreamGetObject(ctx, copts...) + if err != nil { + return nil, err + } + res = NewStreamObjectClient(st) + return nil, nil + }) + if err != nil { + return nil, err + } + return res, nil } func (c *ngtdClient) CreateIndex( ctx context.Context, - req *client.ControlCreateIndexRequest, -) error { - _, err := c.Client.Do(ctx, c.addr, + in *client.ControlCreateIndexRequest, + opts ...grpc.CallOption, +) (*client.Empty, error) { + _, err := c.c.Do(ctx, c.addr, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return proto.NewNGTDClient(conn).CreateIndex(ctx, controlCreateIndexRequestToCreateIndexRequest(req), copts...) + return proto.NewNGTDClient(conn).CreateIndex(ctx, &proto.CreateIndexRequest{ + PoolSize: in.GetPoolSize(), + }, copts...) 
}, ) - return err + return nil, err } -func (c *ngtdClient) SaveIndex(ctx context.Context) error { - _, err := c.Client.Do(ctx, c.addr, +func (c *ngtdClient) SaveIndex( + ctx context.Context, + req *client.Empty, + opts ...grpc.CallOption, +) (*client.Empty, error) { + _, err := c.c.Do(ctx, c.addr, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { return proto.NewNGTDClient(conn).SaveIndex(ctx, new(proto.Empty), copts...) }, ) - return err + return nil, err } func (c *ngtdClient) CreateAndSaveIndex( ctx context.Context, req *client.ControlCreateIndexRequest, -) error { - return errors.ErrUnsupportedClientMethod + opts ...grpc.CallOption, +) (*client.Empty, error) { + _, err := c.CreateIndex(ctx, req) + if err != nil { + return nil, err + } + _, err = c.SaveIndex(ctx, nil) + return nil, err } -func (c *ngtdClient) IndexInfo(ctx context.Context) (*client.InfoIndex, error) { +func (c *ngtdClient) IndexInfo( + ctx context.Context, + req *client.Empty, + opts ...grpc.CallOption, +) (res *client.InfoIndexCount, err error) { return nil, errors.ErrUnsupportedClientMethod } @@ -471,42 +574,6 @@ func ngtdSearchResponseToSearchResponse(in *proto.SearchResponse) *client.Search } } -func ngtdGetObjectResponseToObjectVector(in *proto.GetObjectResponse) *client.ObjectVector { - if len(in.GetError()) != 0 { - return nil - } - - return &client.ObjectVector{ - Id: string(in.GetId()), - Vector: in.GetVector(), - } -} - -func objectVectorToNGTDInsertRequest(in *client.ObjectVector) *proto.InsertRequest { - return &proto.InsertRequest{ - Id: []byte(in.GetId()), - Vector: tofloat64(in.GetVector()), - } -} - -func objectIDToNGTDRemoveRequest(in *client.ObjectID) *proto.RemoveRequest { - return &proto.RemoveRequest{ - Id: []byte(in.GetId()), - } -} - -func objectIDToNGTDGetObjectRequest(in *client.ObjectID) *proto.GetObjectRequest { - return &proto.GetObjectRequest{ - Id: []byte(in.GetId()), - } -} - -func controlCreateIndexRequestToCreateIndexRequest(in *client.ControlCreateIndexRequest) *proto.CreateIndexRequest { - return &proto.CreateIndexRequest{ - PoolSize: in.GetPoolSize(), - } -} - func getSizeAndEpsilon(cfg *client.SearchConfig) (size int32, epsilon float32) { if cfg != nil { size = int32(cfg.GetNum()) diff --git a/hack/benchmark/internal/client/ngtd/grpc/option.go b/hack/benchmark/internal/client/ngtd/grpc/option.go index c6cdaf16e9..a683da5e60 100644 --- a/hack/benchmark/internal/client/ngtd/grpc/option.go +++ b/hack/benchmark/internal/client/ngtd/grpc/option.go @@ -24,24 +24,22 @@ import ( type Option func(*ngtdClient) -var ( - defaultOptions = []Option{ - WithAddr("127.0.0.1:8200"), - WithGRPCClientOption( - (&config.GRPCClient{ - Addrs: []string{ - "127.0.0.1:8200", - }, - CallOption: &config.CallOption{ - MaxRecvMsgSize: 100000000000, - }, - DialOption: &config.DialOption{ - Insecure: true, - }, - }).Bind().Opts()..., - ), - } -) +var defaultOptions = []Option{ + WithAddr("127.0.0.1:8200"), + WithGRPCClientOption( + (&config.GRPCClient{ + Addrs: []string{ + "127.0.0.1:8200", + }, + CallOption: &config.CallOption{ + MaxRecvMsgSize: 100000000000, + }, + DialOption: &config.DialOption{ + Insecure: true, + }, + }).Bind().Opts()..., + ), +} func WithAddr(addr string) Option { return func(c *ngtdClient) { diff --git a/hack/benchmark/internal/client/ngtd/grpc/stream.go b/hack/benchmark/internal/client/ngtd/grpc/stream.go new file mode 100644 index 0000000000..427645f1e5 --- /dev/null +++ b/hack/benchmark/internal/client/ngtd/grpc/stream.go @@ -0,0 +1,308 @@ 
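Note: the new stream.go below adapts NGTD's bidirectional stream clients to the vald v1 stream client interfaces (Search, SearchByID, Insert, Update, Upsert, Remove, Object) so the benchmark strategies can stay protocol-agnostic. An illustrative use of the search adapter; the surrounding conn/ctx plumbing, the extra context import, and the example vector are assumptions, not part of this file:

func streamSearchExample(ctx context.Context, conn *grpc.ClientConn) (*payload.Search_Response, error) {
	st, err := proto.NewNGTDClient(conn).StreamSearch(ctx)
	if err != nil {
		return nil, err
	}
	sc := NewStreamSearchClient(st) // satisfies vald.Search_StreamSearchClient
	if err := sc.Send(&payload.Search_Request{
		Vector: []float32{0.1, 0.2, 0.3},
	}); err != nil {
		return nil, err
	}
	// Recv maps NGTD's result ids/distances onto payload.Object_Distance values
	// and surfaces the in-body error string as a Go error.
	return sc.Recv()
}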
+// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package grpc provides grpc client functions +package grpc + +import ( + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/net/grpc" + proto "github.com/yahoojapan/ngtd/proto" +) + +type ( + StreamSearch vald.Search_StreamSearchClient + StreamSearchByID vald.Search_StreamSearchByIDClient + StreamInsert vald.Insert_StreamInsertClient + StreamUpdate vald.Update_StreamUpdateClient + StreamUpsert vald.Upsert_StreamUpsertClient + StreamRemove vald.Remove_StreamRemoveClient + StreamObject vald.Object_StreamGetObjectClient +) + +type streamSearch struct { + grpc.ClientStream + ngtd proto.NGTD_StreamSearchClient +} + +func NewStreamSearchClient(ngtd proto.NGTD_StreamSearchClient) StreamSearch { + return &streamSearch{ + ClientStream: ngtd, + ngtd: ngtd, + } +} + +func (s *streamSearch) Send(req *payload.Search_Request) error { + vec := make([]float64, 0, len(req.GetVector())) + for _, v := range req.GetVector() { + vec = append(vec, float64(v)) + } + return s.ngtd.Send(&proto.SearchRequest{ + Vector: vec, + }) +} + +func (s *streamSearch) Recv() (*payload.Search_Response, error) { + data, err := s.ngtd.Recv() + if err != nil { + return nil, err + } + if len(data.GetError()) != 0 { + return nil, errors.New(data.GetError()) + } + res := &payload.Search_Response{ + Results: make([]*payload.Object_Distance, 0, len(data.GetResult())), + } + for _, dist := range data.GetResult() { + res.Results = append(res.Results, &payload.Object_Distance{ + Distance: dist.GetDistance(), + Id: string(dist.GetId()), + }) + } + return res, nil +} + +type streamSearchByID struct { + grpc.ClientStream + ngtd proto.NGTD_StreamSearchByIDClient +} + +func NewStreamSearchByIDClient(ngtd proto.NGTD_StreamSearchByIDClient) StreamSearchByID { + return &streamSearchByID{ + ClientStream: ngtd, + ngtd: ngtd, + } +} + +func (s *streamSearchByID) Send(req *payload.Search_IDRequest) error { + return s.ngtd.Send(&proto.SearchRequest{ + Id: []byte(req.GetId()), + }) +} + +func (s *streamSearchByID) Recv() (*payload.Search_Response, error) { + data, err := s.ngtd.Recv() + if err != nil { + return nil, err + } + if len(data.GetError()) != 0 { + return nil, errors.New(data.GetError()) + } + res := &payload.Search_Response{ + Results: make([]*payload.Object_Distance, 0, len(data.GetResult())), + } + for _, dist := range data.GetResult() { + res.Results = append(res.Results, &payload.Object_Distance{ + Distance: dist.GetDistance(), + Id: string(dist.GetId()), + }) + } + return res, nil +} + +type streamInsert struct { + grpc.ClientStream + ngtd proto.NGTD_StreamInsertClient +} + +func NewStreamInsertClient(ngtd proto.NGTD_StreamInsertClient) StreamInsert { + return &streamInsert{ + ClientStream: ngtd, + ngtd: ngtd, + } +} + +func (s *streamInsert) Send(req *payload.Insert_Request) error { 
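+ // ngtd's InsertRequest carries []float64 while the vald payload uses
+ // []float32, so the vector is widened element by element before being
+ // sent on the NGTD insert stream.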
+ vec := make([]float64, 0, len(req.GetVector().GetVector())) + for _, v := range req.GetVector().GetVector() { + vec = append(vec, float64(v)) + } + return s.ngtd.Send(&proto.InsertRequest{ + Id: []byte(req.GetVector().GetId()), + Vector: vec, + }) +} + +func (s *streamInsert) Recv() (*payload.Object_Location, error) { + data, err := s.ngtd.Recv() + if err != nil { + return nil, err + } + if len(data.GetError()) != 0 { + return nil, errors.New(data.GetError()) + } + return nil, nil +} + +type streamUpdate struct { + grpc.ClientStream + ic proto.NGTD_StreamInsertClient + rc proto.NGTD_StreamRemoveClient +} + +func NewStreamUpdateClient(ic proto.NGTD_StreamInsertClient, rc proto.NGTD_StreamRemoveClient) StreamUpdate { + return &streamUpdate{ + ClientStream: ic, + ic: ic, + rc: rc, + } +} + +func (s *streamUpdate) Send(req *payload.Update_Request) error { + vec := make([]float64, 0, len(req.GetVector().GetVector())) + for _, v := range req.GetVector().GetVector() { + vec = append(vec, float64(v)) + } + err := s.rc.Send(&proto.RemoveRequest{ + Id: []byte(req.GetVector().GetId()), + }) + if err != nil { + return err + } + err = s.ic.Send(&proto.InsertRequest{ + Id: []byte(req.GetVector().GetId()), + Vector: vec, + }) + return err +} + +func (s *streamUpdate) Recv() (*payload.Object_Location, error) { + rdata, err := s.rc.Recv() + if err != nil { + return nil, err + } + if len(rdata.GetError()) != 0 { + return nil, errors.New(rdata.GetError()) + } + idata, err := s.ic.Recv() + if err != nil { + return nil, err + } + if len(idata.GetError()) != 0 { + return nil, errors.New(idata.GetError()) + } + return nil, nil +} + +type streamUpsert struct { + grpc.ClientStream + cc Client + ch chan *payload.Object_Location +} + +func NewStreamUpsertClient(c Client, ic proto.NGTD_StreamInsertClient) StreamUpsert { + return &streamUpsert{ + ClientStream: ic, + cc: c, + ch: make(chan *payload.Object_Location, 10), + } +} + +func (s *streamUpsert) Send(req *payload.Upsert_Request) error { + go func() { + ctx := s.ClientStream.Context() + id, err := s.cc.Exists(ctx, &payload.Object_ID{ + Id: req.GetVector().GetId(), + }) + var loc *payload.Object_Location + if err == nil || len(id.GetId()) != 0 { + loc, err = s.cc.Update(ctx, &payload.Update_Request{ + Vector: req.GetVector(), + }) + } else { + loc, err = s.cc.Insert(ctx, &payload.Insert_Request{ + Vector: req.GetVector(), + }) + } + if err == nil { + s.ch <- loc + } + }() + return nil +} + +func (s *streamUpsert) Recv() (loc *payload.Object_Location, err error) { + ctx := s.ClientStream.Context() + select { + case <-ctx.Done(): + return nil, ctx.Err() + case loc := <-s.ch: + return loc, err + } +} + +type streamRemove struct { + grpc.ClientStream + ngtd proto.NGTD_StreamRemoveClient +} + +func NewStreamRemoveClient(ngtd proto.NGTD_StreamRemoveClient) StreamRemove { + return &streamRemove{ + ClientStream: ngtd, + ngtd: ngtd, + } +} + +func (s *streamRemove) Send(req *payload.Remove_Request) error { + return s.ngtd.Send(&proto.RemoveRequest{ + Id: []byte(req.GetId().GetId()), + }) +} + +func (s *streamRemove) Recv() (*payload.Object_Location, error) { + data, err := s.ngtd.Recv() + if err != nil { + return nil, err + } + if len(data.GetError()) != 0 { + return nil, errors.New(data.GetError()) + } + return nil, nil +} + +type streamGetObject struct { + grpc.ClientStream + ngtd proto.NGTD_StreamGetObjectClient +} + +func NewStreamObjectClient(ngtd proto.NGTD_StreamGetObjectClient) StreamObject { + return &streamGetObject{ + ClientStream: ngtd, + ngtd: ngtd, + } +} + 
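+// streamGetObject adapts ngtd's GetObject stream to the vald
+// Object_StreamGetObjectClient interface: Send forwards the requested ID as a
+// proto.GetObjectRequest, and Recv converts the response back into a
+// payload.Object_Vector. Rough usage sketch (assumes the generated NGTDClient
+// exposes a StreamGetObject method; illustration only, not part of this patch):
+//
+//	st, _ := proto.NewNGTDClient(conn).StreamGetObject(ctx)
+//	oc := NewStreamObjectClient(st)
+//	_ = oc.Send(&payload.Object_ID{Id: "vec-1"})
+//	vec, _ := oc.Recv()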
+func (s *streamGetObject) Send(req *payload.Object_ID) error { + return s.ngtd.Send(&proto.GetObjectRequest{ + Id: []byte(req.GetId()), + }) +} + +func (s *streamGetObject) Recv() (*payload.Object_Vector, error) { + data, err := s.ngtd.Recv() + if err != nil { + return nil, err + } + if len(data.GetError()) != 0 { + return nil, errors.New(data.GetError()) + } + return &payload.Object_Vector{ + Id: string(data.GetId()), + Vector: data.GetVector(), + }, nil +} diff --git a/hack/benchmark/internal/client/ngtd/rest/client.go b/hack/benchmark/internal/client/ngtd/rest/client.go index 3f4f124daf..08028b0e2d 100644 --- a/hack/benchmark/internal/client/ngtd/rest/client.go +++ b/hack/benchmark/internal/client/ngtd/rest/client.go @@ -19,11 +19,14 @@ package rest import ( "context" + "fmt" "net/http" - "strconv" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/client/v1/client" "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/net/http/json" "github.com/yahoojapan/ngtd/model" @@ -47,262 +50,273 @@ func New(ctx context.Context, opts ...Option) (Client, error) { return c, nil } -func (c *ngtdClient) Exists( - ctx context.Context, - req *client.ObjectID, -) (*client.ObjectID, error) { - return nil, errors.ErrUnsupportedClientMethod +func (c *ngtdClient) Exists(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (oid *payload.Object_ID, err error) { + id, err := c.GetObject(ctx, in, opts...) + return &payload.Object_ID{ + Id: id.GetId(), + }, nil } -func (c *ngtdClient) Search( - ctx context.Context, - req *client.SearchRequest, -) (*client.SearchResponse, error) { - res := new(model.SearchResponse) - err := json.Request(ctx, http.MethodPost, c.addr+"/search", searchRequestToNgtdSearchRequest(req), &res) +func (c *ngtdClient) Search(ctx context.Context, in *payload.Search_Request, opts ...grpc.CallOption) (*payload.Search_Response, error) { + vec := make([]float64, 0, len(in.GetVector())) + for _, v := range in.GetVector() { + vec = append(vec, float64(v)) + } + var res model.SearchResponse + err := json.Request(ctx, http.MethodPost, c.addr+"/search", model.SearchRequest{ + Vector: vec, + Size: int(in.GetConfig().GetNum()), + Epsilon: in.GetConfig().GetEpsilon(), + }, &res) if err != nil { return nil, err } - return ngtdSearchResponseToSearchResponse(res), nil + sr := &payload.Search_Response{ + Results: make([]*payload.Object_Distance, 0, len(res.Result)), + } + for _, r := range res.Result { + sr.Results = append(sr.Results, &payload.Object_Distance{ + Id: r.ID, + Distance: r.Distance, + }) + } + return sr, nil } -func (c *ngtdClient) SearchByID( - ctx context.Context, - req *client.SearchIDRequest, -) (*client.SearchResponse, error) { - res := new(model.SearchResponse) - err := json.Request(ctx, http.MethodPost, c.addr+"/searchbyid", searchIDRequestToNgtdSearchRequest(req), res) +func (c *ngtdClient) SearchByID(ctx context.Context, in *payload.Search_IDRequest, opts ...grpc.CallOption) (*payload.Search_Response, error) { + var res model.SearchResponse + err := json.Request(ctx, http.MethodPost, c.addr+"/search", model.SearchRequest{ + ID: in.GetId(), + Size: int(in.GetConfig().GetNum()), + Epsilon: in.GetConfig().GetEpsilon(), + }, &res) if err != nil { return nil, err } - return ngtdSearchResponseToSearchResponse(res), nil + sr := &payload.Search_Response{ + Results: make([]*payload.Object_Distance, 0, 
len(res.Result)), + } + for _, r := range res.Result { + sr.Results = append(sr.Results, &payload.Object_Distance{ + Id: r.ID, + Distance: r.Distance, + }) + } + return sr, nil } -func (c *ngtdClient) StreamSearch( - ctx context.Context, - dataProvider func() *client.SearchRequest, - f func(*client.SearchResponse, error), -) error { - return errors.ErrUnsupportedClientMethod +func (c *ngtdClient) StreamSearch(ctx context.Context, opts ...grpc.CallOption) (res vald.Search_StreamSearchClient, err error) { + return nil, errors.ErrUnsupportedClientMethod } -func (c *ngtdClient) StreamSearchByID( - ctx context.Context, - dataProvider func() *client.SearchIDRequest, - f func(*client.SearchResponse, error), -) error { - return errors.ErrUnsupportedClientMethod +func (c *ngtdClient) StreamSearchByID(ctx context.Context, opts ...grpc.CallOption) (res vald.Search_StreamSearchByIDClient, err error) { + return nil, errors.ErrUnsupportedClientMethod } -func (c *ngtdClient) Insert( - ctx context.Context, - req *client.ObjectVector, -) error { - err := json.Request(ctx, http.MethodPost, c.addr+"/insert", objectVectorToNgtdInsertRequest(req), nil) - if err != nil { - return err +func (c *ngtdClient) MultiSearch(ctx context.Context, in *payload.Search_MultiRequest, opts ...grpc.CallOption) (res *payload.Search_Responses, err error) { + res = &payload.Search_Responses{ + Responses: make([]*payload.Search_Response, 0, len(in.GetRequests())), + } + for _, req := range in.GetRequests() { + r, err := c.Search(ctx, req) + if err == nil { + res.Responses = append(res.Responses, r) + } } - return nil + return res, nil } -func (c *ngtdClient) StreamInsert( - ctx context.Context, - dataProvider func() *client.ObjectVector, - f func(error), -) error { - return errors.ErrUnsupportedClientMethod +func (c *ngtdClient) MultiSearchByID(ctx context.Context, in *payload.Search_MultiIDRequest, opts ...grpc.CallOption) (res *payload.Search_Responses, err error) { + res = &payload.Search_Responses{ + Responses: make([]*payload.Search_Response, 0, len(in.GetRequests())), + } + for _, req := range in.GetRequests() { + r, err := c.SearchByID(ctx, req) + if err == nil { + res.Responses = append(res.Responses, r) + } + } + return res, nil } -func (c *ngtdClient) MultiInsert( - ctx context.Context, - req *client.ObjectVectors, -) error { - err := json.Request(ctx, http.MethodPost, c.addr+"/multiinsert", objectVectorsToNgtdMultiInsertRequest(req), nil) +func (c *ngtdClient) Insert(ctx context.Context, in *payload.Insert_Request, opts ...grpc.CallOption) (*payload.Object_Location, error) { + vec := make([]float64, 0, len(in.GetVector().GetVector())) + for _, v := range in.GetVector().GetVector() { + vec = append(vec, float64(v)) + } + var res model.InsertResponse + err := json.Request(ctx, http.MethodPost, c.addr+"/insert", model.InsertRequest{ + ID: in.GetVector().GetId(), + Vector: vec, + }, &res) if err != nil { - return err + return nil, err } - return nil -} - -func (c *ngtdClient) Update( - ctx context.Context, - req *client.ObjectVector, -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *ngtdClient) StreamUpdate( - ctx context.Context, - dataProvider func() *client.ObjectVector, - f func(error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *ngtdClient) MultiUpdate( - ctx context.Context, - req *client.ObjectVectors, -) error { - return errors.ErrUnsupportedClientMethod + return nil, nil } -func (c *ngtdClient) Remove( - ctx context.Context, - req *client.ObjectID, -) error { - return 
json.Request(ctx, http.MethodGet, c.addr+"/remove/"+req.GetId(), nil, nil) +func (c *ngtdClient) StreamInsert(ctx context.Context, opts ...grpc.CallOption) (res vald.Insert_StreamInsertClient, err error) { + return nil, errors.ErrUnsupportedClientMethod } -func (c *ngtdClient) StreamRemove( - ctx context.Context, - dataProvider func() *client.ObjectID, - f func(error), -) error { - return errors.ErrUnsupportedClientMethod +func (c *ngtdClient) MultiInsert(ctx context.Context, in *payload.Insert_MultiRequest, opts ...grpc.CallOption) (res *payload.Object_Locations, err error) { + req := &model.MultiInsertRequest{ + InsertRequests: make([]model.InsertRequest, 0, len(in.GetRequests())), + } + for _, i := range in.GetRequests() { + vec := make([]float64, 0, len(i.GetVector().GetVector())) + for _, v := range i.GetVector().GetVector() { + vec = append(vec, float64(v)) + } + req.InsertRequests = append(req.InsertRequests, model.InsertRequest{ + ID: i.GetVector().GetId(), + Vector: vec, + }) + } + var r model.MultiInsertResponse + return nil, json.Request(ctx, http.MethodPost, c.addr+"/multiinsert", req, &r) } -func (c *ngtdClient) MultiRemove( - ctx context.Context, - req *client.ObjectIDs, -) (err error) { - res := new(model.MultiRemoveResponse) - err = json.Request(ctx, http.MethodGet, c.addr+"/multiremove/", objectIDsToNgtdMultiRemoveRequest(req), res) +func (c *ngtdClient) Update(ctx context.Context, in *payload.Update_Request, opts ...grpc.CallOption) (res *payload.Object_Location, err error) { + _, err = c.Remove(ctx, &payload.Remove_Request{ + Id: &payload.Object_ID{ + Id: in.GetVector().GetId(), + }, + }, opts...) if err != nil { - return err + return nil, err } - - for _, resErr := range res.Errors { - if err == nil { - err = resErr - } else { - if resErr != nil { - err = errors.Wrap(err, resErr.Error()) - } - } + _, err = c.Insert(ctx, &payload.Insert_Request{ + Vector: in.GetVector(), + }, opts...) 
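+ // Update is emulated here as remove-then-insert; if the Insert above fails
+ // after a successful Remove, the original vector is already gone.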
+ if err != nil { + return nil, err } - - return + return nil, nil } -func (c *ngtdClient) GetObject( - ctx context.Context, - req *client.ObjectID, -) (*client.ObjectVector, error) { +func (c *ngtdClient) StreamUpdate(ctx context.Context, opts ...grpc.CallOption) (res vald.Update_StreamUpdateClient, err error) { return nil, errors.ErrUnsupportedClientMethod } -func (c *ngtdClient) StreamGetObject( - ctx context.Context, - dataProvider func() *client.ObjectID, - f func(*client.ObjectVector, error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *ngtdClient) CreateIndex( - ctx context.Context, - req *client.ControlCreateIndexRequest, -) error { - res := new(model.DefaultResponse) - err := json.Request(ctx, http.MethodGet, c.addr+"/index/create/"+strconv.Itoa(int(req.GetPoolSize())), nil, res) - if err != nil { - return err - } - - if res.Error != nil { - return res.Error +func (c *ngtdClient) MultiUpdate(ctx context.Context, in *payload.Update_MultiRequest, opts ...grpc.CallOption) (res *payload.Object_Locations, err error) { + for _, req := range in.GetRequests() { + _, err := c.Update(ctx, req) + if err != nil { + return nil, err + } } - return nil -} - -func (c *ngtdClient) SaveIndex(ctx context.Context) error { - return json.Request(ctx, http.MethodGet, c.addr+"/index/save", nil, nil) + return nil, nil } -func (c *ngtdClient) CreateAndSaveIndex( - ctx context.Context, - req *client.ControlCreateIndexRequest, -) error { - return errors.ErrUnsupportedClientMethod +func (c *ngtdClient) Upsert(ctx context.Context, in *payload.Upsert_Request, opts ...grpc.CallOption) (res *payload.Object_Location, err error) { + id, err := c.Exists(ctx, &payload.Object_ID{ + Id: in.GetVector().GetId(), + }, opts...) + if err == nil || len(id.GetId()) != 0 { + return c.Update(ctx, &payload.Update_Request{ + Vector: in.GetVector(), + }, opts...) + } + return c.Insert(ctx, &payload.Insert_Request{ + Vector: in.GetVector(), + }, opts...) 
} -func (c *ngtdClient) IndexInfo(ctx context.Context) (*client.InfoIndex, error) { +func (c *ngtdClient) StreamUpsert(ctx context.Context, opts ...grpc.CallOption) (res vald.Upsert_StreamUpsertClient, err error) { return nil, errors.ErrUnsupportedClientMethod } -func searchRequestToNgtdSearchRequest(in *client.SearchRequest) *model.SearchRequest { - size, epsilon := getSizeAndEpsilon(in.GetConfig()) - return &model.SearchRequest{ - Vector: tofloat64(in.GetVector()), - Size: size, - Epsilon: epsilon, +func (c *ngtdClient) MultiUpsert(ctx context.Context, in *payload.Upsert_MultiRequest, opts ...grpc.CallOption) (res *payload.Object_Locations, err error) { + for _, req := range in.GetRequests() { + _, err := c.Upsert(ctx, req) + if err != nil { + return nil, err + } } + return nil, nil } -func searchIDRequestToNgtdSearchRequest(in *client.SearchIDRequest) *model.SearchRequest { - size, epsilon := getSizeAndEpsilon(in.GetConfig()) - return &model.SearchRequest{ - ID: in.GetId(), - Size: size, - Epsilon: epsilon, +func (c *ngtdClient) Remove(ctx context.Context, in *payload.Remove_Request, opts ...grpc.CallOption) (*payload.Object_Location, error) { + var res model.RemoveResponse + err := json.Request(ctx, http.MethodGet, c.addr+"/remove/"+in.GetId().GetId(), nil, &res) + if err != nil { + return nil, err } + return nil, nil } -func objectVectorToNgtdInsertRequest(in *client.ObjectVector) *model.InsertRequest { - return &model.InsertRequest{ - ID: in.GetId(), - Vector: tofloat64(in.GetVector()), - } +func (c *ngtdClient) StreamRemove(ctx context.Context, opts ...grpc.CallOption) (res vald.Remove_StreamRemoveClient, err error) { + return nil, errors.ErrUnsupportedClientMethod } -func objectVectorsToNgtdMultiInsertRequest(in *client.ObjectVectors) *model.MultiInsertRequest { - reqs := make([]model.InsertRequest, len(in.GetVectors())) - - for _, v := range in.GetVectors() { - reqs = append(reqs, model.InsertRequest{ - Vector: tofloat64(v.GetVector()), - ID: v.GetId(), - }) +func (c *ngtdClient) MultiRemove(ctx context.Context, in *payload.Remove_MultiRequest, opts ...grpc.CallOption) (res *payload.Object_Locations, err error) { + req := &model.MultiRemoveRequest{ + IDs: make([]string, 0, len(in.GetRequests())), } - - return &model.MultiInsertRequest{ - InsertRequests: reqs, + for _, i := range in.GetRequests() { + req.IDs = append(req.IDs, i.GetId().GetId()) } + var r model.MultiInsertResponse + return nil, json.Request(ctx, http.MethodPost, c.addr+"/multiremove", req, &r) } -func objectIDsToNgtdMultiRemoveRequest(in *client.ObjectIDs) *model.MultiRemoveRequest { - return &model.MultiRemoveRequest{ - IDs: in.GetIds(), +func (c *ngtdClient) GetObject(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (*payload.Object_Vector, error) { + var res model.GetObjectsResponse + err := json.Request(ctx, http.MethodPost, c.addr+"/getobjects", model.GetObjectsRequest{ + IDs: []string{in.GetId()}, + }, &res) + if err != nil { + return nil, err } + return &payload.Object_Vector{ + Id: res.Result[0].ID, + Vector: res.Result[0].Vector, + }, nil } -func ngtdSearchResponseToSearchResponse(in *model.SearchResponse) *client.SearchResponse { - results := make([]*client.ObjectDistance, len(in.Result)) +func (c *ngtdClient) StreamGetObject(ctx context.Context, opts ...grpc.CallOption) (res vald.Object_StreamGetObjectClient, err error) { + return nil, errors.ErrUnsupportedClientMethod +} - for _, r := range in.Result { - results = append(results, &client.ObjectDistance{ - Id: r.ID, - Distance: 
r.Distance, - }) +func (c *ngtdClient) CreateIndex( + ctx context.Context, + in *client.ControlCreateIndexRequest, + opts ...grpc.CallOption, +) (*client.Empty, error) { + err := json.Request(ctx, http.MethodGet, fmt.Sprintf("%s/index/create/%d", c.addr, in.GetPoolSize()), nil, nil) + if err != nil { + return nil, err } + return nil, nil +} - return &client.SearchResponse{ - Results: results, - } +func (c *ngtdClient) SaveIndex( + ctx context.Context, + req *client.Empty, + opts ...grpc.CallOption, +) (*client.Empty, error) { + err := json.Request(ctx, http.MethodGet, c.addr+"/index/save", nil, nil) + return nil, err } -func getSizeAndEpsilon(cfg *client.SearchConfig) (size int, epsilon float32) { - if cfg != nil { - size = int(cfg.GetNum()) - epsilon = float32(cfg.GetEpsilon()) +func (c *ngtdClient) CreateAndSaveIndex( + ctx context.Context, + req *client.ControlCreateIndexRequest, + opts ...grpc.CallOption, +) (*client.Empty, error) { + _, err := c.CreateIndex(ctx, req) + if err != nil { + return nil, err } - return + _, err = c.SaveIndex(ctx, nil) + return nil, err } -func tofloat64(in []float32) (out []float64) { - out = make([]float64, len(in)) - for i := range in { - out[i] = float64(in[i]) - } - return +func (c *ngtdClient) IndexInfo( + ctx context.Context, + req *client.Empty, + opts ...grpc.CallOption, +) (res *client.InfoIndexCount, err error) { + return nil, errors.ErrUnsupportedClientMethod } diff --git a/hack/benchmark/internal/client/ngtd/rest/option.go b/hack/benchmark/internal/client/ngtd/rest/option.go index 7e9ad741cc..228f3372f8 100644 --- a/hack/benchmark/internal/client/ngtd/rest/option.go +++ b/hack/benchmark/internal/client/ngtd/rest/option.go @@ -19,11 +19,9 @@ package rest type Option func(*ngtdClient) -var ( - defaultOptions = []Option{ - WithAddr("http://127.0.0.1:8200"), - } -) +var defaultOptions = []Option{ + WithAddr("http://127.0.0.1:8200"), +} func WithAddr(addr string) Option { return func(c *ngtdClient) { diff --git a/hack/benchmark/internal/core/core.go b/hack/benchmark/internal/core/algorithm/algorithm.go similarity index 94% rename from hack/benchmark/internal/core/core.go rename to hack/benchmark/internal/core/algorithm/algorithm.go index 8c12076166..4cdc69b49a 100644 --- a/hack/benchmark/internal/core/core.go +++ b/hack/benchmark/internal/core/algorithm/algorithm.go @@ -14,8 +14,8 @@ // limitations under the License. 
// -// Package core provides core interface -package core +// Package algorithm provides core interface +package algorithm type Mode uint32 @@ -28,7 +28,7 @@ type Closer interface { Close() } -type Core32 interface { +type Bit32 interface { Search(vec []float32, size int, epsilon, radius float32) (interface{}, error) Insert(vec []float32) (uint, error) InsertCommit(vec []float32, poolSize uint32) (uint, error) @@ -43,7 +43,7 @@ type Core32 interface { Closer } -type Core64 interface { +type Bit64 interface { Search(vec []float64, size int, epsilon, radius float32) (interface{}, error) Insert(vec []float64) (uint, error) InsertCommit(vec []float64, poolSize uint32) (uint, error) diff --git a/hack/benchmark/internal/core/gongt/gongt.go b/hack/benchmark/internal/core/algorithm/gongt/gongt.go similarity index 95% rename from hack/benchmark/internal/core/gongt/gongt.go rename to hack/benchmark/internal/core/algorithm/gongt/gongt.go index 4b201079b9..f540f8d9de 100644 --- a/hack/benchmark/internal/core/gongt/gongt.go +++ b/hack/benchmark/internal/core/algorithm/gongt/gongt.go @@ -21,7 +21,7 @@ import ( "io/ioutil" "os" - icore "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" "github.com/vdaas/vald/internal/errors" "github.com/yahoojapan/gongt" ) @@ -44,7 +44,7 @@ type core struct { *gongt.NGT } -func New(opts ...Option) (icore.Core64, error) { +func New(opts ...Option) (algorithm.Bit64, error) { c := new(core) for _, opt := range append(defaultOptions, opts...) { opt(c) diff --git a/hack/benchmark/internal/core/gongt/gongt_test.go b/hack/benchmark/internal/core/algorithm/gongt/gongt_test.go similarity index 99% rename from hack/benchmark/internal/core/gongt/gongt_test.go rename to hack/benchmark/internal/core/algorithm/gongt/gongt_test.go index 5bfd4cb7f5..2719198b54 100644 --- a/hack/benchmark/internal/core/gongt/gongt_test.go +++ b/hack/benchmark/internal/core/algorithm/gongt/gongt_test.go @@ -21,7 +21,7 @@ import ( "reflect" "testing" - icore "github.com/vdaas/vald/hack/benchmark/internal/core" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" "github.com/vdaas/vald/internal/errors" "github.com/yahoojapan/gongt" @@ -33,18 +33,18 @@ func TestNew(t *testing.T) { opts []Option } type want struct { - want icore.Core64 + want algorithm.Bit64 err error } type test struct { name string args args want want - checkFunc func(want, icore.Core64, error) error + checkFunc func(want, algorithm.Bit64, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got icore.Core64, err error) error { + defaultCheckFunc := func(w want, got algorithm.Bit64, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -98,7 +98,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -211,7 +210,6 @@ func Test_core_Search(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -315,7 +313,6 @@ func Test_core_Insert(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -422,7 +419,6 @@ func Test_core_InsertCommit(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -526,7 +522,6 @@ func Test_core_BulkInsert(t *testing.T) { if err := test.checkFunc(test.want, got, 
got1); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -633,7 +628,6 @@ func Test_core_BulkInsertCommit(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -733,7 +727,6 @@ func Test_core_CreateAndSaveIndex(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -833,7 +826,6 @@ func Test_core_CreateIndex(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -933,7 +925,6 @@ func Test_core_Remove(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1033,7 +1024,6 @@ func Test_core_BulkRemove(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1137,7 +1127,6 @@ func Test_core_GetVector(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1293,7 +1282,6 @@ func Test_toUint(t *testing.T) { if err := test.checkFunc(test.want, gotOut); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/internal/core/gongt/option.go b/hack/benchmark/internal/core/algorithm/gongt/option.go similarity index 91% rename from hack/benchmark/internal/core/gongt/option.go rename to hack/benchmark/internal/core/algorithm/gongt/option.go index 0ca456dd99..92b7cd8841 100644 --- a/hack/benchmark/internal/core/gongt/option.go +++ b/hack/benchmark/internal/core/algorithm/gongt/option.go @@ -19,13 +19,11 @@ package gongt type Option func(c *core) -var ( - defaultOptions = []Option{ - WithIndexPath("tmpdir"), - WithObjectType("float"), - WithDimension(128), - } -) +var defaultOptions = []Option{ + WithIndexPath("tmpdir"), + WithObjectType("float"), + WithDimension(128), +} func WithIndexPath(path string) Option { return func(c *core) { diff --git a/hack/benchmark/internal/core/gongt/option_test.go b/hack/benchmark/internal/core/algorithm/gongt/option_test.go similarity index 100% rename from hack/benchmark/internal/core/gongt/option_test.go rename to hack/benchmark/internal/core/algorithm/gongt/option_test.go diff --git a/hack/benchmark/internal/core/ngt/ngt.go b/hack/benchmark/internal/core/algorithm/ngt/ngt.go similarity index 89% rename from hack/benchmark/internal/core/ngt/ngt.go rename to hack/benchmark/internal/core/algorithm/ngt/ngt.go index 1cd233bb1a..9167395677 100644 --- a/hack/benchmark/internal/core/ngt/ngt.go +++ b/hack/benchmark/internal/core/algorithm/ngt/ngt.go @@ -21,8 +21,8 @@ import ( "io/ioutil" "os" - icore "github.com/vdaas/vald/hack/benchmark/internal/core" - "github.com/vdaas/vald/internal/core/ngt" + c "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" + "github.com/vdaas/vald/internal/core/algorithm/ngt" ) type ObjectType int @@ -41,7 +41,7 @@ type core struct { ngt.NGT } -func New(opts ...Option) (icore.Core32, error) { +func New(opts ...Option) (c.Bit32, error) { c := new(core) for _, opt := range append(defaultOptions, opts...) 
{ opt(c) @@ -53,7 +53,7 @@ func New(opts ...Option) (icore.Core32, error) { } c.tmpdir = tmpdir - var typ = ngt.ObjectNone + typ := ngt.ObjectNone switch c.objectType { case Uint8: typ = ngt.Uint8 diff --git a/hack/benchmark/internal/core/ngt/ngt_test.go b/hack/benchmark/internal/core/algorithm/ngt/ngt_test.go similarity index 96% rename from hack/benchmark/internal/core/ngt/ngt_test.go rename to hack/benchmark/internal/core/algorithm/ngt/ngt_test.go index 85d98c288b..f5f1ac3e15 100644 --- a/hack/benchmark/internal/core/ngt/ngt_test.go +++ b/hack/benchmark/internal/core/algorithm/ngt/ngt_test.go @@ -21,8 +21,8 @@ import ( "reflect" "testing" - icore "github.com/vdaas/vald/hack/benchmark/internal/core" - "github.com/vdaas/vald/internal/core/ngt" + "github.com/vdaas/vald/hack/benchmark/internal/core/algorithm" + "github.com/vdaas/vald/internal/core/algorithm/ngt" "github.com/vdaas/vald/internal/errors" "go.uber.org/goleak" @@ -33,18 +33,18 @@ func TestNew(t *testing.T) { opts []Option } type want struct { - want icore.Core32 + want algorithm.Bit32 err error } type test struct { name string args args want want - checkFunc func(want, icore.Core32, error) error + checkFunc func(want, algorithm.Bit32, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got icore.Core32, err error) error { + defaultCheckFunc := func(w want, got algorithm.Bit32, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -98,7 +98,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -211,7 +210,6 @@ func Test_core_Search(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/internal/core/ngt/option.go b/hack/benchmark/internal/core/algorithm/ngt/option.go similarity index 90% rename from hack/benchmark/internal/core/ngt/option.go rename to hack/benchmark/internal/core/algorithm/ngt/option.go index 87b24245d2..7bdc26840d 100644 --- a/hack/benchmark/internal/core/ngt/option.go +++ b/hack/benchmark/internal/core/algorithm/ngt/option.go @@ -19,13 +19,11 @@ package ngt type Option func(*core) -var ( - defaultOptions = []Option{ - WithIndexPath("tmpdir"), - WithObjectType("float"), - WithDimension(128), - } -) +var defaultOptions = []Option{ + WithIndexPath("tmpdir"), + WithObjectType("float"), + WithDimension(128), +} func WithIndexPath(path string) Option { return func(c *core) { diff --git a/hack/benchmark/internal/core/ngt/option_test.go b/hack/benchmark/internal/core/algorithm/ngt/option_test.go similarity index 100% rename from hack/benchmark/internal/core/ngt/option_test.go rename to hack/benchmark/internal/core/algorithm/ngt/option_test.go diff --git a/hack/benchmark/internal/db/nosql/cassandra/cassandra_test.go b/hack/benchmark/internal/db/nosql/cassandra/cassandra_test.go index 371df5d93a..3a60d4969a 100644 --- a/hack/benchmark/internal/db/nosql/cassandra/cassandra_test.go +++ b/hack/benchmark/internal/db/nosql/cassandra/cassandra_test.go @@ -27,7 +27,7 @@ import ( ) var ( - metaTable = "meta_vector" + metaTable = "backup_vector" uuidColumn = "uuid" vectorColumn = "vector" @@ -36,13 +36,12 @@ var ( metaColumnSlice = []string{uuidColumn, vectorColumn, metaColumn, ipsColumn} - dropStmt = "DROP TABLE IF EXISTS vald.meta_vector;" + dropStmt = "DROP TABLE IF EXISTS vald.backup_vector;" schema = ` -CREATE TABLE vald.meta_vector ( 
+CREATE TABLE vald.backup_vector ( uuid text, vector blob, - meta text, ips list, PRIMARY KEY (uuid) ); diff --git a/hack/benchmark/internal/e2e/e2e.go b/hack/benchmark/internal/e2e/e2e.go index 8ed79513dd..7557c5ad1e 100644 --- a/hack/benchmark/internal/e2e/e2e.go +++ b/hack/benchmark/internal/e2e/e2e.go @@ -22,7 +22,7 @@ import ( "testing" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" ) type Runner interface { diff --git a/hack/benchmark/internal/e2e/e2e_test.go b/hack/benchmark/internal/e2e/e2e_test.go index ec95885434..d0a7324b4a 100644 --- a/hack/benchmark/internal/e2e/e2e_test.go +++ b/hack/benchmark/internal/e2e/e2e_test.go @@ -23,11 +23,13 @@ import ( "testing" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { b *testing.B opts []Option @@ -79,8 +81,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -95,12 +100,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_e2e_Run(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -170,8 +175,11 @@ func Test_e2e_Run(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } diff --git a/hack/benchmark/internal/e2e/option.go b/hack/benchmark/internal/e2e/option.go index 6da6982450..0b8dc62c74 100644 --- a/hack/benchmark/internal/e2e/option.go +++ b/hack/benchmark/internal/e2e/option.go @@ -22,20 +22,18 @@ import ( "testing" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" ) type Option func(*e2e) -var ( - defaultOptions = []Option{ - WithServerStarter( - func(context.Context, testing.TB, assets.Dataset) func() { - return func() {} - }, - ), - } -) +var defaultOptions = []Option{ + WithServerStarter( + func(context.Context, testing.TB, assets.Dataset) func() { + return func() {} + }, + ), +} func WithName(name string) Option { return func(e *e2e) { diff --git a/hack/benchmark/internal/e2e/option_test.go b/hack/benchmark/internal/e2e/option_test.go index 70c8165979..41b04a8efc 100644 --- a/hack/benchmark/internal/e2e/option_test.go +++ b/hack/benchmark/internal/e2e/option_test.go @@ -22,12 +22,13 @@ import ( "testing" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/internal/client" - + "github.com/vdaas/vald/internal/client/v1/client" "go.uber.org/goleak" ) func TestWithName(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { name string @@ -65,7 +66,7 @@ func TestWithName(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, 
w.obj) } return nil } @@ -101,9 +102,11 @@ func TestWithName(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -124,7 +127,7 @@ func TestWithName(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -132,7 +135,7 @@ func TestWithName(t *testing.T) { got := WithName(test.args.name) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -141,6 +144,8 @@ func TestWithName(t *testing.T) { } func TestWithClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { c client.Client @@ -178,7 +183,7 @@ func TestWithClient(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -214,9 +219,11 @@ func TestWithClient(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -237,7 +244,7 @@ func TestWithClient(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -245,7 +252,7 @@ func TestWithClient(t *testing.T) { got := WithClient(test.args.c) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -254,6 +261,8 @@ func TestWithClient(t *testing.T) { } func TestWithStrategy(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { strategis []Strategy @@ -291,7 +300,7 @@ func TestWithStrategy(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -327,9 +336,11 @@ func TestWithStrategy(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -350,7 +361,7 @@ func TestWithStrategy(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -358,7 +369,7 @@ func TestWithStrategy(t *testing.T) { got := WithStrategy(test.args.strategis...) 
obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -367,6 +378,8 @@ func TestWithStrategy(t *testing.T) { } func TestWithServerStarter(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { f func(context.Context, testing.TB, assets.Dataset) func() @@ -404,7 +417,7 @@ func TestWithServerStarter(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -440,9 +453,11 @@ func TestWithServerStarter(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -463,7 +478,7 @@ func TestWithServerStarter(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -471,7 +486,7 @@ func TestWithServerStarter(t *testing.T) { got := WithServerStarter(test.args.f) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/hack/benchmark/internal/e2e/strategy.go b/hack/benchmark/internal/e2e/strategy.go index bffd03ea65..7cd4845da6 100644 --- a/hack/benchmark/internal/e2e/strategy.go +++ b/hack/benchmark/internal/e2e/strategy.go @@ -22,7 +22,7 @@ import ( "testing" "github.com/vdaas/vald/hack/benchmark/internal/assets" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" ) type Strategy interface { diff --git a/hack/benchmark/internal/e2e/strategy/create_index.go b/hack/benchmark/internal/e2e/strategy/create_index.go index 34df1ddf4c..d9f97f0b69 100644 --- a/hack/benchmark/internal/e2e/strategy/create_index.go +++ b/hack/benchmark/internal/e2e/strategy/create_index.go @@ -23,7 +23,7 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" ) type createIndex struct { @@ -48,7 +48,7 @@ func (ci *createIndex) Run(ctx context.Context, b *testing.B, c client.Client, d } func (ci *createIndex) do(ctx context.Context, b *testing.B) { - if err := ci.Indexer.CreateIndex(ctx, &client.ControlCreateIndexRequest{ + if _, err := ci.Indexer.CreateIndex(ctx, &client.ControlCreateIndexRequest{ PoolSize: ci.poolSize, }); err != nil { b.Error(err) diff --git a/hack/benchmark/internal/e2e/strategy/create_index_option.go b/hack/benchmark/internal/e2e/strategy/create_index_option.go index 42952e3b13..79f1956a24 100644 --- a/hack/benchmark/internal/e2e/strategy/create_index_option.go +++ b/hack/benchmark/internal/e2e/strategy/create_index_option.go @@ -17,15 +17,13 @@ // Package strategy provides strategy for e2e testing functions package strategy -import "github.com/vdaas/vald/internal/client" +import "github.com/vdaas/vald/internal/client/v1/client" type CreateIndexOption func(*createIndex) -var ( - 
defaultCreateIndexOptions = []CreateIndexOption{ - WithCreateIndexPoolSize(10000), - } -) +var defaultCreateIndexOptions = []CreateIndexOption{ + WithCreateIndexPoolSize(10000), +} func WithCreateIndexPoolSize(size int) CreateIndexOption { return func(ci *createIndex) { diff --git a/hack/benchmark/internal/e2e/strategy/create_index_option_test.go b/hack/benchmark/internal/e2e/strategy/create_index_option_test.go index 844962aeeb..f1c4acf866 100644 --- a/hack/benchmark/internal/e2e/strategy/create_index_option_test.go +++ b/hack/benchmark/internal/e2e/strategy/create_index_option_test.go @@ -20,12 +20,13 @@ package strategy import ( "testing" - "github.com/vdaas/vald/internal/client" - + "github.com/vdaas/vald/internal/client/v1/client" "go.uber.org/goleak" ) func TestWithCreateIndexPoolSize(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { size int @@ -63,7 +64,7 @@ func TestWithCreateIndexPoolSize(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithCreateIndexPoolSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithCreateIndexPoolSize(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithCreateIndexPoolSize(t *testing.T) { got := WithCreateIndexPoolSize(test.args.size) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -139,6 +142,8 @@ func TestWithCreateIndexPoolSize(t *testing.T) { } func TestWithCreateIndexClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { c client.Indexer @@ -176,7 +181,7 @@ func TestWithCreateIndexClient(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -212,9 +217,11 @@ func TestWithCreateIndexClient(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -235,7 +242,7 @@ func TestWithCreateIndexClient(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -243,7 +250,7 @@ func TestWithCreateIndexClient(t *testing.T) { got := WithCreateIndexClient(test.args.c) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := 
test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/hack/benchmark/internal/e2e/strategy/create_index_test.go b/hack/benchmark/internal/e2e/strategy/create_index_test.go index 6def125f8e..1b4bb70e00 100644 --- a/hack/benchmark/internal/e2e/strategy/create_index_test.go +++ b/hack/benchmark/internal/e2e/strategy/create_index_test.go @@ -24,11 +24,13 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNewCreateIndex(t *testing.T) { + t.Parallel() type args struct { opts []CreateIndexOption } @@ -77,8 +79,11 @@ func TestNewCreateIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -93,12 +98,12 @@ func TestNewCreateIndex(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_createIndex_Run(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -165,8 +170,11 @@ func Test_createIndex_Run(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -190,6 +198,7 @@ func Test_createIndex_Run(t *testing.T) { } func Test_createIndex_do(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -250,8 +259,11 @@ func Test_createIndex_do(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } diff --git a/hack/benchmark/internal/e2e/strategy/insert.go b/hack/benchmark/internal/e2e/strategy/insert.go index 4baf51f953..6b8f45236f 100644 --- a/hack/benchmark/internal/e2e/strategy/insert.go +++ b/hack/benchmark/internal/e2e/strategy/insert.go @@ -25,7 +25,7 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" ) type insert struct { @@ -90,9 +90,11 @@ func (isrt *insert) runParallel(ctx context.Context, b *testing.B, c client.Clie } func (isrt *insert) do(ctx context.Context, b *testing.B, c client.Client, id string, vector []float32) { - if err := c.Insert(ctx, &client.ObjectVector{ - Id: id, - Vector: vector, + if _, err := c.Insert(ctx, &client.InsertRequest{ + Vector: &client.ObjectVector{ + Id: id, + Vector: vector, + }, }); err != nil { b.Error(err) } diff --git a/hack/benchmark/internal/e2e/strategy/insert_option.go b/hack/benchmark/internal/e2e/strategy/insert_option.go index 9b9c082b11..e67639d766 100644 --- a/hack/benchmark/internal/e2e/strategy/insert_option.go +++ b/hack/benchmark/internal/e2e/strategy/insert_option.go @@ -19,11 +19,9 @@ package strategy type InsertOption func(*insert) -var ( - defaultInsertOption = []InsertOption{ - WithParallelInsert(false), - } -) +var defaultInsertOption = []InsertOption{ + WithParallelInsert(false), +} func WithParallelInsert(flag bool) InsertOption { return func(e 
*insert) { diff --git a/hack/benchmark/internal/e2e/strategy/insert_option_test.go b/hack/benchmark/internal/e2e/strategy/insert_option_test.go index 8ea010bccb..88c781297e 100644 --- a/hack/benchmark/internal/e2e/strategy/insert_option_test.go +++ b/hack/benchmark/internal/e2e/strategy/insert_option_test.go @@ -24,6 +24,8 @@ import ( ) func TestWithParallelInsert(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { flag bool @@ -61,7 +63,7 @@ func TestWithParallelInsert(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -97,9 +99,11 @@ func TestWithParallelInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -120,7 +124,7 @@ func TestWithParallelInsert(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -128,7 +132,7 @@ func TestWithParallelInsert(t *testing.T) { got := WithParallelInsert(test.args.flag) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/hack/benchmark/internal/e2e/strategy/insert_test.go b/hack/benchmark/internal/e2e/strategy/insert_test.go index 58bf76ebcf..73a72a9478 100644 --- a/hack/benchmark/internal/e2e/strategy/insert_test.go +++ b/hack/benchmark/internal/e2e/strategy/insert_test.go @@ -24,11 +24,13 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNewInsert(t *testing.T) { + t.Parallel() type args struct { opts []InsertOption } @@ -77,8 +79,11 @@ func TestNewInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -93,12 +98,12 @@ func TestNewInsert(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_insert_Run(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -162,8 +167,11 @@ func Test_insert_Run(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -186,6 +194,7 @@ func Test_insert_Run(t *testing.T) { } func Test_insert_run(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -249,8 +258,11 @@ func Test_insert_run(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) 
if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -273,6 +285,7 @@ func Test_insert_run(t *testing.T) { } func Test_insert_runParallel(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -336,8 +349,11 @@ func Test_insert_runParallel(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -360,6 +376,7 @@ func Test_insert_runParallel(t *testing.T) { } func Test_insert_do(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -426,8 +443,11 @@ func Test_insert_do(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } diff --git a/hack/benchmark/internal/e2e/strategy/remove.go b/hack/benchmark/internal/e2e/strategy/remove.go index 7661b764be..5bd37e0d03 100644 --- a/hack/benchmark/internal/e2e/strategy/remove.go +++ b/hack/benchmark/internal/e2e/strategy/remove.go @@ -25,7 +25,7 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" ) type remove struct { @@ -81,8 +81,10 @@ func (r *remove) runParallel(ctx context.Context, b *testing.B, c client.Client, } func (r *remove) do(ctx context.Context, b *testing.B, c client.Client, id string) { - if err := c.Remove(ctx, &client.ObjectID{ - Id: id, + if _, err := c.Remove(ctx, &client.RemoveRequest{ + Id: &client.ObjectID{ + Id: id, + }, }); err != nil { b.Error(err) } diff --git a/hack/benchmark/internal/e2e/strategy/remove_option.go b/hack/benchmark/internal/e2e/strategy/remove_option.go index c1328728b0..0750ab1b8d 100644 --- a/hack/benchmark/internal/e2e/strategy/remove_option.go +++ b/hack/benchmark/internal/e2e/strategy/remove_option.go @@ -19,11 +19,9 @@ package strategy type RemoveOption func(*remove) -var ( - defaultRemoveOptions = []RemoveOption{ - WithParallelRemove(false), - } -) +var defaultRemoveOptions = []RemoveOption{ + WithParallelRemove(false), +} func WithParallelRemove(flag bool) RemoveOption { return func(e *remove) { diff --git a/hack/benchmark/internal/e2e/strategy/remove_option_test.go b/hack/benchmark/internal/e2e/strategy/remove_option_test.go index fa50629662..747c735a8c 100644 --- a/hack/benchmark/internal/e2e/strategy/remove_option_test.go +++ b/hack/benchmark/internal/e2e/strategy/remove_option_test.go @@ -24,6 +24,8 @@ import ( ) func TestWithParallelRemove(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { flag bool @@ -61,7 +63,7 @@ func TestWithParallelRemove(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -97,9 +99,11 @@ func TestWithParallelRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -120,7 +124,7 
@@ func TestWithParallelRemove(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -128,7 +132,7 @@ func TestWithParallelRemove(t *testing.T) { got := WithParallelRemove(test.args.flag) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/hack/benchmark/internal/e2e/strategy/remove_test.go b/hack/benchmark/internal/e2e/strategy/remove_test.go index 3eeb93801d..7e97a28113 100644 --- a/hack/benchmark/internal/e2e/strategy/remove_test.go +++ b/hack/benchmark/internal/e2e/strategy/remove_test.go @@ -24,11 +24,13 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNewRemove(t *testing.T) { + t.Parallel() type args struct { opts []RemoveOption } @@ -77,8 +79,11 @@ func TestNewRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -93,12 +98,12 @@ func TestNewRemove(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_remove_Run(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -162,8 +167,11 @@ func Test_remove_Run(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -186,6 +194,7 @@ func Test_remove_Run(t *testing.T) { } func Test_remove_run(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -249,8 +258,11 @@ func Test_remove_run(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -273,6 +285,7 @@ func Test_remove_run(t *testing.T) { } func Test_remove_runParallel(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -336,8 +349,11 @@ func Test_remove_runParallel(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -360,6 +376,7 @@ func Test_remove_runParallel(t *testing.T) { } func Test_remove_do(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -423,8 +440,11 @@ func Test_remove_do(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } diff --git a/hack/benchmark/internal/e2e/strategy/search.go b/hack/benchmark/internal/e2e/strategy/search.go index d0ca62a386..516a06756f 100644 --- 
a/hack/benchmark/internal/e2e/strategy/search.go +++ b/hack/benchmark/internal/e2e/strategy/search.go @@ -24,7 +24,7 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" ) type search struct { diff --git a/hack/benchmark/internal/e2e/strategy/search_option.go b/hack/benchmark/internal/e2e/strategy/search_option.go index a2b4cdfd29..1007e81d79 100644 --- a/hack/benchmark/internal/e2e/strategy/search_option.go +++ b/hack/benchmark/internal/e2e/strategy/search_option.go @@ -17,7 +17,7 @@ // Package strategy provides strategy for e2e testing functions package strategy -import "github.com/vdaas/vald/internal/client" +import "github.com/vdaas/vald/internal/client/v1/client" type SearchOption func(*search) @@ -27,12 +27,10 @@ var searchCfg = &client.SearchConfig{ Epsilon: 0.01, } -var ( - defaultSearchOptions = []SearchOption{ - WithSearchParallel(false), - WithSearchConfig(searchCfg), - } -) +var defaultSearchOptions = []SearchOption{ + WithSearchParallel(false), + WithSearchConfig(searchCfg), +} func WithSearchParallel(flag bool) SearchOption { return func(s *search) { diff --git a/hack/benchmark/internal/e2e/strategy/search_option_test.go b/hack/benchmark/internal/e2e/strategy/search_option_test.go index 52eb76ac78..3535d6bda1 100644 --- a/hack/benchmark/internal/e2e/strategy/search_option_test.go +++ b/hack/benchmark/internal/e2e/strategy/search_option_test.go @@ -20,12 +20,13 @@ package strategy import ( "testing" - "github.com/vdaas/vald/internal/client" - + "github.com/vdaas/vald/internal/client/v1/client" "go.uber.org/goleak" ) func TestWithSearchParallel(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { flag bool @@ -63,7 +64,7 @@ func TestWithSearchParallel(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithSearchParallel(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithSearchParallel(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithSearchParallel(t *testing.T) { got := WithSearchParallel(test.args.flag) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -139,6 +142,8 @@ func TestWithSearchParallel(t *testing.T) { } func TestWithSearchConfig(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { cfg *client.SearchConfig @@ -176,7 +181,7 @@ func TestWithSearchConfig(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", 
obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -212,9 +217,11 @@ func TestWithSearchConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -235,7 +242,7 @@ func TestWithSearchConfig(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -243,7 +250,7 @@ func TestWithSearchConfig(t *testing.T) { got := WithSearchConfig(test.args.cfg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/hack/benchmark/internal/e2e/strategy/search_test.go b/hack/benchmark/internal/e2e/strategy/search_test.go index a1e3229556..825258ff09 100644 --- a/hack/benchmark/internal/e2e/strategy/search_test.go +++ b/hack/benchmark/internal/e2e/strategy/search_test.go @@ -24,11 +24,13 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNewSearch(t *testing.T) { + t.Parallel() type args struct { opts []SearchOption } @@ -77,8 +79,11 @@ func TestNewSearch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -93,12 +98,12 @@ func TestNewSearch(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_search_Run(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -165,8 +170,11 @@ func Test_search_Run(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -190,6 +198,7 @@ func Test_search_Run(t *testing.T) { } func Test_search_run(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -256,8 +265,11 @@ func Test_search_run(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -281,6 +293,7 @@ func Test_search_run(t *testing.T) { } func Test_search_runParallel(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -347,8 +360,11 @@ func Test_search_runParallel(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -372,6 +388,7 @@ func Test_search_runParallel(t *testing.T) { } func Test_search_do(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -438,8 +455,11 @@ func Test_search_do(t 
*testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } diff --git a/hack/benchmark/internal/e2e/strategy/stream_insert.go b/hack/benchmark/internal/e2e/strategy/stream_insert.go index 1e1cdfc97a..d332062a7e 100644 --- a/hack/benchmark/internal/e2e/strategy/stream_insert.go +++ b/hack/benchmark/internal/e2e/strategy/stream_insert.go @@ -25,7 +25,8 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" + "github.com/vdaas/vald/internal/net/grpc" ) type streamInsert struct{} @@ -38,7 +39,7 @@ func NewStreamInsert(opts ...StreamInsertOption) e2e.Strategy { return s } -func (sisrt *streamInsert) dataProvider(total *uint32, b *testing.B, dataset assets.Dataset) func() *client.ObjectVector { +func (sisrt *streamInsert) dataProvider(total *uint32, b *testing.B, dataset assets.Dataset) func() *client.InsertRequest { var cnt uint32 b.StopTimer() @@ -46,7 +47,7 @@ func (sisrt *streamInsert) dataProvider(total *uint32, b *testing.B, dataset ass b.ResetTimer() b.StartTimer() - return func() *client.ObjectVector { + return func() *client.InsertRequest { n := int(atomic.AddUint32(&cnt, 1)) - 1 if n >= b.N { return nil @@ -57,9 +58,11 @@ func (sisrt *streamInsert) dataProvider(total *uint32, b *testing.B, dataset ass if err != nil { return nil } - return &client.ObjectVector{ - Id: fmt.Sprint(n), - Vector: v.([]float32), + return &client.InsertRequest{ + Vector: &client.ObjectVector{ + Id: fmt.Sprint(n), + Vector: v.([]float32), + }, } } } @@ -67,10 +70,15 @@ func (sisrt *streamInsert) dataProvider(total *uint32, b *testing.B, dataset ass func (sisrt *streamInsert) Run(ctx context.Context, b *testing.B, c client.Client, dataset assets.Dataset) { var total uint32 b.Run("StreamInsert", func(bb *testing.B) { - c.StreamInsert(ctx, sisrt.dataProvider(&total, bb, dataset), func(err error) { - if err != nil { - bb.Error(err) - } + srv, err := c.StreamInsert(ctx) + if err != nil { + bb.Error(err) + } + grpc.BidirectionalStreamClient(srv, func() interface{} { + return sisrt.dataProvider(&total, bb, dataset)() + }, func() interface{} { + return new(client.InsertRequest) + }, func(msg interface{}, err error) { }) }) } diff --git a/hack/benchmark/internal/e2e/strategy/stream_insert_option.go b/hack/benchmark/internal/e2e/strategy/stream_insert_option.go index b5a68dde3f..f1d05e5a04 100644 --- a/hack/benchmark/internal/e2e/strategy/stream_insert_option.go +++ b/hack/benchmark/internal/e2e/strategy/stream_insert_option.go @@ -19,6 +19,4 @@ package strategy type StreamInsertOption func(*streamInsert) -var ( - defaultStreamInsertOptions = []StreamInsertOption{} -) +var defaultStreamInsertOptions = []StreamInsertOption{} diff --git a/hack/benchmark/internal/e2e/strategy/stream_insert_test.go b/hack/benchmark/internal/e2e/strategy/stream_insert_test.go index 370af2059e..ed56798dd2 100644 --- a/hack/benchmark/internal/e2e/strategy/stream_insert_test.go +++ b/hack/benchmark/internal/e2e/strategy/stream_insert_test.go @@ -24,13 +24,13 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" "github.com/vdaas/vald/internal/errors" - 
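// Editor's sketch (not part of the patch) of the streaming shape the
// stream_insert.go hunk above switches to: open the bidirectional stream once
// on the v1 client, then let grpc.BidirectionalStreamClient pump it with a
// provider that returns nil to end the input. The argument order and the
// request wrapping mirror their use in the hunks above; the id scheme and the
// empty receive callback are illustrative assumptions.
package strategy

import (
	"context"
	"fmt"

	"github.com/vdaas/vald/internal/client/v1/client"
	"github.com/vdaas/vald/internal/net/grpc"
)

func streamInsertSketch(ctx context.Context, c client.Client, vecs [][]float32) error {
	srv, err := c.StreamInsert(ctx)
	if err != nil {
		return err
	}
	i := 0
	return grpc.BidirectionalStreamClient(srv,
		func() interface{} { // data provider: returning nil closes the send side
			if i >= len(vecs) {
				return nil
			}
			req := &client.InsertRequest{
				Vector: &client.ObjectVector{
					Id:     fmt.Sprint(i),
					Vector: vecs[i],
				},
			}
			i++
			return req
		},
		func() interface{} { // constructor for each received message, as written in the hunk above
			return new(client.InsertRequest)
		},
		func(_ interface{}, err error) { // per-message callback
			if err != nil {
				// surface or log the per-message error here
				_ = err
			}
		})
}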
"go.uber.org/goleak" ) func TestNewStreamInsert(t *testing.T) { + t.Parallel() type args struct { opts []StreamInsertOption } @@ -79,9 +79,11 @@ func TestNewStreamInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -96,30 +98,30 @@ func TestNewStreamInsert(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_streamInsert_dataProvider(t *testing.T) { + t.Parallel() type args struct { total *uint32 b *testing.B dataset assets.Dataset } type want struct { - want func() *client.ObjectVector + want func() *client.InsertRequest } type test struct { name string args args sisrt *streamInsert want want - checkFunc func(want, func() *client.ObjectVector) error + checkFunc func(want, func() *client.InsertRequest) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got func() *client.ObjectVector) error { + defaultCheckFunc := func(w want, got func() *client.InsertRequest) error { if !reflect.DeepEqual(got, w.want) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } @@ -157,9 +159,11 @@ func Test_streamInsert_dataProvider(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -175,12 +179,12 @@ func Test_streamInsert_dataProvider(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_streamInsert_Run(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -235,9 +239,11 @@ func Test_streamInsert_Run(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } diff --git a/hack/benchmark/internal/e2e/strategy/stream_remove.go b/hack/benchmark/internal/e2e/strategy/stream_remove.go index 6143ae4158..a5f3ff39a1 100644 --- a/hack/benchmark/internal/e2e/strategy/stream_remove.go +++ b/hack/benchmark/internal/e2e/strategy/stream_remove.go @@ -25,7 +25,8 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" + "github.com/vdaas/vald/internal/net/grpc" ) type streamRemove struct{} @@ -38,7 +39,7 @@ func NewStreamRemove(opts ...StreamRemoveOption) e2e.Strategy { return sr } -func (sr *streamRemove) dataProvider(total *uint32, b *testing.B, dataset assets.Dataset) func() *client.ObjectID { +func (sr *streamRemove) dataProvider(total *uint32, b *testing.B, dataset assets.Dataset) func() *client.RemoveRequest { var cnt uint32 b.StopTimer() @@ -47,15 +48,17 @@ func (sr *streamRemove) dataProvider(total *uint32, b *testing.B, dataset assets b.StartTimer() defer b.StopTimer() - return func() *client.ObjectID { + return func() *client.RemoveRequest { n := int(atomic.AddUint32(&cnt, 1)) - 1 if n >= b.N { return nil } total := int(atomic.AddUint32(total, 1)) - 1 - return &client.ObjectID{ - Id: fmt.Sprint(total % 
dataset.TrainSize()), + return &client.RemoveRequest{ + Id: &client.ObjectID{ + Id: fmt.Sprint(total % dataset.TrainSize()), + }, } } } @@ -63,10 +66,15 @@ func (sr *streamRemove) dataProvider(total *uint32, b *testing.B, dataset assets func (sr *streamRemove) Run(ctx context.Context, b *testing.B, c client.Client, dataset assets.Dataset) { var total uint32 b.Run("StreamRemove", func(bb *testing.B) { - c.StreamRemove(ctx, sr.dataProvider(&total, bb, dataset), func(err error) { - if err != nil { - b.Error(err) - } + srv, err := c.StreamRemove(ctx) + if err != nil { + bb.Error(err) + } + grpc.BidirectionalStreamClient(srv, func() interface{} { + return sr.dataProvider(&total, bb, dataset)() + }, func() interface{} { + return new(client.RemoveRequest) + }, func(msg interface{}, err error) { }) }) } diff --git a/hack/benchmark/internal/e2e/strategy/stream_remove_option.go b/hack/benchmark/internal/e2e/strategy/stream_remove_option.go index c0a7f51f29..d71fc671e1 100644 --- a/hack/benchmark/internal/e2e/strategy/stream_remove_option.go +++ b/hack/benchmark/internal/e2e/strategy/stream_remove_option.go @@ -19,6 +19,4 @@ package strategy type StreamRemoveOption func(*streamRemove) -var ( - defaultStreamRemoveOptions = []StreamRemoveOption{} -) +var defaultStreamRemoveOptions = []StreamRemoveOption{} diff --git a/hack/benchmark/internal/e2e/strategy/stream_remove_test.go b/hack/benchmark/internal/e2e/strategy/stream_remove_test.go index 896e9e22db..46d05d329b 100644 --- a/hack/benchmark/internal/e2e/strategy/stream_remove_test.go +++ b/hack/benchmark/internal/e2e/strategy/stream_remove_test.go @@ -24,13 +24,13 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNewStreamRemove(t *testing.T) { + t.Parallel() type args struct { opts []StreamRemoveOption } @@ -79,9 +79,11 @@ func TestNewStreamRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -96,30 +98,30 @@ func TestNewStreamRemove(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_streamRemove_dataProvider(t *testing.T) { + t.Parallel() type args struct { total *uint32 b *testing.B dataset assets.Dataset } type want struct { - want func() *client.ObjectID + want func() *client.RemoveRequest } type test struct { name string args args sr *streamRemove want want - checkFunc func(want, func() *client.ObjectID) error + checkFunc func(want, func() *client.RemoveRequest) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got func() *client.ObjectID) error { + defaultCheckFunc := func(w want, got func() *client.RemoveRequest) error { if !reflect.DeepEqual(got, w.want) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } @@ -157,9 +159,11 @@ func Test_streamRemove_dataProvider(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -175,12 +179,12 @@ func 
Test_streamRemove_dataProvider(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_streamRemove_Run(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -235,9 +239,11 @@ func Test_streamRemove_Run(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } diff --git a/hack/benchmark/internal/e2e/strategy/stream_search.go b/hack/benchmark/internal/e2e/strategy/stream_search.go index 30ebabf794..4a7d0d8892 100644 --- a/hack/benchmark/internal/e2e/strategy/stream_search.go +++ b/hack/benchmark/internal/e2e/strategy/stream_search.go @@ -24,7 +24,8 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" + "github.com/vdaas/vald/internal/net/grpc" ) type streamSearch struct { @@ -68,10 +69,15 @@ func (s *streamSearch) dataProvider(total *uint32, b *testing.B, dataset assets. func (s *streamSearch) Run(ctx context.Context, b *testing.B, c client.Client, dataset assets.Dataset) { var total uint32 b.Run("StreamSearch", func(bb *testing.B) { - c.StreamSearch(ctx, s.dataProvider(&total, bb, dataset), func(_ *client.SearchResponse, err error) { - if err != nil { - bb.Error(err) - } + srv, err := c.StreamSearch(ctx) + if err != nil { + bb.Error(err) + } + grpc.BidirectionalStreamClient(srv, func() interface{} { + return s.dataProvider(&total, bb, dataset)() + }, func() interface{} { + return new(client.SearchRequest) + }, func(msg interface{}, err error) { }) }) } diff --git a/hack/benchmark/internal/e2e/strategy/stream_search_option.go b/hack/benchmark/internal/e2e/strategy/stream_search_option.go index aca02eba68..06ad8fb7ed 100644 --- a/hack/benchmark/internal/e2e/strategy/stream_search_option.go +++ b/hack/benchmark/internal/e2e/strategy/stream_search_option.go @@ -17,15 +17,13 @@ // Package strategy provides strategy for e2e testing functions package strategy -import "github.com/vdaas/vald/internal/client" +import "github.com/vdaas/vald/internal/client/v1/client" type StreamSearchOption func(*streamSearch) -var ( - defaultStreamSearchOptions = []StreamSearchOption{ - WithStreamSearchConfig(searchCfg), - } -) +var defaultStreamSearchOptions = []StreamSearchOption{ + WithStreamSearchConfig(searchCfg), +} func WithStreamSearchConfig(cfg *client.SearchConfig) StreamSearchOption { return func(ss *streamSearch) { diff --git a/hack/benchmark/internal/e2e/strategy/stream_search_option_test.go b/hack/benchmark/internal/e2e/strategy/stream_search_option_test.go index 2c93d438dc..80c68c753a 100644 --- a/hack/benchmark/internal/e2e/strategy/stream_search_option_test.go +++ b/hack/benchmark/internal/e2e/strategy/stream_search_option_test.go @@ -20,12 +20,13 @@ package strategy import ( "testing" - "github.com/vdaas/vald/internal/client" - + "github.com/vdaas/vald/internal/client/v1/client" "go.uber.org/goleak" ) func TestWithStreamSearchConfig(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { cfg *client.SearchConfig @@ -63,7 +64,7 @@ func TestWithStreamSearchConfig(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { 
- return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithStreamSearchConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithStreamSearchConfig(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithStreamSearchConfig(t *testing.T) { got := WithStreamSearchConfig(test.args.cfg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/hack/benchmark/internal/e2e/strategy/stream_search_test.go b/hack/benchmark/internal/e2e/strategy/stream_search_test.go index 2a459d5d16..0cafd079ce 100644 --- a/hack/benchmark/internal/e2e/strategy/stream_search_test.go +++ b/hack/benchmark/internal/e2e/strategy/stream_search_test.go @@ -24,13 +24,13 @@ import ( "github.com/vdaas/vald/hack/benchmark/internal/assets" "github.com/vdaas/vald/hack/benchmark/internal/e2e" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/internal/client/v1/client" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNewStreamSearch(t *testing.T) { + t.Parallel() type args struct { opts []StreamSearchOption } @@ -79,9 +79,11 @@ func TestNewStreamSearch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -96,12 +98,12 @@ func TestNewStreamSearch(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_streamSearch_dataProvider(t *testing.T) { + t.Parallel() type args struct { total *uint32 b *testing.B @@ -166,9 +168,11 @@ func Test_streamSearch_dataProvider(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -186,12 +190,12 @@ func Test_streamSearch_dataProvider(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_streamSearch_Run(t *testing.T) { + t.Parallel() type args struct { ctx context.Context b *testing.B @@ -255,9 +259,11 @@ func Test_streamSearch_Run(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } diff --git a/hack/benchmark/internal/starter/agent/core/ngt/ngt_test.go b/hack/benchmark/internal/starter/agent/core/ngt/ngt_test.go index 75405eb84f..64eafbd62c 100644 --- a/hack/benchmark/internal/starter/agent/core/ngt/ngt_test.go +++ b/hack/benchmark/internal/starter/agent/core/ngt/ngt_test.go @@ 
-95,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -182,7 +181,6 @@ func Test_server_Run(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/internal/starter/agent/core/ngt/option.go b/hack/benchmark/internal/starter/agent/core/ngt/option.go index 00049b85b2..704ba2880a 100644 --- a/hack/benchmark/internal/starter/agent/core/ngt/option.go +++ b/hack/benchmark/internal/starter/agent/core/ngt/option.go @@ -24,67 +24,65 @@ import ( type Option func(*server) -var ( - defaultOptions = []Option{ - WithConfig(&config.Data{ - GlobalConfig: config.GlobalConfig{ - Version: "v0.0.0", - }, - Server: &iconfig.Servers{ - Servers: []*iconfig.Server{ - { - Name: "agent-grpc", - Host: "127.0.0.1", - Port: 8082, - Mode: "GRPC", - ProbeWaitTime: "0s", - HTTP: &iconfig.HTTP{ - ShutdownDuration: "0s", - }, - }, - { - Name: "agent-rest", - Host: "127.0.0.1", - Port: 8081, - Mode: "REST", - ProbeWaitTime: "0s", - HTTP: &iconfig.HTTP{ - ShutdownDuration: "0s", - HandlerTimeout: "60s", - IdleTimeout: "60s", - ReadHeaderTimeout: "60s", - ReadTimeout: "60s", - WriteTimeout: "60s", - }, +var defaultOptions = []Option{ + WithConfig(&config.Data{ + GlobalConfig: config.GlobalConfig{ + Version: "v0.0.0", + }, + Server: &iconfig.Servers{ + Servers: []*iconfig.Server{ + { + Name: "agent-grpc", + Host: "127.0.0.1", + Port: 8082, + Mode: "GRPC", + ProbeWaitTime: "0s", + HTTP: &iconfig.HTTP{ + ShutdownDuration: "0s", }, }, - StartUpStrategy: []string{ - "agent-grpc", - "agent-rest", - }, - ShutdownStrategy: []string{ - "agent-grpc", - "agent-rest", - }, - FullShutdownDuration: "600s", - TLS: &iconfig.TLS{ - Enabled: false, + { + Name: "agent-rest", + Host: "127.0.0.1", + Port: 8081, + Mode: "REST", + ProbeWaitTime: "0s", + HTTP: &iconfig.HTTP{ + ShutdownDuration: "0s", + HandlerTimeout: "60s", + IdleTimeout: "60s", + ReadHeaderTimeout: "60s", + ReadTimeout: "60s", + WriteTimeout: "60s", + }, }, }, - Observability: &iconfig.Observability{ - Enabled: false, + StartUpStrategy: []string{ + "agent-grpc", + "agent-rest", }, - NGT: &iconfig.NGT{ - Dimension: 0, - DistanceType: "unknown", - ObjectType: "unknown", - CreationEdgeSize: 20, - SearchEdgeSize: 10, - EnableInMemoryMode: true, + ShutdownStrategy: []string{ + "agent-grpc", + "agent-rest", }, - }), - } -) + FullShutdownDuration: "600s", + TLS: &iconfig.TLS{ + Enabled: false, + }, + }, + Observability: &iconfig.Observability{ + Enabled: false, + }, + NGT: &iconfig.NGT{ + Dimension: 0, + DistanceType: "unknown", + ObjectType: "unknown", + CreationEdgeSize: 20, + SearchEdgeSize: 10, + EnableInMemoryMode: true, + }, + }), +} func WithConfig(cfg *config.Data) Option { return func(s *server) { diff --git a/hack/benchmark/internal/starter/external/ngtd/ngtd_test.go b/hack/benchmark/internal/starter/external/ngtd/ngtd_test.go index a72e413df3..7aa347bbb6 100644 --- a/hack/benchmark/internal/starter/external/ngtd/ngtd_test.go +++ b/hack/benchmark/internal/starter/external/ngtd/ngtd_test.go @@ -94,7 +94,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -193,7 +192,6 @@ func Test_server_Run(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -279,7 +277,6 @@ func Test_server_createIndexDir(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { 
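// Editor's sketch (not part of the patch) of the functional-option shape the
// starter option files above follow: a package-level defaults slice (now a
// plain var declaration instead of a one-element var block) and a constructor
// that applies the defaults first, then the caller's overrides. The server,
// Config and WithConfig names are illustrative stand-ins for the patched types.
package ngt

type Config struct {
	Dimension int
}

type server struct {
	cfg *Config
}

type Option func(*server)

var defaultOptions = []Option{
	WithConfig(&Config{Dimension: 128}),
}

// WithConfig overwrites the config only when a non-nil value is supplied,
// matching the defensive style of the options touched in this patch.
func WithConfig(cfg *Config) Option {
	return func(s *server) {
		if cfg != nil {
			s.cfg = cfg
		}
	}
}

func New(opts ...Option) *server {
	s := new(server)
	for _, opt := range append(defaultOptions, opts...) {
		opt(s)
	}
	return s
}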
tt.Errorf("error = %v", err) } - }) } } @@ -365,7 +362,6 @@ func Test_server_clearIndexDir(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/internal/starter/external/ngtd/option.go b/hack/benchmark/internal/starter/external/ngtd/option.go index 9ac534b46b..2e109cbfd2 100644 --- a/hack/benchmark/internal/starter/external/ngtd/option.go +++ b/hack/benchmark/internal/starter/external/ngtd/option.go @@ -19,14 +19,12 @@ package ngtd type Option func(*server) -var ( - defaultOptions = []Option{ - WithDimension(128), - WithIndexDir("/tmp/ngtd/"), - WithServerType(HTTP), - WithPort(8200), - } -) +var defaultOptions = []Option{ + WithDimension(128), + WithIndexDir("/tmp/ngtd/"), + WithServerType(HTTP), + WithPort(8200), +} func WithDimension(dim int) Option { return func(n *server) { diff --git a/hack/benchmark/internal/starter/gateway/vald/option.go b/hack/benchmark/internal/starter/gateway/vald/option.go index f4a11cb252..f5b281c678 100644 --- a/hack/benchmark/internal/starter/gateway/vald/option.go +++ b/hack/benchmark/internal/starter/gateway/vald/option.go @@ -19,6 +19,4 @@ package vald type Option func(*server) -var ( - defaultOptions = []Option{} -) +var defaultOptions = []Option{} diff --git a/hack/benchmark/internal/starter/gateway/vald/vald_test.go b/hack/benchmark/internal/starter/gateway/vald/vald_test.go index a327954b1e..6984d7fe45 100644 --- a/hack/benchmark/internal/starter/gateway/vald/vald_test.go +++ b/hack/benchmark/internal/starter/gateway/vald/vald_test.go @@ -91,7 +91,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -166,7 +165,6 @@ func Test_server_Run(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/hack/benchmark/src/singleflight/singleflight_bench_test.go b/hack/benchmark/src/singleflight/singleflight_bench_test.go index d20c86be67..6f936216fb 100644 --- a/hack/benchmark/src/singleflight/singleflight_bench_test.go +++ b/hack/benchmark/src/singleflight/singleflight_bench_test.go @@ -49,33 +49,29 @@ const ( tryCnt = 5 ) -var ( - durs = []time.Duration{ - time.Microsecond * 10, - time.Microsecond * 100, - time.Microsecond * 200, - time.Microsecond * 500, - time.Millisecond, - time.Millisecond * 5, - time.Millisecond * 10, - time.Millisecond * 25, - time.Millisecond * 50, - time.Millisecond * 100, - time.Millisecond * 250, - time.Millisecond * 500, - } -) +var durs = []time.Duration{ + time.Microsecond * 10, + time.Microsecond * 100, + time.Microsecond * 200, + time.Microsecond * 500, + time.Millisecond, + time.Millisecond * 5, + time.Millisecond * 10, + time.Millisecond * 25, + time.Millisecond * 50, + time.Millisecond * 100, + time.Millisecond * 250, + time.Millisecond * 500, +} func (h *helper) Do(parallel int, b *testing.B) { b.Helper() - var ( - fn = func() (interface{}, error) { - atomic.AddInt64(&h.calledCnt, 1) - time.Sleep(h.sleepDur) - return "", nil - } - ) + fn := func() (interface{}, error) { + atomic.AddInt64(&h.calledCnt, 1) + time.Sleep(h.sleepDur) + return "", nil + } doFn := h.initDoFn() @@ -273,6 +269,9 @@ func toCSV(name string, r []Result) error { } defer f.Close() _, err = fmt.Fprintln(f, "goroutine,duration,hit_rate") + if err != nil { + return err + } for _, res := range r { _, err = fmt.Fprintf(f, "%d,%v,%f\n", res.Goroutine, res.Duration, res.HitRate) if err != nil { diff --git a/hack/go.mod.default 
b/hack/go.mod.default index b77bed01e7..ad851db3bc 100755 --- a/hack/go.mod.default +++ b/hack/go.mod.default @@ -22,7 +22,7 @@ replace ( github.com/gorilla/websocket => github.com/gorilla/websocket latest github.com/hailocab/go-hostpool => github.com/monzo/go-hostpool latest github.com/klauspost/compress => github.com/klauspost/compress master - github.com/tensorflow/tensorflow => github.com/tensorflow/tensorflow v2.1.0 + github.com/tensorflow/tensorflow => github.com/tensorflow/tensorflow v2.1.2 golang.org/x/crypto => golang.org/x/crypto latest google.golang.org/grpc => google.golang.org/grpc latest google.golang.org/protobuf => google.golang.org/protobuf latest diff --git a/hack/graphql/gqlgen.sh b/hack/graphql/gqlgen.sh deleted file mode 100644 index f1d8a0d3c0..0000000000 --- a/hack/graphql/gqlgen.sh +++ /dev/null @@ -1,71 +0,0 @@ -# -# Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# $1 = directory -# $2 = schema -# $3 = target - -package=$(echo $1 | sed -e 's:/$::' | awk -F "/" '{ print $NF }') -config=apis/graphql/$package/gqlgen.yml - -if [ ! -f $config ]; then - - cat >$config <= b.durationLimit { dur = b.maxDuration @@ -105,7 +127,7 @@ func (b *backoff) Do(ctx context.Context, f func() (interface{}, error)) (res in } } } - return res, nil + return res, err } return res, err } diff --git a/internal/backoff/backoff_test.go b/internal/backoff/backoff_test.go index 4725748a49..cdc0abff6b 100644 --- a/internal/backoff/backoff_test.go +++ b/internal/backoff/backoff_test.go @@ -26,7 +26,6 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" - "go.uber.org/goleak" ) @@ -74,9 +73,10 @@ func TestNew(t *testing.T) { } } -func TestDo(t *testing.T) { +func Test_backoff_Do(t *testing.T) { + t.Parallel() type args struct { - fn func() (interface{}, error) + fn func(context.Context) (interface{}, bool, error) opts []Option } @@ -91,9 +91,9 @@ func TestDo(t *testing.T) { tests := []test{ func() test { cnt := 0 - fn := func() (interface{}, error) { + fn := func(context.Context) (interface{}, bool, error) { cnt++ - return nil, nil + return nil, false, nil } return test{ @@ -124,12 +124,12 @@ func TestDo(t *testing.T) { func() test { cnt := 0 - fn := func() (interface{}, error) { + fn := func(context.Context) (interface{}, bool, error) { cnt++ if cnt == 2 { - return nil, nil + return nil, false, nil } - return nil, errors.Errorf("error (%d)", cnt) + return nil, true, errors.Errorf("error (%d)", cnt) } return test{ @@ -161,9 +161,44 @@ func TestDo(t *testing.T) { func() test { cnt := 0 - fn := func() (interface{}, error) { + err := errors.New("not retryable error") + fn := func(context.Context) (interface{}, bool, error) { + cnt++ + return nil, false, err + } + + return test{ + name: "returns error when retryable is false", + args: args{ + fn: fn, + opts: []Option{ + WithDisableErrorLog(), + WithRetryCount(6), + }, + }, + ctxFn: func() (context.Context, context.CancelFunc) { + return 
context.WithCancel(context.Background()) + }, + checkFunc: func(got, want error) error { + if cnt != 1 { + return errors.Errorf("error count is wrong, want: %v, got: %v", 1, cnt) + } + + if !errors.Is(want, got) { + return errors.Errorf("not equals. want: %v, got: %v", want, got) + } + + return nil + }, + want: err, + } + }(), + + func() test { + cnt := 0 + fn := func(context.Context) (interface{}, bool, error) { cnt++ - return nil, errors.Errorf("error (%d)", cnt) + return nil, true, errors.Errorf("error (%d)", cnt) } return test{ @@ -196,12 +231,12 @@ func TestDo(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cnt := 0 - fn := func() (interface{}, error) { + fn := func(context.Context) (interface{}, bool, error) { cnt++ if cnt == 2 { cancel() } - return nil, errors.Errorf("error (%d)", cnt) + return nil, true, errors.Errorf("error (%d)", cnt) } return test{ @@ -233,8 +268,8 @@ func TestDo(t *testing.T) { func() test { err := errors.New("error") - fn := func() (interface{}, error) { - return nil, err + fn := func(context.Context) (interface{}, bool, error) { + return nil, true, err } return test{ @@ -262,17 +297,17 @@ func TestDo(t *testing.T) { } log.Init() - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx, cancel := tt.ctxFn() + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + ctx, cancel := test.ctxFn() defer cancel() - - _, err := New(tt.args.opts...).Do(ctx, tt.args.fn) - if tt.want == nil && err != nil { + _, err := New(test.args.opts...).Do(ctx, test.args.fn) + if test.want == nil && err != nil { t.Errorf("Do return err: %v", err) } - if err := tt.checkFunc(err, tt.want); err != nil { + if err := test.checkFunc(err, test.want); err != nil { t.Error(err) } }) @@ -301,134 +336,8 @@ func TestClose(t *testing.T) { } } -func Test_backoff_Do(t *testing.T) { - type args struct { - ctx context.Context - f func() (interface{}, error) - } - type fields struct { - wg sync.WaitGroup - backoffFactor float64 - initialDuration float64 - jittedInitialDuration float64 - jitterLimit float64 - durationLimit float64 - maxDuration float64 - maxRetryCount int - backoffTimeLimit time.Duration - errLog bool - } - type want struct { - wantRes interface{} - err error - } - type test struct { - name string - args args - fields fields - want want - checkFunc func(want, interface{}, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, gotRes interface{}, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) - } - if !reflect.DeepEqual(gotRes, w.wantRes) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - ctx: nil, - f: nil, - }, - fields: fields { - wg: sync.WaitGroup{}, - backoffFactor: 0, - initialDuration: 0, - jittedInitialDuration: 0, - jitterLimit: 0, - durationLimit: 0, - maxDuration: 0, - maxRetryCount: 0, - backoffTimeLimit: nil, - errLog: false, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - ctx: nil, - f: nil, - }, - fields: fields { - wg: sync.WaitGroup{}, - backoffFactor: 0, - initialDuration: 0, - jittedInitialDuration: 0, - jitterLimit: 0, - durationLimit: 0, - maxDuration: 0, - maxRetryCount: 0, - backoffTimeLimit: nil, - errLog: false, - 
}, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - b := &backoff{ - wg: test.fields.wg, - backoffFactor: test.fields.backoffFactor, - initialDuration: test.fields.initialDuration, - jittedInitialDuration: test.fields.jittedInitialDuration, - jitterLimit: test.fields.jitterLimit, - durationLimit: test.fields.durationLimit, - maxDuration: test.fields.maxDuration, - maxRetryCount: test.fields.maxRetryCount, - backoffTimeLimit: test.fields.backoffTimeLimit, - errLog: test.fields.errLog, - } - - gotRes, err := b.Do(test.args.ctx, test.args.f) - if err := test.checkFunc(test.want, gotRes, err); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - func Test_backoff_addJitter(t *testing.T) { + t.Parallel() type args struct { dur float64 } @@ -514,9 +423,11 @@ func Test_backoff_addJitter(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -543,12 +454,12 @@ func Test_backoff_addJitter(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backoff_Close(t *testing.T) { + t.Parallel() type fields struct { wg sync.WaitGroup backoffFactor float64 @@ -620,9 +531,11 @@ func Test_backoff_Close(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } diff --git a/internal/backoff/option.go b/internal/backoff/option.go index f4d5fbd26d..b6f9285ce3 100644 --- a/internal/backoff/option.go +++ b/internal/backoff/option.go @@ -25,17 +25,15 @@ import ( type Option func(*backoff) -var ( - defaultOpts = []Option{ - WithInitialDuration("10ms"), - WithBackOffTimeLimit("5m"), - WithMaximumDuration("1h"), - WithJitterLimit("1m"), - WithBackOffFactor(1.5), - WithRetryCount(50), - WithEnableErrorLog(), - } -) +var defaultOpts = []Option{ + WithInitialDuration("10ms"), + WithBackOffTimeLimit("5m"), + WithMaximumDuration("1h"), + WithJitterLimit("1m"), + WithBackOffFactor(1.5), + WithRetryCount(50), + WithEnableErrorLog(), +} func WithInitialDuration(dur string) Option { return func(b *backoff) { diff --git a/internal/backoff/option_test.go b/internal/backoff/option_test.go index f259fb8ef2..89d79c7aaa 100644 --- a/internal/backoff/option_test.go +++ b/internal/backoff/option_test.go @@ -20,7 +20,6 @@ import ( "time" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) diff --git a/internal/cache/cache.go b/internal/cache/cache.go index 41c9e4a345..8532cb856f 100644 --- a/internal/cache/cache.go +++ b/internal/cache/cache.go @@ -26,7 +26,7 @@ import ( "github.com/vdaas/vald/internal/errors" ) -// Cache represent the cache interface to store cache +// Cache represent the cache interface to store cache. 
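// Editor's sketch (not part of the patch) of calling the reworked backoff.Do
// exercised by the tests above: the operation now receives the context and
// returns (result, retryable, error); a false retryable flag makes Do return
// the error immediately, and once the retry budget is exhausted the last error
// is returned instead of nil. The fetch helper and its error classification
// are illustrative assumptions.
package main

import (
	"context"
	"errors"
	"log"

	"github.com/vdaas/vald/internal/backoff"
)

var errTemporary = errors.New("temporary failure")

// fetch stands in for a flaky operation such as an RPC or a disk read.
func fetch(ctx context.Context) (string, error) {
	return "", errTemporary
}

func main() {
	ctx := context.Background()
	b := backoff.New(
		backoff.WithRetryCount(6),
		backoff.WithInitialDuration("10ms"),
	)

	res, err := b.Do(ctx, func(ctx context.Context) (interface{}, bool, error) {
		v, err := fetch(ctx)
		if err != nil {
			// the middle return value tells backoff whether this error is worth retrying
			return nil, errors.Is(err, errTemporary), err
		}
		return v, false, nil
	})
	if err != nil {
		log.Printf("gave up after retries: %v", err)
		return
	}
	log.Printf("got: %v", res.(string))
}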
type Cache interface { Start(context.Context) Get(string) (interface{}, bool) @@ -42,7 +42,7 @@ type cache struct { expiredHook func(context.Context, string) } -// New returns the Cache instance or error +// New returns the Cache instance or error. func New(opts ...Option) (cc Cache, err error) { c := new(cache) for _, opt := range append(defaultOpts, opts...) { diff --git a/internal/cache/cache_test.go b/internal/cache/cache_test.go index bc7056ef3c..9e85c54e78 100644 --- a/internal/cache/cache_test.go +++ b/internal/cache/cache_test.go @@ -22,16 +22,13 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. +var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), +} func TestNew(t *testing.T) { type args struct { @@ -128,7 +125,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotCc, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/cache/cacher/cacher_test.go b/internal/cache/cacher/cacher_test.go index 26e491fb8f..2ef230f78e 100644 --- a/internal/cache/cacher/cacher_test.go +++ b/internal/cache/cacher/cacher_test.go @@ -82,7 +82,6 @@ func TestType_String(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -172,7 +171,6 @@ func TestToType(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/cache/gache/gache_test.go b/internal/cache/gache/gache_test.go index 4c47376e78..7f8f382af1 100644 --- a/internal/cache/gache/gache_test.go +++ b/internal/cache/gache/gache_test.go @@ -29,12 +29,10 @@ import ( "go.uber.org/goleak" ) -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. 
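// Editor's sketch (not part of the patch) of how the goleakIgnoreOptions
// declared in these cache test hunks are consumed: they are passed to
// goleak.VerifyNone so the timer goroutine started by the external fastime
// package is not reported as a leak of the test itself. The test body is
// illustrative.
package cache_test

import (
	"testing"

	"go.uber.org/goleak"
)

var goleakIgnoreOptions = []goleak.Option{
	// fastime keeps a background timer goroutine alive; ignore it.
	goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"),
}

func TestTouchingFastimeSketch(t *testing.T) {
	t.Parallel()
	defer goleak.VerifyNone(t, goleakIgnoreOptions...)
	// ... exercise code that (indirectly) starts fastime's timer goroutine ...
}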
+var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), +} func TestNew(t *testing.T) { type args struct { @@ -567,7 +565,6 @@ func Test_cache_GetAndDelete(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/cache/gache/option_test.go b/internal/cache/gache/option_test.go index bddd758939..ef9c58c050 100644 --- a/internal/cache/gache/option_test.go +++ b/internal/cache/gache/option_test.go @@ -26,7 +26,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/kpango/gache" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) @@ -378,3 +377,107 @@ func TestWithExpireCheckDuration(t *testing.T) { }) } } + +func Test_defaultOptions(t *testing.T) { + // Change interface type to the type of object you are testing + type T = interface{} + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + want want + // Use the first line if the option returns an error. otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func() + afterFunc func() + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got error = %v, want %v", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got = %v, want %v", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got = %v, want %v", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := defaultOptions() + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := defaultOptions() + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/internal/cache/option.go b/internal/cache/option.go index 2ac50dc94e..d9395f6b96 100644 --- a/internal/cache/option.go +++ b/internal/cache/option.go @@ -27,13 +27,11 @@ import ( // Option represents the functional option for cache. 
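// Editor's sketch (not part of the patch) of constructing the cache whose
// defaults are listed above: New applies the defaults first (gache backend,
// 30m expiry, 5m expiry check) and then the caller's options, returning an
// error for an unusable configuration. The Set call and the key/value used
// here are assumptions about the rest of the Cache interface.
package main

import (
	"context"
	"log"

	"github.com/vdaas/vald/internal/cache"
)

func main() {
	ctx := context.Background()
	c, err := cache.New(
		cache.WithExpireDuration("10m"),
		cache.WithExpiredHook(func(_ context.Context, key string) {
			log.Printf("expired: %s", key)
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	c.Start(ctx) // begins the periodic expiration check configured by the options above

	c.Set("vector-1", []float32{0.1, 0.2}) // assumed Set method; only Start/Get appear in this hunk
	if v, ok := c.Get("vector-1"); ok {
		log.Printf("cached: %v", v)
	}
}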
type Option func(*cache) -var ( - defaultOpts = []Option{ - WithType(cacher.GACHE.String()), - WithExpireDuration("30m"), - WithExpireCheckDuration("5m"), - } -) +var defaultOpts = []Option{ + WithType(cacher.GACHE.String()), + WithExpireDuration("30m"), + WithExpireCheckDuration("5m"), +} // WithExpiredHook returns Option after set expiredHook when f is not nil. func WithExpiredHook(f func(context.Context, string)) Option { diff --git a/internal/client/agent/grpc/client.go b/internal/client/agent/grpc/client.go deleted file mode 100644 index 0b1adfce04..0000000000 --- a/internal/client/agent/grpc/client.go +++ /dev/null @@ -1,402 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package grpc provides agent ngt gRPC client functions -package grpc - -import ( - "context" - - agent "github.com/vdaas/vald/apis/grpc/agent/core" - "github.com/vdaas/vald/internal/client" - "github.com/vdaas/vald/internal/net/grpc" -) - -// Client represents agent NGT client interface. -type Client interface { - client.Client - client.ObjectReader - client.Indexer -} - -type agentClient struct { - addr string - opts []grpc.Option - grpc.Client -} - -// New returns Client implementation if no error occurs. -func New(ctx context.Context, opts ...Option) (Client, error) { - c := new(agentClient) - for _, opt := range append(defaultOptions, opts...) { - opt(c) - } - - c.Client = grpc.New(c.opts...) - - if err := c.Client.Connect(ctx, c.addr); err != nil { - return nil, err - } - - return c, nil -} - -func (c *agentClient) Exists( - ctx context.Context, - req *client.ObjectID, -) (*client.ObjectID, error) { - res, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).Exists(ctx, req, copts...) - }, - ) - if err != nil { - return nil, err - } - return res.(*client.ObjectID), nil -} - -func (c *agentClient) Search( - ctx context.Context, - req *client.SearchRequest, -) (*client.SearchResponse, error) { - res, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).Search(ctx, req, copts...) - }, - ) - if err != nil { - return nil, err - } - return res.(*client.SearchResponse), nil -} - -func (c *agentClient) SearchByID( - ctx context.Context, - req *client.SearchIDRequest, -) (*client.SearchResponse, error) { - res, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).SearchByID(ctx, req, copts...) 
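// Editor's sketch (not part of the patch) of the unary round-trip shape the
// deleted agent client above was built on: every method funnels through
// grpc.Client.Do with a closure that turns the pooled connection into a
// concrete stub call, then type-asserts the interface{} result. The Echo
// service and stub below are hypothetical; only the Do call shape mirrors the
// removed code.
package example

import (
	"context"

	"github.com/vdaas/vald/internal/net/grpc"
)

// EchoRequest/EchoResponse and echoStub stand in for a generated gRPC stub.
type EchoRequest struct{ Msg string }

type EchoResponse struct{ Msg string }

type echoStub interface {
	Echo(ctx context.Context, req *EchoRequest, opts ...grpc.CallOption) (*EchoResponse, error)
}

// newEchoStub is a placeholder for the generated NewXxxClient constructor.
func newEchoStub(conn *grpc.ClientConn) echoStub { panic("generated code stands here") }

type echoClient struct {
	addr string
	grpc.Client
}

func (c *echoClient) Echo(ctx context.Context, req *EchoRequest) (*EchoResponse, error) {
	res, err := c.Client.Do(ctx, c.addr,
		func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
			return newEchoStub(conn).Echo(ctx, req, copts...)
		},
	)
	if err != nil {
		return nil, err
	}
	return res.(*EchoResponse), nil
}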
- }, - ) - if err != nil { - return nil, err - } - return res.(*client.SearchResponse), nil -} - -func (c *agentClient) StreamSearch( - ctx context.Context, - dataProvider func() *client.SearchRequest, - f func(*client.SearchResponse, error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (res interface{}, err error) { - var st agent.Agent_StreamSearchClient - - st, err = agent.NewAgentClient(conn).StreamSearch(ctx, copts...) - if err != nil { - return nil, err - } - - return nil, streamSearch(st, - func() interface{} { - return dataProvider() - }, f) - }, - ) - return err -} - -func (c *agentClient) StreamSearchByID( - ctx context.Context, - dataProvider func() *client.SearchIDRequest, - f func(*client.SearchResponse, error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (res interface{}, err error) { - var st agent.Agent_StreamSearchByIDClient - - st, err = agent.NewAgentClient(conn).StreamSearchByID(ctx, copts...) - if err != nil { - return nil, err - } - - return nil, streamSearch(st, - func() interface{} { - return dataProvider() - }, f, - ) - }, - ) - return err -} - -func (c *agentClient) Insert( - ctx context.Context, - req *client.ObjectVector, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).Insert(ctx, req, copts...) - }, - ) - return err -} - -func (c *agentClient) StreamInsert( - ctx context.Context, - dataProvider func() *client.ObjectVector, - f func(error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (res interface{}, err error) { - var st agent.Agent_StreamInsertClient - - st, err = agent.NewAgentClient(conn).StreamInsert(ctx, copts...) - if err != nil { - return nil, err - } - - return nil, stream(st, - func() interface{} { - return dataProvider() - }, f, - ) - }, - ) - return err -} - -func (c *agentClient) MultiInsert( - ctx context.Context, - req *client.ObjectVectors, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).MultiInsert(ctx, req, copts...) - }, - ) - return err -} - -func (c *agentClient) Update( - ctx context.Context, - req *client.ObjectVector, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).Update(ctx, req, copts...) - }, - ) - return err -} - -func (c *agentClient) StreamUpdate( - ctx context.Context, - dataProvider func() *client.ObjectVector, - f func(error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (res interface{}, err error) { - var st agent.Agent_StreamUpdateClient - - st, err = agent.NewAgentClient(conn).StreamUpdate(ctx, copts...) 
- if err != nil { - return nil, err - } - - return nil, stream(st, - func() interface{} { - return dataProvider() - }, f, - ) - }, - ) - return err -} - -func (c *agentClient) MultiUpdate( - ctx context.Context, - req *client.ObjectVectors, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).MultiUpdate(ctx, req, copts...) - }, - ) - return err -} - -func (c *agentClient) Remove( - ctx context.Context, - req *client.ObjectID, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).Remove(ctx, req, copts...) - }, - ) - return err -} - -func (c *agentClient) StreamRemove( - ctx context.Context, - dataProvider func() *client.ObjectID, - f func(error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - st, err := agent.NewAgentClient(conn).StreamRemove(ctx, copts...) - if err != nil { - return nil, err - } - - return nil, stream(st, - func() interface{} { - return dataProvider() - }, f, - ) - }, - ) - return err -} - -func (c *agentClient) MultiRemove( - ctx context.Context, - req *client.ObjectIDs, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).MultiRemove(ctx, req, copts...) - }, - ) - return err -} - -func (c *agentClient) GetObject( - ctx context.Context, - req *client.ObjectID, -) (*client.ObjectVector, error) { - res, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).GetObject(ctx, req, copts...) - }, - ) - if err != nil { - return nil, err - } - return res.(*client.ObjectVector), nil -} - -func (c *agentClient) StreamGetObject( - ctx context.Context, - dataProvider func() *client.ObjectID, - f func(*client.ObjectVector, error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (res interface{}, err error) { - var st agent.Agent_StreamGetObjectClient - - st, err = agent.NewAgentClient(conn).StreamGetObject(ctx, copts...) - if err != nil { - return nil, err - } - - return nil, grpc.BidirectionalStreamClient(st, - func() interface{} { - return dataProvider() - }, func() interface{} { - return new(client.ObjectVector) - }, func(res interface{}, err error) { - f(res.(*client.ObjectVector), err) - }) - }, - ) - return err -} - -func (c *agentClient) CreateIndex( - ctx context.Context, - req *client.ControlCreateIndexRequest, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).CreateIndex(ctx, req, copts...) - }, - ) - return err -} - -func (c *agentClient) SaveIndex(ctx context.Context) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).SaveIndex(ctx, new(client.Empty), copts...) 
- }, - ) - return err -} - -func (c *agentClient) CreateAndSaveIndex( - ctx context.Context, - req *client.ControlCreateIndexRequest, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).CreateAndSaveIndex(ctx, req, copts...) - }, - ) - return err -} - -func (c *agentClient) IndexInfo(ctx context.Context) (*client.InfoIndex, error) { - res, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return agent.NewAgentClient(conn).IndexInfo(ctx, new(client.Empty), copts...) - }, - ) - if err != nil { - return nil, err - } - return res.(*client.InfoIndex), err -} - -func streamSearch( - st grpc.ClientStream, - dataProvider func() interface{}, - f func(*client.SearchResponse, error), -) error { - return grpc.BidirectionalStreamClient(st, dataProvider, - func() interface{} { - return new(client.SearchResponse) - }, func(res interface{}, err error) { - f(res.(*client.SearchResponse), err) - }) -} - -func stream( - st grpc.ClientStream, - dataProvider func() interface{}, - f func(error), -) error { - return grpc.BidirectionalStreamClient(st, dataProvider, - func() interface{} { - return new(client.Empty) - }, func(_ interface{}, err error) { - f(err) - }) -} diff --git a/internal/client/agent/grpc/option.go b/internal/client/agent/grpc/option.go deleted file mode 100644 index e8e9fab315..0000000000 --- a/internal/client/agent/grpc/option.go +++ /dev/null @@ -1,62 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package grpc provides agent ngt gRPC client functions -package grpc - -import ( - "github.com/vdaas/vald/internal/config" - "github.com/vdaas/vald/internal/net/grpc" -) - -// Option is agentClient configure. -type Option func(*agentClient) - -var ( - defaultOptions = []Option{ - WithAddr("127.0.0.1:8082"), - WithGRPCClientOption( - (&config.GRPCClient{ - Addrs: []string{ - "127.0.0.1:8200", - }, - CallOption: &config.CallOption{ - MaxRecvMsgSize: 100000000000, - }, - DialOption: &config.DialOption{ - Insecure: true, - }, - }).Bind().Opts()...), - } -) - -// WithAddr returns Option that sets addr. -func WithAddr(addr string) Option { - return func(c *agentClient) { - if len(addr) != 0 { - c.addr = addr - } - } -} - -// WithGRPCClientOption returns Option that sets options for gRPC. -func WithGRPCClientOption(opts ...grpc.Option) Option { - return func(c *agentClient) { - if len(opts) != 0 { - c.opts = append(c.opts, opts...) 
- } - } -} diff --git a/internal/client/agent/rest/client.go b/internal/client/agent/rest/client.go deleted file mode 100644 index d3454c524c..0000000000 --- a/internal/client/agent/rest/client.go +++ /dev/null @@ -1,197 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package rest provides agent ngt REST client functions -package rest - -import ( - "context" - "net/http" - - "github.com/vdaas/vald/internal/client" - "github.com/vdaas/vald/internal/errors" - "github.com/vdaas/vald/internal/net/http/json" -) - -type Client interface { - client.Client - client.ObjectReader - client.Indexer -} - -type agentClient struct { - addr string -} - -func New(ctx context.Context, opts ...Option) Client { - c := new(agentClient) - - for _, opt := range append(defaultOptions, opts...) { - opt(c) - } - - return c -} - -func (c *agentClient) Exists( - ctx context.Context, - req *client.ObjectID, -) (res *client.ObjectID, err error) { - res = new(client.ObjectID) - err = json.Request(ctx, http.MethodGet, c.addr+"/exists/"+req.GetId(), req, res) - return -} - -func (c *agentClient) Search( - ctx context.Context, - req *client.SearchRequest, -) (res *client.SearchResponse, err error) { - res = new(client.SearchResponse) - err = json.Request(ctx, http.MethodPost, c.addr+"/search", req, res) - return -} - -func (c *agentClient) SearchByID( - ctx context.Context, - req *client.SearchIDRequest, -) (res *client.SearchResponse, err error) { - res = new(client.SearchResponse) - err = json.Request(ctx, http.MethodPost, c.addr+"/search/id", req, res) - return -} - -func (c *agentClient) StreamSearch( - ctx context.Context, - dataProvider func() *client.SearchRequest, - f func(*client.SearchResponse, error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *agentClient) StreamSearchByID( - ctx context.Context, - dataProvider func() *client.SearchIDRequest, - f func(*client.SearchResponse, error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *agentClient) Insert( - ctx context.Context, - req *client.ObjectVector, -) error { - return json.Request(ctx, http.MethodPost, c.addr+"/insert", req, nil) -} - -func (c *agentClient) StreamInsert( - ctx context.Context, - dataProvider func() *client.ObjectVector, - f func(error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *agentClient) MultiInsert( - ctx context.Context, - objectVectors *client.ObjectVectors, -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *agentClient) Update( - ctx context.Context, - req *client.ObjectVector, -) error { - return json.Request(ctx, http.MethodPost, c.addr+"/update", req, nil) -} - -func (c *agentClient) StreamUpdate( - ctx context.Context, - dataProvider func() *client.ObjectVector, - f func(error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *agentClient) MultiUpdate( - ctx context.Context, - objectVectors *client.ObjectVectors, -) error 
{ - return errors.ErrUnsupportedClientMethod -} - -func (c *agentClient) Remove( - ctx context.Context, - req *client.ObjectID, -) error { - return json.Request(ctx, http.MethodDelete, c.addr+"/remove/"+req.GetId(), req, nil) -} - -func (c *agentClient) StreamRemove( - ctx context.Context, - dataProvider func() *client.ObjectID, - f func(error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *agentClient) MultiRemove( - ctx context.Context, - req *client.ObjectIDs, -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *agentClient) GetObject( - ctx context.Context, - req *client.ObjectID, -) (res *client.ObjectVector, err error) { - res = new(client.ObjectVector) - err = json.Request(ctx, http.MethodGet, c.addr+"/object/"+req.GetId(), req, res) - return -} - -func (c *agentClient) StreamGetObject( - ctx context.Context, - dataProvider func() *client.ObjectID, - f func(*client.ObjectVector, error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *agentClient) CreateIndex( - ctx context.Context, - req *client.ControlCreateIndexRequest, -) error { - return json.Request(ctx, http.MethodGet, c.addr+"/index/create", req, nil) -} - -func (c *agentClient) SaveIndex(ctx context.Context) error { - return json.Request(ctx, http.MethodGet, c.addr+"/index/save", nil, nil) -} - -func (c *agentClient) CreateAndSaveIndex( - ctx context.Context, - req *client.ControlCreateIndexRequest, -) error { - return json.Request(ctx, http.MethodGet, c.addr+"/index/createandsave", nil, nil) -} - -func (c *agentClient) IndexInfo(ctx context.Context) (res *client.InfoIndex, err error) { - res = new(client.InfoIndex) - err = json.Request(ctx, http.MethodGet, c.addr+"/index/info", nil, res) - return -} diff --git a/internal/client/client.go b/internal/client/client.go deleted file mode 100644 index 88fbc25d1d..0000000000 --- a/internal/client/client.go +++ /dev/null @@ -1,89 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -// Package client provides vald component client interfaces -package client - -import ( - "context" - - "github.com/vdaas/vald/apis/grpc/payload" -) - -type ( - ObjectID = payload.Object_ID - ObjectIDs = payload.Object_IDs - ObjectVector = payload.Object_Vector - ObjectVectors = payload.Object_Vectors - SearchRequest = payload.Search_Request - SearchIDRequest = payload.Search_IDRequest - SearchResponse = payload.Search_Response - ControlCreateIndexRequest = payload.Control_CreateIndexRequest - InfoIndex = payload.Info_Index - MetaObject = payload.Backup_MetaVector - Empty = payload.Empty - SearchConfig = payload.Search_Config - ObjectDistance = payload.Object_Distance - BackupMetaVector = payload.Backup_MetaVector -) - -type Client interface { - Reader - Writer -} - -type Reader interface { - Exists(context.Context, *ObjectID) (*ObjectID, error) - Search(context.Context, *SearchRequest) (*SearchResponse, error) - SearchByID(context.Context, *SearchIDRequest) (*SearchResponse, error) - StreamSearch(context.Context, func() *SearchRequest, func(*SearchResponse, error)) error - StreamSearchByID(context.Context, func() *SearchIDRequest, func(*SearchResponse, error)) error -} - -type Writer interface { - Insert(context.Context, *ObjectVector) error - StreamInsert(context.Context, func() *ObjectVector, func(error)) error - MultiInsert(context.Context, *ObjectVectors) error - Update(context.Context, *ObjectVector) error - StreamUpdate(context.Context, func() *ObjectVector, func(error)) error - MultiUpdate(context.Context, *ObjectVectors) error - Remove(context.Context, *ObjectID) error - StreamRemove(context.Context, func() *ObjectID, func(error)) error - MultiRemove(context.Context, *ObjectIDs) error -} - -type Upserter interface { - Upsert(context.Context, *ObjectVector) error - MultiUpsert(context.Context, *ObjectVectors) error - StreamUpsert(context.Context, func() *ObjectVector, func(error)) error -} - -type ObjectReader interface { - GetObject(context.Context, *ObjectID) (*ObjectVector, error) - StreamGetObject(context.Context, func() *ObjectID, func(*ObjectVector, error)) error -} - -type MetaObjectReader interface { - GetObject(context.Context, *ObjectID) (*MetaObject, error) - StreamGetObject(context.Context, func() *ObjectID, func(*MetaObject, error)) error -} - -type Indexer interface { - CreateIndex(context.Context, *ControlCreateIndexRequest) error - SaveIndex(context.Context) error - CreateAndSaveIndex(context.Context, *ControlCreateIndexRequest) error - IndexInfo(context.Context) (*InfoIndex, error) -} diff --git a/internal/client/gateway/vald/grpc/client.go b/internal/client/gateway/vald/grpc/client.go deleted file mode 100644 index 94fdc9fd13..0000000000 --- a/internal/client/gateway/vald/grpc/client.go +++ /dev/null @@ -1,424 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -// Package grpc provides vald gRPC client functions -package grpc - -import ( - "context" - - "github.com/vdaas/vald/apis/grpc/gateway/vald" - "github.com/vdaas/vald/internal/client" - "github.com/vdaas/vald/internal/config" - igrpc "github.com/vdaas/vald/internal/net/grpc" - "google.golang.org/grpc" -) - -// Client represents gateway client interface. -type Client interface { - client.Client - client.MetaObjectReader - client.Upserter -} - -type gatewayClient struct { - addr string - cfg *config.GRPCClient - igrpc.Client -} - -// New returns Client implementation if no error occurs. -func New(ctx context.Context, opts ...Option) (Client, error) { - c := new(gatewayClient) - - for _, opt := range append(defaultOptions, opts...) { - opt(c) - } - - c.Client = igrpc.New(c.cfg.Opts()...) - - if err := c.Client.Connect(ctx, c.addr); err != nil { - return nil, err - } - - return c, nil -} - -func (c *gatewayClient) Exists( - ctx context.Context, - req *client.ObjectID, -) (*client.ObjectID, error) { - res, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).Exists(ctx, req, copts...) - }, - ) - if err != nil { - return nil, err - } - return res.(*client.ObjectID), nil -} - -func (c *gatewayClient) Search( - ctx context.Context, - req *client.SearchRequest, -) (*client.SearchResponse, error) { - res, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).Search(ctx, req, copts...) - }, - ) - if err != nil { - return nil, err - } - return res.(*client.SearchResponse), nil -} - -func (c *gatewayClient) SearchByID( - ctx context.Context, - req *client.SearchIDRequest, -) (*client.SearchResponse, error) { - res, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).SearchByID(ctx, req, copts...) - }, - ) - if err != nil { - return nil, err - } - return res.(*client.SearchResponse), nil -} - -func (c *gatewayClient) StreamSearch( - ctx context.Context, - dataProvider func() *client.SearchRequest, - f func(*client.SearchResponse, error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (res interface{}, err error) { - var st vald.Vald_StreamSearchClient - - st, err = vald.NewValdClient(conn).StreamSearch(ctx, copts...) - if err != nil { - return nil, err - } - - return nil, streamSearch(st, - func() interface{} { - if d := dataProvider(); d != nil { - return d - } - return nil - }, f, - ) - }, - ) - return err -} - -func (c *gatewayClient) StreamSearchByID( - ctx context.Context, - dataProvider func() *client.SearchIDRequest, - f func(*client.SearchResponse, error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (res interface{}, err error) { - var st vald.Vald_StreamSearchByIDClient - - st, err = vald.NewValdClient(conn).StreamSearchByID(ctx, copts...) 
- if err != nil { - return nil, err - } - - return nil, streamSearch(st, - func() interface{} { - if d := dataProvider(); d != nil { - return d - } - return nil - }, f, - ) - }, - ) - return err -} - -func (c *gatewayClient) Insert( - ctx context.Context, - req *client.ObjectVector, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).Insert(ctx, req, copts...) - }, - ) - return err -} - -func (c *gatewayClient) StreamInsert( - ctx context.Context, - dataProvider func() *client.ObjectVector, - f func(error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (res interface{}, err error) { - var st vald.Vald_StreamInsertClient - - st, err = vald.NewValdClient(conn).StreamInsert(ctx, copts...) - if err != nil { - return nil, err - } - - return nil, stream(st, func() interface{} { - if d := dataProvider(); d != nil { - return d - } - return nil - }, f) - }, - ) - return err -} - -func (c *gatewayClient) MultiInsert( - ctx context.Context, - req *client.ObjectVectors, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).MultiInsert(ctx, req, copts...) - }, - ) - return err -} - -func (c *gatewayClient) Update( - ctx context.Context, - req *client.ObjectVector, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).Update(ctx, req, copts...) - }, - ) - return err -} - -func (c *gatewayClient) StreamUpdate( - ctx context.Context, - dataProvider func() *client.ObjectVector, - f func(error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (res interface{}, err error) { - var st vald.Vald_StreamUpdateClient - - st, err = vald.NewValdClient(conn).StreamUpdate(ctx, copts...) - if err != nil { - return nil, err - } - - return nil, stream(st, func() interface{} { - if d := dataProvider(); d != nil { - return d - } - return nil - }, f) - }, - ) - return err -} - -func (c *gatewayClient) MultiUpdate( - ctx context.Context, - req *client.ObjectVectors, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).MultiUpdate(ctx, req, copts...) - }, - ) - return err -} - -func (c *gatewayClient) Upsert( - ctx context.Context, - req *client.ObjectVector, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).Upsert(ctx, req, copts...) - }, - ) - return err -} - -func (c *gatewayClient) MultiUpsert( - ctx context.Context, - req *client.ObjectVectors, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).MultiUpsert(ctx, req, copts...) 
- }, - ) - return err -} - -func (c *gatewayClient) StreamUpsert( - ctx context.Context, - dataProvider func() *client.ObjectVector, - f func(error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) { - var st vald.Vald_StreamUpsertClient - - st, err := vald.NewValdClient(conn).StreamUpsert(ctx, copts...) - if err != nil { - return nil, err - } - - return nil, stream(st, - func() interface{} { - if d := dataProvider(); d != nil { - return d - } - return nil - }, f, - ) - }, - ) - return err -} - -func (c *gatewayClient) Remove( - ctx context.Context, - req *client.ObjectID, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).Remove(ctx, req, copts...) - }, - ) - return err -} - -func (c *gatewayClient) StreamRemove( - ctx context.Context, - dataProvider func() *client.ObjectID, - f func(error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (res interface{}, err error) { - var st vald.Vald_StreamRemoveClient - - st, err = vald.NewValdClient(conn).StreamRemove(ctx, copts...) - if err != nil { - return nil, err - } - - return nil, stream(st, - func() interface{} { - return dataProvider() - }, f, - ) - }, - ) - return err -} - -func (c *gatewayClient) MultiRemove( - ctx context.Context, - req *client.ObjectIDs, -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).MultiRemove(ctx, req, copts...) - }, - ) - return err -} - -func (c *gatewayClient) GetObject( - ctx context.Context, - req *client.ObjectID, -) (*client.MetaObject, error) { - res, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).GetObject(ctx, req, copts...) - }, - ) - if err != nil { - return nil, err - } - return res.(*client.MetaObject), err -} - -func (c *gatewayClient) StreamGetObject( - ctx context.Context, - dataProvider func() *client.ObjectID, - f func(*client.MetaObject, error), -) error { - _, err := c.Client.Do(ctx, c.addr, - func(ctx context.Context, conn *igrpc.ClientConn, copts ...igrpc.CallOption) (res interface{}, err error) { - var st vald.Vald_StreamGetObjectClient - - st, err = vald.NewValdClient(conn).StreamGetObject(ctx, copts...) 
- if err != nil { - return nil, err - } - - return nil, igrpc.BidirectionalStreamClient(st, - func() interface{} { - return dataProvider() - }, func() interface{} { - return new(client.BackupMetaVector) - }, func(res interface{}, err error) { - f(res.(*client.MetaObject), err) - }) - }, - ) - return err -} - -func streamSearch( - st grpc.ClientStream, - dataProvider func() interface{}, - f func(*client.SearchResponse, error), -) error { - return igrpc.BidirectionalStreamClient(st, dataProvider, - func() interface{} { - return new(client.SearchResponse) - }, func(res interface{}, err error) { - f(res.(*client.SearchResponse), err) - }, - ) -} - -func stream( - st grpc.ClientStream, - dataProvider func() interface{}, - f func(error), -) error { - return igrpc.BidirectionalStreamClient(st, dataProvider, - func() interface{} { - return new(client.Empty) - }, func(_ interface{}, err error) { - f(err) - }, - ) -} diff --git a/internal/client/gateway/vald/grpc/option.go b/internal/client/gateway/vald/grpc/option.go deleted file mode 100644 index 2e746f5a43..0000000000 --- a/internal/client/gateway/vald/grpc/option.go +++ /dev/null @@ -1,52 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package grpc provides vald gRPC client functions -package grpc - -import "github.com/vdaas/vald/internal/config" - -// Option is gatewayClient configure. -type Option func(*gatewayClient) - -var ( - defaultOptions = []Option{ - WithAddr("0.0.0.0:8081"), - WithGRPCClientConfig(&config.GRPCClient{ - Addrs: []string{ - "0.0.0.0:8081", - }, - }), - } -) - -// WithAddr returns Option that sets addr. -func WithAddr(addr string) Option { - return func(c *gatewayClient) { - if len(addr) != 0 { - c.addr = addr - } - } -} - -// WithGRPCClientConfig returns Option that sets config. -func WithGRPCClientConfig(cfg *config.GRPCClient) Option { - return func(c *gatewayClient) { - if cfg != nil { - c.cfg = cfg.Bind() - } - } -} diff --git a/internal/client/gateway/vald/rest/client.go b/internal/client/gateway/vald/rest/client.go deleted file mode 100644 index d5f2150c77..0000000000 --- a/internal/client/gateway/vald/rest/client.go +++ /dev/null @@ -1,195 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -// Package rest provides vald REST client functions -package rest - -import ( - "context" - "net/http" - - "github.com/vdaas/vald/internal/client" - "github.com/vdaas/vald/internal/errors" - "github.com/vdaas/vald/internal/net/http/json" -) - -// Client represents gateway client interface. -type Client interface { - client.Client - client.MetaObjectReader - client.Upserter -} - -type gatewayClient struct { - addr string -} - -// New returns Client implementation. -func New(opts ...Option) Client { - c := new(gatewayClient) - for _, opt := range append(defaultOptions, opts...) { - opt(c) - } - return c -} - -func (c *gatewayClient) Exists( - ctx context.Context, - req *client.ObjectID, -) (resp *client.ObjectID, err error) { - resp = new(client.ObjectID) - err = json.Request(ctx, http.MethodGet, c.addr+"/exists/"+req.GetId(), req, resp) - return -} - -func (c *gatewayClient) Search( - ctx context.Context, - req *client.SearchRequest, -) (resp *client.SearchResponse, err error) { - resp = new(client.SearchResponse) - err = json.Request(ctx, http.MethodPost, c.addr+"/search", req, resp) - return -} - -func (c *gatewayClient) SearchByID( - ctx context.Context, - req *client.SearchIDRequest, -) (resp *client.SearchResponse, err error) { - resp = new(client.SearchResponse) - err = json.Request(ctx, http.MethodPost, c.addr+"/search/id", req, resp) - return -} - -func (c *gatewayClient) StreamSearch( - ctx context.Context, - dataProvider func() *client.SearchRequest, - f func(*client.SearchResponse, error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *gatewayClient) StreamSearchByID( - ctx context.Context, - dataProvider func() *client.SearchIDRequest, - f func(*client.SearchResponse, error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *gatewayClient) Insert( - ctx context.Context, - req *client.ObjectVector, -) error { - return json.Request(ctx, http.MethodPost, c.addr+"/insert", req, nil) -} - -func (c *gatewayClient) StreamInsert( - ctx context.Context, - dataProvider func() *client.ObjectVector, - f func(error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *gatewayClient) MultiInsert( - ctx context.Context, - req *client.ObjectVectors, -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *gatewayClient) Update( - ctx context.Context, - req *client.ObjectVector, -) error { - return json.Request(ctx, http.MethodPost, c.addr+"/update", req, nil) -} - -func (c *gatewayClient) StreamUpdate( - ctx context.Context, - dataProvider func() *client.ObjectVector, - f func(error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *gatewayClient) MultiUpdate( - ctx context.Context, - req *client.ObjectVectors, -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *gatewayClient) Upsert( - ctx context.Context, - req *client.ObjectVector, -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *gatewayClient) MultiUpsert( - context.Context, - *client.ObjectVectors, -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *gatewayClient) StreamUpsert( - context.Context, - func() *client.ObjectVector, - func(error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *gatewayClient) Remove( - ctx context.Context, - req *client.ObjectID, -) error { - return json.Request(ctx, http.MethodDelete, c.addr+"/remove/"+req.GetId(), nil, nil) -} - -func (c *gatewayClient) StreamRemove( - ctx context.Context, - dataProvider func() *client.ObjectID, - f 
func(error), -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *gatewayClient) MultiRemove( - ctx context.Context, - req *client.ObjectIDs, -) error { - return errors.ErrUnsupportedClientMethod -} - -func (c *gatewayClient) GetObject( - ctx context.Context, - req *client.ObjectID, -) (resp *client.MetaObject, err error) { - resp = new(client.MetaObject) - err = json.Request(ctx, http.MethodGet, c.addr+"/object/"+req.GetId(), nil, nil) - return -} - -func (c *gatewayClient) StreamGetObject( - ctx context.Context, - dataProvider func() *client.ObjectID, - f func(*client.MetaObject, error), -) error { - return errors.ErrUnsupportedClientMethod -} diff --git a/internal/client/v1/client/agent/core/client.go b/internal/client/v1/client/agent/core/client.go new file mode 100644 index 0000000000..a36baa53c5 --- /dev/null +++ b/internal/client/v1/client/agent/core/client.go @@ -0,0 +1,115 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package core provides agent ngt gRPC client functions +package core + +import ( + "context" + + agent "github.com/vdaas/vald/apis/grpc/v1/agent/core" + "github.com/vdaas/vald/internal/client/v1/client" + "github.com/vdaas/vald/internal/client/v1/client/vald" + "github.com/vdaas/vald/internal/net/grpc" +) + +// Client represents agent NGT client interface. +type Client interface { + client.Client + client.ObjectReader + client.Indexer +} + +type agentClient struct { + vald.Client + addr string + c grpc.Client +} + +// New returns Client implementation if no error occurs. +func New(opts ...Option) Client { + c := new(agentClient) + for _, opt := range opts { + opt(c) + } + return &agentClient{ + Client: vald.New( + vald.WithAddr(c.addr), + vald.WithClient(c.c), + ), + addr: c.addr, + c: c.c, + } +} + +func (c *agentClient) CreateIndex( + ctx context.Context, + req *client.ControlCreateIndexRequest, + opts ...grpc.CallOption, +) (*client.Empty, error) { + _, err := c.c.Do(ctx, c.addr, + func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + return agent.NewAgentClient(conn).CreateIndex(ctx, req, copts...) + }, + ) + return nil, err +} + +func (c *agentClient) SaveIndex( + ctx context.Context, + req *client.Empty, + opts ...grpc.CallOption, +) (*client.Empty, error) { + _, err := c.c.Do(ctx, c.addr, + func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + return agent.NewAgentClient(conn).SaveIndex(ctx, new(client.Empty), copts...) + }, + ) + return nil, err +} + +func (c *agentClient) CreateAndSaveIndex( + ctx context.Context, + req *client.ControlCreateIndexRequest, + opts ...grpc.CallOption, +) (*client.Empty, error) { + _, err := c.c.Do(ctx, c.addr, + func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + return agent.NewAgentClient(conn).CreateAndSaveIndex(ctx, req, copts...) 
+ }, + ) + return nil, err +} + +func (c *agentClient) IndexInfo( + ctx context.Context, + req *client.Empty, + opts ...grpc.CallOption, +) (res *client.InfoIndexCount, err error) { + _, err = c.c.Do(ctx, c.addr, + func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + res, err := agent.NewAgentClient(conn).IndexInfo(ctx, new(client.Empty), copts...) + if err != nil { + return nil, err + } + return res, err + }, + ) + if err != nil { + return nil, err + } + return res, nil +} diff --git a/internal/client/v1/client/agent/core/client_test.go b/internal/client/v1/client/agent/core/client_test.go new file mode 100644 index 0000000000..2fbbd3cc20 --- /dev/null +++ b/internal/client/v1/client/agent/core/client_test.go @@ -0,0 +1,519 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package core provides agent ngt gRPC client functions +package core + +import ( + "context" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/client/v1/client" + "github.com/vdaas/vald/internal/client/v1/client/vald" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/net/grpc" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type args struct { + opts []Option + } + type want struct { + want Client + } + type test struct { + name string + args args + want want + checkFunc func(want, Client) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Client) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := New(test.args.opts...) 
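+			// Usage sketch (illustrative only, not exercised by this test): the v1 core
+			// client is assembled from functional options. WithAddr is defined in this
+			// package; wiring the underlying grpc.Client is assumed to happen through a
+			// separate option not shown in this hunk, and the address below is made up.
+			//
+			//	c := New(WithAddr("127.0.0.1:8081"))
+			//	// With a connected client, the index-control calls are plain unary RPC wrappers:
+			//	//   _, err := c.CreateIndex(ctx, new(client.ControlCreateIndexRequest))
+			//	//   info, err := c.IndexInfo(ctx, new(client.Empty))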
+ if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_agentClient_CreateIndex(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + req *client.ControlCreateIndexRequest + opts []grpc.CallOption + } + type fields struct { + Client vald.Client + addr string + c grpc.Client + } + type want struct { + want *client.Empty + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, *client.Empty, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got *client.Empty, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + req: nil, + opts: nil, + }, + fields: fields { + Client: nil, + addr: "", + c: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + req: nil, + opts: nil, + }, + fields: fields { + Client: nil, + addr: "", + c: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &agentClient{ + Client: test.fields.Client, + addr: test.fields.addr, + c: test.fields.c, + } + + got, err := c.CreateIndex(test.args.ctx, test.args.req, test.args.opts...) 
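+			// Note: CreateIndex forwards the request to the agent's CreateIndex RPC and
+			// only propagates the error; the returned *client.Empty is always nil.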
+ if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_agentClient_SaveIndex(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + req *client.Empty + opts []grpc.CallOption + } + type fields struct { + Client vald.Client + addr string + c grpc.Client + } + type want struct { + want *client.Empty + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, *client.Empty, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got *client.Empty, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + req: nil, + opts: nil, + }, + fields: fields { + Client: nil, + addr: "", + c: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + req: nil, + opts: nil, + }, + fields: fields { + Client: nil, + addr: "", + c: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &agentClient{ + Client: test.fields.Client, + addr: test.fields.addr, + c: test.fields.c, + } + + got, err := c.SaveIndex(test.args.ctx, test.args.req, test.args.opts...) 
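+			// Note: SaveIndex ignores the supplied req and always sends a fresh
+			// client.Empty to the agent's SaveIndex RPC; as with CreateIndex, only the
+			// error is returned.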
+ if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_agentClient_CreateAndSaveIndex(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + req *client.ControlCreateIndexRequest + opts []grpc.CallOption + } + type fields struct { + Client vald.Client + addr string + c grpc.Client + } + type want struct { + want *client.Empty + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, *client.Empty, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got *client.Empty, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + req: nil, + opts: nil, + }, + fields: fields { + Client: nil, + addr: "", + c: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + req: nil, + opts: nil, + }, + fields: fields { + Client: nil, + addr: "", + c: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &agentClient{ + Client: test.fields.Client, + addr: test.fields.addr, + c: test.fields.c, + } + + got, err := c.CreateAndSaveIndex(test.args.ctx, test.args.req, test.args.opts...) 
+ if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_agentClient_IndexInfo(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + req *client.Empty + opts []grpc.CallOption + } + type fields struct { + Client vald.Client + addr string + c grpc.Client + } + type want struct { + wantRes *client.InfoIndexCount + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, *client.InfoIndexCount, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotRes *client.InfoIndexCount, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + req: nil, + opts: nil, + }, + fields: fields { + Client: nil, + addr: "", + c: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + req: nil, + opts: nil, + }, + fields: fields { + Client: nil, + addr: "", + c: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &agentClient{ + Client: test.fields.Client, + addr: test.fields.addr, + c: test.fields.c, + } + + gotRes, err := c.IndexInfo(test.args.ctx, test.args.req, test.args.opts...) + if err := test.checkFunc(test.want, gotRes, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/client/agent/rest/option.go b/internal/client/v1/client/agent/core/option.go similarity index 76% rename from internal/client/agent/rest/option.go rename to internal/client/v1/client/agent/core/option.go index 86c13bf79e..1a0f1c4799 100644 --- a/internal/client/agent/rest/option.go +++ b/internal/client/v1/client/agent/core/option.go @@ -14,21 +14,19 @@ // limitations under the License. // -// Package rest provides agent ngt REST client functions -package rest +// Package core provides agent ngt gRPC client functions +package core +// Option is agentClient configure. type Option func(*agentClient) -var ( - defaultOptions = []Option{ - WithAddr("http://127.0.0.1:8081"), - } -) +var defaultOptions = []Option{} +// WithAddr returns Option that sets addr. func WithAddr(addr string) Option { - return func(ac *agentClient) { + return func(c *agentClient) { if len(addr) != 0 { - ac.addr = addr + c.addr = addr } } } diff --git a/internal/client/agent/rest/option_test.go b/internal/client/v1/client/agent/core/option_test.go similarity index 88% rename from internal/client/agent/rest/option_test.go rename to internal/client/v1/client/agent/core/option_test.go index 3541380e5c..864adb55d9 100644 --- a/internal/client/agent/rest/option_test.go +++ b/internal/client/v1/client/agent/core/option_test.go @@ -14,8 +14,8 @@ // limitations under the License. 
// -// Package rest provides agent ngt REST client functions -package rest +// Package core provides agent ngt gRPC client functions +package core import ( "testing" @@ -24,6 +24,8 @@ import ( ) func TestWithAddr(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { addr string @@ -61,7 +63,7 @@ func TestWithAddr(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -97,9 +99,11 @@ func TestWithAddr(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -120,7 +124,7 @@ func TestWithAddr(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -128,7 +132,7 @@ func TestWithAddr(t *testing.T) { got := WithAddr(test.args.addr) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/internal/client/v1/client/client.go b/internal/client/v1/client/client.go new file mode 100644 index 0000000000..5176ed0488 --- /dev/null +++ b/internal/client/v1/client/client.go @@ -0,0 +1,80 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// Package client provides vald component client interfaces +package client + +import ( + "github.com/vdaas/vald/apis/grpc/v1/agent/core" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" +) + +type ( + ObjectID = payload.Object_ID + ObjectIDs = payload.Object_IDs + ObjectVector = payload.Object_Vector + ObjectVectors = payload.Object_Vectors + ObjectLocation = payload.Object_Location + ObjectLocations = payload.Object_Locations + SearchRequest = payload.Search_Request + SearchIDRequest = payload.Search_IDRequest + SearchResponse = payload.Search_Response + SearchResponses = payload.Search_Responses + InsertRequest = payload.Insert_Request + UpdateRequest = payload.Update_Request + UpsertRequest = payload.Upsert_Request + RemoveRequest = payload.Remove_Request + SearchMultiRequest = payload.Search_MultiRequest + SearchIDMultiRequest = payload.Search_MultiIDRequest + InsertMultiRequest = payload.Insert_MultiRequest + UpdateMultiRequest = payload.Update_MultiRequest + UpsertMultiRequest = payload.Upsert_MultiRequest + RemoveMultiRequest = payload.Remove_MultiRequest + ControlCreateIndexRequest = payload.Control_CreateIndexRequest + InfoIndex = payload.Info_Index + InfoIndexCount = payload.Info_Index_Count + MetaObject = payload.Backup_Vector + Empty = payload.Empty + SearchConfig = payload.Search_Config + ObjectDistance = payload.Object_Distance + BackupMetaVector = payload.Backup_Vector + + Searcher = vald.SearchClient + Inserter = vald.InsertClient + Updater = vald.UpdateClient + Upsertor = vald.UpsertClient + Remover = vald.RemoveClient + ObjectReader = vald.ObjectClient + Indexer = core.AgentClient +) + +type Client interface { + Reader + Writer +} + +type Reader interface { + Searcher + ObjectReader +} + +type Writer interface { + Inserter + Updater + Upsertor + Remover +} diff --git a/internal/client/compressor/client.go b/internal/client/v1/client/compressor/client.go similarity index 91% rename from internal/client/compressor/client.go rename to internal/client/v1/client/compressor/client.go index 3ed97ec1d3..adb2c2a243 100644 --- a/internal/client/compressor/client.go +++ b/internal/client/v1/client/compressor/client.go @@ -21,18 +21,18 @@ import ( "context" "reflect" - "github.com/vdaas/vald/apis/grpc/manager/compressor" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/compressor" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" ) type Client interface { Start(ctx context.Context) (<-chan error, error) - GetVector(ctx context.Context, uuid string) (*payload.Backup_MetaVector, error) + GetVector(ctx context.Context, uuid string) (*payload.Backup_Vector, error) GetLocation(ctx context.Context, uuid string) ([]string, error) - Register(ctx context.Context, vec *payload.Backup_MetaVector) error - RegisterMultiple(ctx context.Context, vecs *payload.Backup_MetaVectors) error + Register(ctx context.Context, vec *payload.Backup_Vector) error + RegisterMultiple(ctx context.Context, vecs *payload.Backup_Vectors) error Remove(ctx context.Context, uuid string) error RemoveMultiple(ctx context.Context, uuids ...string) error RegisterIPs(ctx context.Context, ips []string) error @@ -59,7 +59,7 @@ func (c *client) Start(ctx context.Context) (<-chan error, error) { return c.client.StartConnectionMonitor(ctx) } -func (c *client) GetVector(ctx context.Context, uuid string) (vec *payload.Backup_MetaVector, err error) { +func (c 
*client) GetVector(ctx context.Context, uuid string) (vec *payload.Backup_Vector, err error) { _, err = c.client.Do(ctx, c.addr, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err error) { vec, err = compressor.NewBackupClient(conn).GetVector(ctx, &payload.Backup_GetVector_Request{ @@ -88,7 +88,7 @@ func (c *client) GetLocation(ctx context.Context, uuid string) (ipList []string, return } -func (c *client) Register(ctx context.Context, vec *payload.Backup_MetaVector) (err error) { +func (c *client) Register(ctx context.Context, vec *payload.Backup_Vector) (err error) { _, err = c.client.Do(ctx, c.addr, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err error) { _, err = compressor.NewBackupClient(conn).Register(ctx, vec, copts...) @@ -100,7 +100,7 @@ func (c *client) Register(ctx context.Context, vec *payload.Backup_MetaVector) ( return } -func (c *client) RegisterMultiple(ctx context.Context, vecs *payload.Backup_MetaVectors) (err error) { +func (c *client) RegisterMultiple(ctx context.Context, vecs *payload.Backup_Vectors) (err error) { _, err = c.client.Do(ctx, c.addr, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err error) { _, err = compressor.NewBackupClient(conn).RegisterMulti(ctx, vecs, copts...) diff --git a/internal/client/compressor/client_test.go b/internal/client/v1/client/compressor/client_test.go similarity index 93% rename from internal/client/compressor/client_test.go rename to internal/client/v1/client/compressor/client_test.go index adfbca79b9..96a3bec365 100644 --- a/internal/client/compressor/client_test.go +++ b/internal/client/v1/client/compressor/client_test.go @@ -22,14 +22,14 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -82,9 +82,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -99,12 +101,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotC, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -170,9 +172,11 @@ func Test_client_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -191,12 +195,12 @@ func Test_client_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_GetVector(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -206,7 +210,7 @@ func Test_client_GetVector(t *testing.T) { client grpc.Client } type want struct { - wantVec *payload.Backup_MetaVector + wantVec *payload.Backup_Vector err error } type test struct { @@ -214,11 +218,11 @@ func Test_client_GetVector(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Backup_MetaVector, error) 
error + checkFunc func(want, *payload.Backup_Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotVec *payload.Backup_MetaVector, err error) error { + defaultCheckFunc := func(w want, gotVec *payload.Backup_Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -265,9 +269,11 @@ func Test_client_GetVector(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -286,12 +292,12 @@ func Test_client_GetVector(t *testing.T) { if err := test.checkFunc(test.want, gotVec, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_GetLocation(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -360,9 +366,11 @@ func Test_client_GetLocation(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -381,15 +389,15 @@ func Test_client_GetLocation(t *testing.T) { if err := test.checkFunc(test.want, gotIpList, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Register(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - vec *payload.Backup_MetaVector + vec *payload.Backup_Vector } type fields struct { addr string @@ -451,9 +459,11 @@ func Test_client_Register(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -472,15 +482,15 @@ func Test_client_Register(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_RegisterMultiple(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - vecs *payload.Backup_MetaVectors + vecs *payload.Backup_Vectors } type fields struct { addr string @@ -542,9 +552,11 @@ func Test_client_RegisterMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -563,12 +575,12 @@ func Test_client_RegisterMultiple(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Remove(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -633,9 +645,11 @@ func Test_client_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -654,12 +668,12 @@ func Test_client_Remove(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_RemoveMultiple(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuids []string @@ -724,9 +738,11 @@ func 
Test_client_RemoveMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -745,12 +761,12 @@ func Test_client_RemoveMultiple(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_RegisterIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context ips []string @@ -815,9 +831,11 @@ func Test_client_RegisterIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -836,12 +854,12 @@ func Test_client_RegisterIPs(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_RemoveIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context ips []string @@ -906,9 +924,11 @@ func Test_client_RemoveIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -927,7 +947,6 @@ func Test_client_RemoveIPs(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/client/compressor/option.go b/internal/client/v1/client/compressor/option.go similarity index 93% rename from internal/client/compressor/option.go rename to internal/client/v1/client/compressor/option.go index fda001bef8..1260effecd 100644 --- a/internal/client/compressor/option.go +++ b/internal/client/v1/client/compressor/option.go @@ -23,13 +23,13 @@ import ( type Option func(c *client) error -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithAddr(addr string) Option { return func(c *client) error { - c.addr = addr + if len(addr) != 0 { + c.addr = addr + } return nil } } diff --git a/internal/client/compressor/option_test.go b/internal/client/v1/client/compressor/option_test.go similarity index 89% rename from internal/client/compressor/option_test.go rename to internal/client/v1/client/compressor/option_test.go index 076a5f6c2e..2cf1ec1f3a 100644 --- a/internal/client/compressor/option_test.go +++ b/internal/client/v1/client/compressor/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestWithAddr(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { addr string @@ -63,7 +64,7 @@ func TestWithAddr(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithAddr(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func 
TestWithAddr(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithAddr(t *testing.T) { got := WithAddr(test.args.addr) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -139,6 +142,8 @@ func TestWithAddr(t *testing.T) { } func TestWithClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { gc grpc.Client @@ -176,7 +181,7 @@ func TestWithClient(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -212,9 +217,11 @@ func TestWithClient(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -235,7 +242,7 @@ func TestWithClient(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -243,7 +250,7 @@ func TestWithClient(t *testing.T) { got := WithClient(test.args.gc) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/internal/client/discoverer/discover.go b/internal/client/v1/client/discoverer/discover.go similarity index 98% rename from internal/client/discoverer/discover.go rename to internal/client/v1/client/discoverer/discover.go index 7a9b0480f1..5f15a51e18 100644 --- a/internal/client/discoverer/discover.go +++ b/internal/client/v1/client/discoverer/discover.go @@ -25,8 +25,8 @@ import ( "sync/atomic" "time" - "github.com/vdaas/vald/apis/grpc/discoverer" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/discoverer" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" @@ -292,7 +292,6 @@ func (c *client) discover(ctx context.Context, ech chan<- error) (err error) { } } return nil, nil - }); err != nil { log.Warn("failed to discover addrs from discoverer API, trying to discover from dns...\t" + err.Error()) connected, err = c.dnsDiscovery(ctx, ech) diff --git a/internal/client/discoverer/discover_test.go b/internal/client/v1/client/discoverer/discover_test.go similarity index 97% rename from internal/client/discoverer/discover_test.go rename to internal/client/v1/client/discoverer/discover_test.go index 2c270e29e4..4c069be187 100644 --- a/internal/client/discoverer/discover_test.go +++ b/internal/client/v1/client/discoverer/discover_test.go @@ -27,11 +27,11 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts 
[]Option } @@ -84,9 +84,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -101,12 +103,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotD, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -214,9 +216,11 @@ func Test_client_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -249,12 +253,12 @@ func Test_client_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_GetAddrs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -358,9 +362,11 @@ func Test_client_GetAddrs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -393,12 +399,12 @@ func Test_client_GetAddrs(t *testing.T) { if err := test.checkFunc(test.want, gotAddrs); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_GetClient(t *testing.T) { + t.Parallel() type fields struct { autoconn bool onDiscover func(ctx context.Context, c Client, addrs []string) error @@ -492,9 +498,11 @@ func Test_client_GetClient(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -527,12 +535,12 @@ func Test_client_GetClient(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_connect(t *testing.T) { + t.Parallel() type args struct { ctx context.Context addr string @@ -639,9 +647,11 @@ func Test_client_connect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -674,12 +684,12 @@ func Test_client_connect(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_disconnect(t *testing.T) { + t.Parallel() type args struct { ctx context.Context addr string @@ -786,9 +796,11 @@ func Test_client_disconnect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -821,12 +833,12 @@ func Test_client_disconnect(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_dnsDiscovery(t *testing.T) { + t.Parallel() type args struct { ctx context.Context ech chan<- error @@ -937,9 +949,11 @@ func 
Test_client_dnsDiscovery(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -972,12 +986,12 @@ func Test_client_dnsDiscovery(t *testing.T) { if err := test.checkFunc(test.want, gotAddrs, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_discover(t *testing.T) { + t.Parallel() type args struct { ctx context.Context ech chan<- error @@ -1084,9 +1098,11 @@ func Test_client_discover(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1119,7 +1135,6 @@ func Test_client_discover(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/client/discoverer/option.go b/internal/client/v1/client/discoverer/option.go similarity index 96% rename from internal/client/discoverer/option.go rename to internal/client/v1/client/discoverer/option.go index 38417ec837..b9e02c2c9c 100644 --- a/internal/client/discoverer/option.go +++ b/internal/client/v1/client/discoverer/option.go @@ -29,13 +29,11 @@ import ( type Option func(c *client) error -var ( - defaultOpts = []Option{ - WithErrGroup(errgroup.Get()), - WithAutoConnect(true), - WithNamespace("vald"), - } -) +var defaultOpts = []Option{ + WithErrGroup(errgroup.Get()), + WithAutoConnect(true), + WithNamespace("vald"), +} func WithOnDiscoverFunc(f func(ctx context.Context, c Client, addrs []string) error) Option { return func(c *client) error { diff --git a/internal/client/discoverer/option_test.go b/internal/client/v1/client/discoverer/option_test.go similarity index 88% rename from internal/client/discoverer/option_test.go rename to internal/client/v1/client/discoverer/option_test.go index 7c17e64261..f2e103d30e 100644 --- a/internal/client/discoverer/option_test.go +++ b/internal/client/v1/client/discoverer/option_test.go @@ -23,11 +23,12 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestWithOnDiscoverFunc(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { f func(ctx context.Context, c Client, addrs []string) error @@ -65,7 +66,7 @@ func TestWithOnDiscoverFunc(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -101,9 +102,11 @@ func TestWithOnDiscoverFunc(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -124,7 +127,7 @@ func TestWithOnDiscoverFunc(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -132,7 +135,7 @@ func TestWithOnDiscoverFunc(t 
*testing.T) { got := WithOnDiscoverFunc(test.args.f) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -141,6 +144,8 @@ func TestWithOnDiscoverFunc(t *testing.T) { } func TestWithOnConnectFunc(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { f func(ctx context.Context, c Client, addr string) error @@ -178,7 +183,7 @@ func TestWithOnConnectFunc(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -214,9 +219,11 @@ func TestWithOnConnectFunc(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -237,7 +244,7 @@ func TestWithOnConnectFunc(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -245,7 +252,7 @@ func TestWithOnConnectFunc(t *testing.T) { got := WithOnConnectFunc(test.args.f) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -254,6 +261,8 @@ func TestWithOnConnectFunc(t *testing.T) { } func TestWithOnDisconnectFunc(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { f func(ctx context.Context, c Client, addr string) error @@ -291,7 +300,7 @@ func TestWithOnDisconnectFunc(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -327,9 +336,11 @@ func TestWithOnDisconnectFunc(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -350,7 +361,7 @@ func TestWithOnDisconnectFunc(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -358,7 +369,7 @@ func TestWithOnDisconnectFunc(t *testing.T) { got := WithOnDisconnectFunc(test.args.f) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -367,6 +378,8 @@ func TestWithOnDisconnectFunc(t *testing.T) { } func TestWithDiscovererClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { gc grpc.Client @@ -404,7 +417,7 @@ func TestWithDiscovererClient(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) 
error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -440,9 +453,11 @@ func TestWithDiscovererClient(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -463,7 +478,7 @@ func TestWithDiscovererClient(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -471,7 +486,7 @@ func TestWithDiscovererClient(t *testing.T) { got := WithDiscovererClient(test.args.gc) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -480,6 +495,8 @@ func TestWithDiscovererClient(t *testing.T) { } func TestWithDiscovererAddr(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { addr string @@ -517,7 +534,7 @@ func TestWithDiscovererAddr(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -553,9 +570,11 @@ func TestWithDiscovererAddr(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -576,7 +595,7 @@ func TestWithDiscovererAddr(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -584,7 +603,7 @@ func TestWithDiscovererAddr(t *testing.T) { got := WithDiscovererAddr(test.args.addr) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -593,6 +612,8 @@ func TestWithDiscovererAddr(t *testing.T) { } func TestWithDiscovererHostPort(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { host string @@ -631,7 +652,7 @@ func TestWithDiscovererHostPort(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -669,9 +690,11 @@ func TestWithDiscovererHostPort(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -692,7 +715,7 @@ func TestWithDiscovererHostPort(t *testing.T) { } */ - // Uncomment this block if the option returns an error, 
otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -700,7 +723,7 @@ func TestWithDiscovererHostPort(t *testing.T) { got := WithDiscovererHostPort(test.args.host, test.args.port) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -709,6 +732,8 @@ func TestWithDiscovererHostPort(t *testing.T) { } func TestWithDiscoverDuration(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -746,7 +771,7 @@ func TestWithDiscoverDuration(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -782,9 +807,11 @@ func TestWithDiscoverDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -805,7 +832,7 @@ func TestWithDiscoverDuration(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -813,7 +840,7 @@ func TestWithDiscoverDuration(t *testing.T) { got := WithDiscoverDuration(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -822,6 +849,8 @@ func TestWithDiscoverDuration(t *testing.T) { } func TestWithOptions(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { opts []grpc.Option @@ -859,7 +888,7 @@ func TestWithOptions(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -895,9 +924,11 @@ func TestWithOptions(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -918,7 +949,7 @@ func TestWithOptions(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -926,7 +957,7 @@ func TestWithOptions(t *testing.T) { got := WithOptions(test.args.opts...) 
obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -935,6 +966,8 @@ func TestWithOptions(t *testing.T) { } func TestWithAutoConnect(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { flg bool @@ -972,7 +1005,7 @@ func TestWithAutoConnect(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1008,9 +1041,11 @@ func TestWithAutoConnect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1031,7 +1066,7 @@ func TestWithAutoConnect(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1039,7 +1074,7 @@ func TestWithAutoConnect(t *testing.T) { got := WithAutoConnect(test.args.flg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1048,6 +1083,8 @@ func TestWithAutoConnect(t *testing.T) { } func TestWithName(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { name string @@ -1085,7 +1122,7 @@ func TestWithName(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1121,9 +1158,11 @@ func TestWithName(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1144,7 +1183,7 @@ func TestWithName(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1152,7 +1191,7 @@ func TestWithName(t *testing.T) { got := WithName(test.args.name) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1161,6 +1200,8 @@ func TestWithName(t *testing.T) { } func TestWithNamespace(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { ns string @@ -1198,7 +1239,7 @@ func TestWithNamespace(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1234,9 +1275,11 @@ func 
TestWithNamespace(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1257,7 +1300,7 @@ func TestWithNamespace(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1265,7 +1308,7 @@ func TestWithNamespace(t *testing.T) { got := WithNamespace(test.args.ns) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1274,6 +1317,8 @@ func TestWithNamespace(t *testing.T) { } func TestWithPort(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { port int @@ -1311,7 +1356,7 @@ func TestWithPort(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1347,9 +1392,11 @@ func TestWithPort(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1370,7 +1417,7 @@ func TestWithPort(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1378,7 +1425,7 @@ func TestWithPort(t *testing.T) { got := WithPort(test.args.port) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1387,6 +1434,8 @@ func TestWithPort(t *testing.T) { } func TestWithServiceDNSARecord(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { a string @@ -1424,7 +1473,7 @@ func TestWithServiceDNSARecord(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1460,9 +1509,11 @@ func TestWithServiceDNSARecord(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1483,7 +1534,7 @@ func TestWithServiceDNSARecord(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1491,7 +1542,7 @@ func TestWithServiceDNSARecord(t *testing.T) { got := WithServiceDNSARecord(test.args.a) obj := new(T) got(obj) - if err := 
test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1500,6 +1551,8 @@ func TestWithServiceDNSARecord(t *testing.T) { } func TestWithNodeName(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { nn string @@ -1537,7 +1590,7 @@ func TestWithNodeName(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1573,9 +1626,11 @@ func TestWithNodeName(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1596,7 +1651,7 @@ func TestWithNodeName(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1604,7 +1659,7 @@ func TestWithNodeName(t *testing.T) { got := WithNodeName(test.args.nn) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1613,6 +1668,8 @@ func TestWithNodeName(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -1650,7 +1707,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1686,9 +1743,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1709,7 +1768,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1717,7 +1776,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/internal/client/gateway/vald/rest/option.go b/internal/client/v1/client/vald/option.go similarity index 69% rename from internal/client/gateway/vald/rest/option.go rename to internal/client/v1/client/vald/option.go index 676016a30c..4b192cf913 100644 --- a/internal/client/gateway/vald/rest/option.go +++ b/internal/client/v1/client/vald/option.go @@ -14,23 +14,27 @@ // limitations under the License. 
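Editor's aside on the option hunks in this section: the len(addr) != 0 guard added to compressor/option.go above and the new vald/option.go whose body follows both settle on the same guarded functional-option idiom, where an option mutates the client only when it receives a usable value, so a zero value can never clobber a default. Below is a minimal, self-contained sketch of that idiom; the server type and its fields are illustrative stand-ins, not types from this repository, and it follows the error-free option variant used by the vald package (the compressor options additionally return an error).

// Sketch only: "server" and its fields are illustrative stand-ins,
// not types from this repository.
package main

import "fmt"

type server struct {
	addr string
	name string
}

type Option func(*server)

var defaultOpts = []Option{
	WithAddr("127.0.0.1:8081"),
}

// WithAddr overrides the address only when a non-empty value is supplied,
// mirroring the len(addr) != 0 / addr != "" guards in the hunks above.
func WithAddr(addr string) Option {
	return func(s *server) {
		if addr != "" {
			s.addr = addr
		}
	}
}

func WithName(name string) Option {
	return func(s *server) {
		if name != "" {
			s.name = name
		}
	}
}

func New(opts ...Option) *server {
	s := new(server)
	for _, opt := range append(defaultOpts, opts...) {
		opt(s)
	}
	return s
}

func main() {
	// WithAddr("") is silently ignored, so the default address survives.
	s := New(WithName("example"), WithAddr(""))
	fmt.Println(s.addr, s.name) // 127.0.0.1:8081 example
}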
// -// Package rest provides vald REST client functions -package rest +// Package vald provides vald grpc client library +package vald -// Option is gatewayClient configure -type Option func(*gatewayClient) +import "github.com/vdaas/vald/internal/net/grpc" -var ( - defaultOptions = []Option{ - WithAddr("http://127.0.0.1:8080"), - } -) +type Option func(*client) + +var defaultOpts = []Option{} -// WithAddr returns Option that sets addr func WithAddr(addr string) Option { - return func(c *gatewayClient) { - if len(addr) != 0 { + return func(c *client) { + if addr != "" { c.addr = addr } } } + +func WithClient(cl grpc.Client) Option { + return func(c *client) { + if cl != nil { + c.c = cl + } + } +} diff --git a/internal/client/agent/grpc/option_test.go b/internal/client/v1/client/vald/option_test.go similarity index 85% rename from internal/client/agent/grpc/option_test.go rename to internal/client/v1/client/vald/option_test.go index 5b1be577f3..f1d45b4cf6 100644 --- a/internal/client/agent/grpc/option_test.go +++ b/internal/client/v1/client/vald/option_test.go @@ -14,18 +14,19 @@ // limitations under the License. // -// Package grpc provides agent ngt gRPC client functions -package grpc +// Package vald provides vald grpc client library +package vald import ( "testing" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestWithAddr(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { addr string @@ -63,7 +64,7 @@ func TestWithAddr(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithAddr(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithAddr(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithAddr(t *testing.T) { got := WithAddr(test.args.addr) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -138,10 +141,12 @@ func TestWithAddr(t *testing.T) { } } -func TestWithGRPCClientOption(t *testing.T) { +func TestWithClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { - opts []grpc.Option + cl grpc.Client } type want struct { obj *T @@ -176,7 +181,7 @@ func TestWithGRPCClientOption(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -188,7 +193,7 @@ func TestWithGRPCClientOption(t *testing.T) { { name: "test_case_1", args: args { - opts: nil, + cl: nil, }, want: want { obj: new(T), @@ -202,7 +207,7 @@ func TestWithGRPCClientOption(t *testing.T) { return test { 
name: "test_case_2", args: args { - opts: nil, + cl: nil, }, want: want { obj: new(T), @@ -212,9 +217,11 @@ func TestWithGRPCClientOption(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -228,22 +235,22 @@ func TestWithGRPCClientOption(t *testing.T) { test.checkFunc = defaultCheckFunc } - got := WithGRPCClientOption(test.args.opts...) + got := WithClient(test.args.cl) obj := new(T) if err := test.checkFunc(test.want, obj, got(obj)); err != nil { tt.Errorf("error = %v", err) } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - got := WithGRPCClientOption(test.args.opts...) + got := WithClient(test.args.cl) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/internal/client/v1/client/vald/vald.go b/internal/client/v1/client/vald/vald.go new file mode 100644 index 0000000000..6ad393b7e6 --- /dev/null +++ b/internal/client/v1/client/vald/vald.go @@ -0,0 +1,314 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package vald provides vald grpc client library +package vald + +import ( + "context" + + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/net/grpc" +) + +type Client vald.Client + +type client struct { + addr string + c grpc.Client +} + +func New(opts ...Option) Client { + c := new(client) + for _, opt := range opts { + opt(c) + } + return c +} + +func (c *client) Exists(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (oid *payload.Object_ID, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + oid, err = vald.NewValdClient(conn).Exists(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return oid, nil +} + +func (c *client) Search(ctx context.Context, in *payload.Search_Request, opts ...grpc.CallOption) (res *payload.Search_Response, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).Search(ctx, in, append(copts, opts...)...) 
+ return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) SearchByID(ctx context.Context, in *payload.Search_IDRequest, opts ...grpc.CallOption) (res *payload.Search_Response, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).SearchByID(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) StreamSearch(ctx context.Context, opts ...grpc.CallOption) (res vald.Search_StreamSearchClient, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).StreamSearch(ctx, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) StreamSearchByID(ctx context.Context, opts ...grpc.CallOption) (res vald.Search_StreamSearchByIDClient, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).StreamSearchByID(ctx, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) MultiSearch(ctx context.Context, in *payload.Search_MultiRequest, opts ...grpc.CallOption) (res *payload.Search_Responses, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).MultiSearch(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) MultiSearchByID(ctx context.Context, in *payload.Search_MultiIDRequest, opts ...grpc.CallOption) (res *payload.Search_Responses, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).MultiSearchByID(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) Insert(ctx context.Context, in *payload.Insert_Request, opts ...grpc.CallOption) (res *payload.Object_Location, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).Insert(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) StreamInsert(ctx context.Context, opts ...grpc.CallOption) (res vald.Insert_StreamInsertClient, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).StreamInsert(ctx, append(copts, opts...)...) 
+ return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) MultiInsert(ctx context.Context, in *payload.Insert_MultiRequest, opts ...grpc.CallOption) (res *payload.Object_Locations, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).MultiInsert(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) Update(ctx context.Context, in *payload.Update_Request, opts ...grpc.CallOption) (res *payload.Object_Location, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).Update(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) StreamUpdate(ctx context.Context, opts ...grpc.CallOption) (res vald.Update_StreamUpdateClient, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).StreamUpdate(ctx, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) MultiUpdate(ctx context.Context, in *payload.Update_MultiRequest, opts ...grpc.CallOption) (res *payload.Object_Locations, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).MultiUpdate(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) Upsert(ctx context.Context, in *payload.Upsert_Request, opts ...grpc.CallOption) (res *payload.Object_Location, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).Upsert(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) StreamUpsert(ctx context.Context, opts ...grpc.CallOption) (res vald.Upsert_StreamUpsertClient, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).StreamUpsert(ctx, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) MultiUpsert(ctx context.Context, in *payload.Upsert_MultiRequest, opts ...grpc.CallOption) (res *payload.Object_Locations, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).MultiUpsert(ctx, in, append(copts, opts...)...) 
+ return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) Remove(ctx context.Context, in *payload.Remove_Request, opts ...grpc.CallOption) (res *payload.Object_Location, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).Remove(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) StreamRemove(ctx context.Context, opts ...grpc.CallOption) (res vald.Remove_StreamRemoveClient, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).StreamRemove(ctx, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) MultiRemove(ctx context.Context, in *payload.Remove_MultiRequest, opts ...grpc.CallOption) (res *payload.Object_Locations, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).MultiRemove(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) GetObject(ctx context.Context, in *payload.Object_ID, opts ...grpc.CallOption) (res *payload.Object_Vector, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).GetObject(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + +func (c *client) StreamGetObject(ctx context.Context, opts ...grpc.CallOption) (res vald.Object_StreamGetObjectClient, err error) { + _, err = c.c.Do(ctx, c.addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (interface{}, error) { + res, err = vald.NewValdClient(conn).StreamGetObject(ctx, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} diff --git a/internal/client/agent/grpc/client_test.go b/internal/client/v1/client/vald/vald_test.go similarity index 61% rename from internal/client/agent/grpc/client_test.go rename to internal/client/v1/client/vald/vald_test.go index 858de55cdd..53aead5961 100644 --- a/internal/client/agent/grpc/client_test.go +++ b/internal/client/v1/client/vald/vald_test.go @@ -14,42 +14,38 @@ // limitations under the License. 
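Editor's aside on the new internal/client/v1/client/vald package above: every method follows the same shape, routing the RPC through the shared grpc.Client.Do connection handling, rebuilding vald.NewValdClient on the leased connection, and merging per-call options with append(copts, opts...). A hedged usage sketch follows, assuming gc is an internal/net/grpc.Client that has already been constructed and started elsewhere; the gateway address, the query vector, and the Search_Config fields are placeholders rather than verified values from this repository.

package example

import (
	"context"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
	"github.com/vdaas/vald/internal/client/v1/client/vald"
	"github.com/vdaas/vald/internal/net/grpc"
)

// searchOnce issues a single Search through the new v1 client.
// Sketch only: gc must already be connected/started; the address and the
// Search_Config values are illustrative placeholders.
func searchOnce(ctx context.Context, gc grpc.Client, query []float32) (*payload.Search_Response, error) {
	vc := vald.New(
		vald.WithAddr("vald-lb-gateway.default.svc.cluster.local:8081"),
		vald.WithClient(gc),
	)
	return vc.Search(ctx, &payload.Search_Request{
		Vector: query,
		Config: &payload.Search_Config{Num: 10},
	})
}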
// -// Package grpc provides agent ngt gRPC client functions -package grpc +// Package vald provides vald grpc client library +package vald import ( "context" "reflect" "testing" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context opts []Option } type want struct { want Client - err error } type test struct { name string args args want want - checkFunc func(want, Client, error) error + checkFunc func(want, Client) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got Client, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) - } + defaultCheckFunc := func(w want, got Client) error { if !reflect.DeepEqual(got, w.want) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } @@ -61,7 +57,6 @@ func TestNew(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, opts: nil, }, want: want{}, @@ -75,7 +70,6 @@ func TestNew(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, opts: nil, }, want: want{}, @@ -85,9 +79,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -98,44 +94,44 @@ func TestNew(t *testing.T) { test.checkFunc = defaultCheckFunc } - got, err := New(test.args.ctx, test.args.opts...) - if err := test.checkFunc(test.want, got, err); err != nil { + got := New(test.args.opts...) 
+ if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_Exists(t *testing.T) { +func Test_client_Exists(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + ctx context.Context + in *payload.Object_ID + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - want *client.ObjectID - err error + wantOid *payload.Object_ID + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.ObjectID, error) error + checkFunc func(want, *payload.Object_ID, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.ObjectID, err error) error { + defaultCheckFunc := func(w want, gotOid *payload.Object_ID, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotOid, w.wantOid) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOid, w.wantOid) } return nil } @@ -146,12 +142,12 @@ func Test_agentClient_Exists(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -165,12 +161,12 @@ func Test_agentClient_Exists(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -179,9 +175,11 @@ func Test_agentClient_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -191,50 +189,49 @@ func Test_agentClient_Exists(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - got, err := c.Exists(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotOid, err := c.Exists(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotOid, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_Search(t *testing.T) { +func Test_client_Search(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.SearchRequest + ctx context.Context + in *payload.Search_Request + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - want *client.SearchResponse - err error + wantRes *payload.Search_Response + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.SearchResponse, error) error + checkFunc func(want, *payload.Search_Response, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.SearchResponse, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) } return nil } @@ -245,12 +242,12 @@ func Test_agentClient_Search(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -264,12 +261,12 @@ func Test_agentClient_Search(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -278,9 +275,11 @@ func Test_agentClient_Search(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -290,50 +289,49 @@ func Test_agentClient_Search(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - got, err := c.Search(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotRes, err := c.Search(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_SearchByID(t *testing.T) { +func Test_client_SearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.SearchIDRequest + ctx context.Context + in *payload.Search_IDRequest + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - want *client.SearchResponse - err error + wantRes *payload.Search_Response + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.SearchResponse, error) error + checkFunc func(want, *payload.Search_Response, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.SearchResponse, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) } return nil } @@ -344,12 +342,12 @@ func Test_agentClient_SearchByID(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -363,12 +361,12 @@ func Test_agentClient_SearchByID(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -377,9 +375,11 @@ func Test_agentClient_SearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -389,48 +389,49 @@ func Test_agentClient_SearchByID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - got, err := c.SearchByID(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotRes, err := c.SearchByID(test.args.ctx, test.args.in, test.args.opts...) 
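// Note: a hedged sketch of how the reworked unary API is called. Each method now takes
// a v1 payload message plus optional grpc.CallOptions and returns the v1 response type
// directly. Written as if it lived inside this package; the helper name, query vector,
// and Search_Config values are illustrative assumptions, and any setup of the
// underlying grpc.Client connection is omitted.
package vald

import (
	"context"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
)

// searchOnce issues a single Search call through the Client interface exercised above.
func searchOnce(ctx context.Context, c Client, vec []float32) (*payload.Search_Response, error) {
	return c.Search(ctx, &payload.Search_Request{
		Vector: vec,                             // query vector
		Config: &payload.Search_Config{Num: 10}, // assumed: return the top-10 neighbors
	})
}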
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_StreamSearch(t *testing.T) { +func Test_client_StreamSearch(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.SearchRequest - f func(*client.SearchResponse, error) + ctx context.Context + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes vald.Search_StreamSearchClient + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, vald.Search_StreamSearchClient, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes vald.Search_StreamSearchClient, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -440,13 +441,11 @@ func Test_agentClient_StreamSearch(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, - f: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -460,13 +459,11 @@ func Test_agentClient_StreamSearch(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, - f: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -475,9 +472,11 @@ func Test_agentClient_StreamSearch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -487,48 +486,49 @@ func Test_agentClient_StreamSearch(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.StreamSearch(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.StreamSearch(test.args.ctx, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_StreamSearchByID(t *testing.T) { +func Test_client_StreamSearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.SearchIDRequest - f func(*client.SearchResponse, error) + ctx context.Context + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes vald.Search_StreamSearchByIDClient + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, vald.Search_StreamSearchByIDClient, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes vald.Search_StreamSearchByIDClient, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -538,13 +538,11 @@ func Test_agentClient_StreamSearchByID(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, - f: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -558,13 +556,11 @@ func Test_agentClient_StreamSearchByID(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, - f: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -573,9 +569,11 @@ func Test_agentClient_StreamSearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -585,47 +583,50 @@ func Test_agentClient_StreamSearchByID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.StreamSearchByID(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.StreamSearchByID(test.args.ctx, test.args.opts...) 
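// Note: the Stream* methods no longer take a data provider and a callback; they return
// the generated bidirectional stream client, which the caller drives with the standard
// gRPC Send/Recv/CloseSend calls. A hedged sketch, written as if inside this package;
// the helper name and request values are illustrative, and it assumes the server sends
// one response per request.
package vald

import (
	"context"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
)

// runStreamSearch sends one Search_Request per vector over a StreamSearch stream and
// reads back the corresponding responses.
func runStreamSearch(ctx context.Context, c Client, vecs [][]float32) error {
	stream, err := c.StreamSearch(ctx)
	if err != nil {
		return err
	}
	for _, vec := range vecs {
		if err := stream.Send(&payload.Search_Request{
			Vector: vec,
			Config: &payload.Search_Config{Num: 10}, // assumed config
		}); err != nil {
			return err
		}
		if _, err := stream.Recv(); err != nil { // response type comes from the generated code
			return err
		}
	}
	return stream.CloseSend()
}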
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_Insert(t *testing.T) { +func Test_client_MultiSearch(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVector + ctx context.Context + in *payload.Search_MultiRequest + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes *payload.Search_Responses + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Search_Responses, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Responses, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -635,12 +636,12 @@ func Test_agentClient_Insert(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -654,12 +655,12 @@ func Test_agentClient_Insert(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -668,9 +669,11 @@ func Test_agentClient_Insert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -680,48 +683,50 @@ func Test_agentClient_Insert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.Insert(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.MultiSearch(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_StreamInsert(t *testing.T) { +func Test_client_MultiSearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectVector - f func(error) + ctx context.Context + in *payload.Search_MultiIDRequest + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes *payload.Search_Responses + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Search_Responses, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Responses, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -731,13 +736,12 @@ func Test_agentClient_StreamInsert(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, - f: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -751,13 +755,12 @@ func Test_agentClient_StreamInsert(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, - f: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -766,9 +769,11 @@ func Test_agentClient_StreamInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -778,47 +783,50 @@ func Test_agentClient_StreamInsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.StreamInsert(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.MultiSearchByID(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_MultiInsert(t *testing.T) { +func Test_client_Insert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVectors + ctx context.Context + in *payload.Insert_Request + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -828,12 +836,12 @@ func Test_agentClient_MultiInsert(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -847,12 +855,12 @@ func Test_agentClient_MultiInsert(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -861,9 +869,11 @@ func Test_agentClient_MultiInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -873,47 +883,49 @@ func Test_agentClient_MultiInsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.MultiInsert(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.Insert(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_Update(t *testing.T) { +func Test_client_StreamInsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVector + ctx context.Context + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes vald.Insert_StreamInsertClient + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, vald.Insert_StreamInsertClient, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes vald.Insert_StreamInsertClient, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -923,12 +935,11 @@ func Test_agentClient_Update(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -942,12 +953,11 @@ func Test_agentClient_Update(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -956,9 +966,11 @@ func Test_agentClient_Update(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -968,48 +980,50 @@ func Test_agentClient_Update(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.Update(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.StreamInsert(test.args.ctx, test.args.opts...) 
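// Note: a hedged sketch of the write path: Insert now takes a payload.Insert_Request
// wrapping an Object_Vector and returns the Object_Location reported by the cluster.
// Written as if inside this package; the helper name, ID, and vector are illustrative,
// and the optional Insert_Config is omitted.
package vald

import (
	"context"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
)

// insertOne registers a single vector and returns the uuid recorded in the location.
func insertOne(ctx context.Context, c Client, id string, vec []float32) (string, error) {
	loc, err := c.Insert(ctx, &payload.Insert_Request{
		Vector: &payload.Object_Vector{
			Id:     id,
			Vector: vec,
		},
	})
	if err != nil {
		return "", err
	}
	return loc.GetUuid(), nil
}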
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_StreamUpdate(t *testing.T) { +func Test_client_MultiInsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectVector - f func(error) + ctx context.Context + in *payload.Insert_MultiRequest + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1019,13 +1033,12 @@ func Test_agentClient_StreamUpdate(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, - f: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1039,13 +1052,12 @@ func Test_agentClient_StreamUpdate(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, - f: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1054,9 +1066,11 @@ func Test_agentClient_StreamUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1066,47 +1080,50 @@ func Test_agentClient_StreamUpdate(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.StreamUpdate(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.MultiInsert(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_MultiUpdate(t *testing.T) { +func Test_client_Update(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVectors + ctx context.Context + in *payload.Update_Request + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1116,12 +1133,12 @@ func Test_agentClient_MultiUpdate(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1135,12 +1152,12 @@ func Test_agentClient_MultiUpdate(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1149,9 +1166,11 @@ func Test_agentClient_MultiUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1161,47 +1180,49 @@ func Test_agentClient_MultiUpdate(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.MultiUpdate(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.Update(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_Remove(t *testing.T) { +func Test_client_StreamUpdate(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + ctx context.Context + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes vald.Update_StreamUpdateClient + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, vald.Update_StreamUpdateClient, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes vald.Update_StreamUpdateClient, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1211,12 +1232,11 @@ func Test_agentClient_Remove(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1230,12 +1250,11 @@ func Test_agentClient_Remove(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1244,9 +1263,11 @@ func Test_agentClient_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1256,48 +1277,50 @@ func Test_agentClient_Remove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.Remove(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.StreamUpdate(test.args.ctx, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_StreamRemove(t *testing.T) { +func Test_client_MultiUpdate(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectID - f func(error) + ctx context.Context + in *payload.Update_MultiRequest + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1307,13 +1330,12 @@ func Test_agentClient_StreamRemove(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, - f: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1327,13 +1349,12 @@ func Test_agentClient_StreamRemove(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, - f: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1342,9 +1363,11 @@ func Test_agentClient_StreamRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1354,47 +1377,50 @@ func Test_agentClient_StreamRemove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.StreamRemove(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.MultiUpdate(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_MultiRemove(t *testing.T) { +func Test_client_Upsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectIDs + ctx context.Context + in *payload.Upsert_Request + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1404,12 +1430,12 @@ func Test_agentClient_MultiRemove(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1423,12 +1449,12 @@ func Test_agentClient_MultiRemove(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1437,9 +1463,11 @@ func Test_agentClient_MultiRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1449,50 +1477,48 @@ func Test_agentClient_MultiRemove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.MultiRemove(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.Upsert(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_GetObject(t *testing.T) { +func Test_client_StreamUpsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + ctx context.Context + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - want *client.ObjectVector - err error + wantRes vald.Upsert_StreamUpsertClient + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.ObjectVector, error) error + checkFunc func(want, vald.Upsert_StreamUpsertClient, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.ObjectVector, err error) error { + defaultCheckFunc := func(w want, gotRes vald.Upsert_StreamUpsertClient, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) } return nil } @@ -1503,12 +1529,11 @@ func Test_agentClient_GetObject(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1522,12 +1547,11 @@ func Test_agentClient_GetObject(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1536,9 +1560,11 @@ func Test_agentClient_GetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1548,48 +1574,50 @@ func Test_agentClient_GetObject(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - got, err := c.GetObject(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotRes, err := c.StreamUpsert(test.args.ctx, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_StreamGetObject(t *testing.T) { +func Test_client_MultiUpsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectID - f func(*client.ObjectVector, error) + ctx context.Context + in *payload.Upsert_MultiRequest + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1599,13 +1627,12 @@ func Test_agentClient_StreamGetObject(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, - f: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1619,13 +1646,12 @@ func Test_agentClient_StreamGetObject(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, - f: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1634,9 +1660,11 @@ func Test_agentClient_StreamGetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1646,47 +1674,50 @@ func Test_agentClient_StreamGetObject(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.StreamGetObject(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.MultiUpsert(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_CreateIndex(t *testing.T) { +func Test_client_Remove(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ControlCreateIndexRequest + ctx context.Context + in *payload.Remove_Request + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1696,12 +1727,12 @@ func Test_agentClient_CreateIndex(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1715,12 +1746,12 @@ func Test_agentClient_CreateIndex(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1729,9 +1760,11 @@ func Test_agentClient_CreateIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1741,46 +1774,49 @@ func Test_agentClient_CreateIndex(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.CreateIndex(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.Remove(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_SaveIndex(t *testing.T) { +func Test_client_StreamRemove(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context + ctx context.Context + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes vald.Remove_StreamRemoveClient + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, vald.Remove_StreamRemoveClient, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes vald.Remove_StreamRemoveClient, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1790,11 +1826,11 @@ func Test_agentClient_SaveIndex(t *testing.T) { name: "test_case_1", args: args { ctx: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1808,11 +1844,11 @@ func Test_agentClient_SaveIndex(t *testing.T) { name: "test_case_2", args: args { ctx: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1821,9 +1857,11 @@ func Test_agentClient_SaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1833,47 +1871,50 @@ func Test_agentClient_SaveIndex(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.SaveIndex(test.args.ctx) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.StreamRemove(test.args.ctx, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_CreateAndSaveIndex(t *testing.T) { +func Test_client_MultiRemove(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ControlCreateIndexRequest + ctx context.Context + in *payload.Remove_MultiRequest + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - err error + wantRes *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1883,12 +1924,12 @@ func Test_agentClient_CreateAndSaveIndex(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1902,12 +1943,12 @@ func Test_agentClient_CreateAndSaveIndex(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1916,9 +1957,11 @@ func Test_agentClient_CreateAndSaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1928,49 +1971,49 @@ func Test_agentClient_CreateAndSaveIndex(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - err := c.CreateAndSaveIndex(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.MultiRemove(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_IndexInfo(t *testing.T) { +func Test_client_GetObject(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context + ctx context.Context + in *payload.Object_ID + opts []grpc.CallOption } type fields struct { - addr string - opts []grpc.Option - Client grpc.Client + addr string + c grpc.Client } type want struct { - want *client.InfoIndex - err error + wantRes *payload.Object_Vector + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.InfoIndex, error) error + checkFunc func(want, *payload.Object_Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.InfoIndex, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) } return nil } @@ -1981,11 +2024,12 @@ func Test_agentClient_IndexInfo(t *testing.T) { name: "test_case_1", args: args { ctx: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1999,11 +2043,12 @@ func Test_agentClient_IndexInfo(t *testing.T) { name: "test_case_2", args: args { ctx: nil, + in: nil, + opts: nil, }, fields: fields { addr: "", - opts: nil, - Client: nil, + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -2012,9 +2057,11 @@ func Test_agentClient_IndexInfo(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2024,119 +2071,49 @@ func Test_agentClient_IndexInfo(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, - opts: test.fields.opts, - Client: test.fields.Client, + c := &client{ + addr: test.fields.addr, + c: test.fields.c, } - got, err := c.IndexInfo(test.args.ctx) - if err := test.checkFunc(test.want, got, err); err != nil { + gotRes, err := c.GetObject(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_streamSearch(t *testing.T) { +func Test_client_StreamGetObject(t *testing.T) { + t.Parallel() type args struct { - st grpc.ClientStream - dataProvider func() interface{} - f func(*client.SearchResponse, error) - } - type want struct { - err error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - st: nil, - dataProvider: nil, - f: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - st: nil, - dataProvider: nil, - f: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - err := streamSearch(test.args.st, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { - tt.Errorf("error = %v", err) - } - - }) + ctx context.Context + opts []grpc.CallOption } -} - -func Test_stream(t *testing.T) { - type args struct { - st grpc.ClientStream - dataProvider func() interface{} - f func(error) + type fields struct { + addr string + c grpc.Client } type want struct { - err error + wantRes vald.Object_StreamGetObjectClient + err error } type test struct { name string args args + fields fields want want - checkFunc func(want, error) error + checkFunc func(want, vald.Object_StreamGetObjectClient, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes vald.Object_StreamGetObjectClient, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -2145,9 +2122,12 @@ func Test_stream(t *testing.T) { { name: "test_case_1", args: args { - st: nil, - dataProvider: nil, - f: nil, + ctx: nil, + opts: nil, + }, + fields: fields { + addr: "", + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -2160,9 +2140,12 @@ func Test_stream(t *testing.T) { return test { name: "test_case_2", args: args { - st: nil, - dataProvider: nil, - f: nil, + ctx: nil, + opts: nil, + }, + fields: fields { + addr: "", + c: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -2171,9 +2154,11 @@ func Test_stream(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2183,12 +2168,15 @@ func Test_stream(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } + c := &client{ + addr: test.fields.addr, + c: test.fields.c, + } - err 
:= stream(test.args.st, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := c.StreamGetObject(test.args.ctx, test.args.opts...) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/compress/gob/gob_mock_test.go b/internal/compress/gob/gob_mock_test.go new file mode 100644 index 0000000000..321a6acca7 --- /dev/null +++ b/internal/compress/gob/gob_mock_test.go @@ -0,0 +1,373 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +package gob + +import ( + "bytes" + "io" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestMockEncoder_Encode(t *testing.T) { + t.Parallel() + type args struct { + e interface{} + } + type fields struct { + EncodeFunc func(e interface{}) error + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + e: nil, + }, + fields: fields { + EncodeFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + e: nil, + }, + fields: fields { + EncodeFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockEncoder{ + EncodeFunc: test.fields.EncodeFunc, + } + + err := m.Encode(test.args.e) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockDecoder_Decode(t *testing.T) { + t.Parallel() + type args struct { + e interface{} + } + type fields struct { + DecodeFunc func(e interface{}) error + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + e: nil, + }, + fields: fields { + DecodeFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, 
+ */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + e: nil, + }, + fields: fields { + DecodeFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockDecoder{ + DecodeFunc: test.fields.DecodeFunc, + } + + err := m.Decode(test.args.e) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockTranscoder_NewEncoder(t *testing.T) { + t.Parallel() + type fields struct { + NewEncoderFunc func(w io.Writer) Encoder + NewDecoderFunc func(r io.Reader) Decoder + } + type want struct { + want Encoder + wantW string + } + type test struct { + name string + fields fields + want want + checkFunc func(want, Encoder, string) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got Encoder, gotW string) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + if !reflect.DeepEqual(gotW, w.wantW) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotW, w.wantW) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + NewEncoderFunc: nil, + NewDecoderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + NewEncoderFunc: nil, + NewDecoderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockTranscoder{ + NewEncoderFunc: test.fields.NewEncoderFunc, + NewDecoderFunc: test.fields.NewDecoderFunc, + } + w := &bytes.Buffer{} + + got := m.NewEncoder(w) + if err := test.checkFunc(test.want, got, w.String()); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockTranscoder_NewDecoder(t *testing.T) { + t.Parallel() + type args struct { + r io.Reader + } + type fields struct { + NewEncoderFunc func(w io.Writer) Encoder + NewDecoderFunc func(r io.Reader) Decoder + } + type want struct { + want Decoder + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, Decoder) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Decoder) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + r: nil, + }, + fields: fields { + NewEncoderFunc: nil, + NewDecoderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + r: nil, + }, + fields: fields { + NewEncoderFunc: nil, + NewDecoderFunc: nil, + }, + want: want{}, + checkFunc: 
defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockTranscoder{ + NewEncoderFunc: test.fields.NewEncoderFunc, + NewDecoderFunc: test.fields.NewDecoderFunc, + } + + got := m.NewDecoder(test.args.r) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/compress/gob/gob_test.go b/internal/compress/gob/gob_test.go new file mode 100644 index 0000000000..cee4e71639 --- /dev/null +++ b/internal/compress/gob/gob_test.go @@ -0,0 +1,234 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +package gob + +import ( + "bytes" + "io" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type want struct { + want Transcoder + } + type test struct { + name string + want want + checkFunc func(want, Transcoder) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got Transcoder) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := New() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_transcoder_NewEncoder(t *testing.T) { + t.Parallel() + type want struct { + want Encoder + wantW string + } + type test struct { + name string + t *transcoder + want want + checkFunc func(want, Encoder, string) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got Encoder, gotW string) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + if !reflect.DeepEqual(gotW, w.wantW) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotW, w.wantW) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: 
"test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + t := &transcoder{} + w := &bytes.Buffer{} + + got := t.NewEncoder(w) + if err := test.checkFunc(test.want, got, w.String()); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_transcoder_NewDecoder(t *testing.T) { + t.Parallel() + type args struct { + r io.Reader + } + type want struct { + want Decoder + } + type test struct { + name string + args args + t *transcoder + want want + checkFunc func(want, Decoder) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Decoder) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + t := &transcoder{} + + got := t.NewDecoder(test.args.r) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/compress/gob_option.go b/internal/compress/gob_option.go index 6373139d02..7900ce1d3f 100644 --- a/internal/compress/gob_option.go +++ b/internal/compress/gob_option.go @@ -19,6 +19,4 @@ package compress type GobOption func(c *gobCompressor) error -var ( - defaultGobOpts = []GobOption{} -) +var defaultGobOpts = []GobOption{} diff --git a/internal/compress/gob_test.go b/internal/compress/gob_test.go index 577bd3cec8..79d378d1bb 100644 --- a/internal/compress/gob_test.go +++ b/internal/compress/gob_test.go @@ -22,10 +22,9 @@ import ( "reflect" "testing" - "go.uber.org/goleak" - "github.com/vdaas/vald/internal/compress/gob" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNewGob(t *testing.T) { @@ -455,7 +454,6 @@ func Test_gobCompressor_Reader(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/compress/gzip.go b/internal/compress/gzip.go index 416d434334..fd80e1fde2 100644 --- a/internal/compress/gzip.go +++ b/internal/compress/gzip.go @@ -144,7 +144,7 @@ type gzipWriter struct { w io.WriteCloser } -// Write writes len(p) bytes from p +// Write writes len(p) bytes from p. 
func (g *gzipWriter) Write(p []byte) (n int, err error) { return g.w.Write(p) } diff --git a/internal/compress/gzip/gzip_mock_test.go b/internal/compress/gzip/gzip_mock_test.go new file mode 100644 index 0000000000..abec2939e5 --- /dev/null +++ b/internal/compress/gzip/gzip_mock_test.go @@ -0,0 +1,968 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +package gzip + +import ( + "bytes" + "io" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestMockReader_Read(t *testing.T) { + t.Parallel() + type args struct { + p []byte + } + type fields struct { + ReadFunc func(p []byte) (n int, err error) + CloseFunc func() error + ResetFunc func(r io.Reader) error + MultistreamFunc func(ok bool) + } + type want struct { + wantN int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotN int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotN, w.wantN) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotN, w.wantN) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + p: nil, + }, + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + MultistreamFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + p: nil, + }, + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + MultistreamFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockReader{ + ReadFunc: test.fields.ReadFunc, + CloseFunc: test.fields.CloseFunc, + ResetFunc: test.fields.ResetFunc, + MultistreamFunc: test.fields.MultistreamFunc, + } + + gotN, err := m.Read(test.args.p) + if err := test.checkFunc(test.want, gotN, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockReader_Close(t *testing.T) { + t.Parallel() + type fields struct { + ReadFunc func(p []byte) (n int, err error) + CloseFunc func() error + ResetFunc func(r io.Reader) error + MultistreamFunc func(ok bool) + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if 
!errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + MultistreamFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + MultistreamFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockReader{ + ReadFunc: test.fields.ReadFunc, + CloseFunc: test.fields.CloseFunc, + ResetFunc: test.fields.ResetFunc, + MultistreamFunc: test.fields.MultistreamFunc, + } + + err := m.Close() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockReader_Reset(t *testing.T) { + t.Parallel() + type args struct { + r io.Reader + } + type fields struct { + ReadFunc func(p []byte) (n int, err error) + CloseFunc func() error + ResetFunc func(r io.Reader) error + MultistreamFunc func(ok bool) + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + r: nil, + }, + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + MultistreamFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + r: nil, + }, + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + MultistreamFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockReader{ + ReadFunc: test.fields.ReadFunc, + CloseFunc: test.fields.CloseFunc, + ResetFunc: test.fields.ResetFunc, + MultistreamFunc: test.fields.MultistreamFunc, + } + + err := m.Reset(test.args.r) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockReader_Multistream(t *testing.T) { + t.Parallel() + type args struct { + ok bool + } + type fields struct { + ReadFunc func(p []byte) (n int, err error) + CloseFunc func() error + ResetFunc func(r io.Reader) error + MultistreamFunc func(ok bool) + } + type want struct { + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want) error { + return 
nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ok: false, + }, + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + MultistreamFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ok: false, + }, + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + MultistreamFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockReader{ + ReadFunc: test.fields.ReadFunc, + CloseFunc: test.fields.CloseFunc, + ResetFunc: test.fields.ResetFunc, + MultistreamFunc: test.fields.MultistreamFunc, + } + + m.Multistream(test.args.ok) + if err := test.checkFunc(test.want); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockWriter_Write(t *testing.T) { + t.Parallel() + type args struct { + p []byte + } + type fields struct { + WriteFunc func(p []byte) (n int, err error) + CloseFunc func() error + ResetFunc func(w io.Writer) + FlushFunc func() error + } + type want struct { + wantN int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotN int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotN, w.wantN) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotN, w.wantN) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + p: nil, + }, + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + p: nil, + }, + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockWriter{ + WriteFunc: test.fields.WriteFunc, + CloseFunc: test.fields.CloseFunc, + ResetFunc: test.fields.ResetFunc, + FlushFunc: test.fields.FlushFunc, + } + + gotN, err := m.Write(test.args.p) + if err := test.checkFunc(test.want, gotN, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockWriter_Close(t *testing.T) { + t.Parallel() + type fields struct { + WriteFunc func(p []byte) (n int, err error) + CloseFunc func() error + ResetFunc func(w io.Writer) + FlushFunc func() error + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + 
defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockWriter{ + WriteFunc: test.fields.WriteFunc, + CloseFunc: test.fields.CloseFunc, + ResetFunc: test.fields.ResetFunc, + FlushFunc: test.fields.FlushFunc, + } + + err := m.Close() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockWriter_Reset(t *testing.T) { + t.Parallel() + type fields struct { + WriteFunc func(p []byte) (n int, err error) + CloseFunc func() error + ResetFunc func(w io.Writer) + FlushFunc func() error + } + type want struct { + wantW string + } + type test struct { + name string + fields fields + want want + checkFunc func(want, string) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, gotW string) error { + if !reflect.DeepEqual(gotW, w.wantW) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotW, w.wantW) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockWriter{ + WriteFunc: test.fields.WriteFunc, + CloseFunc: test.fields.CloseFunc, + ResetFunc: test.fields.ResetFunc, + FlushFunc: test.fields.FlushFunc, + } + w := &bytes.Buffer{} + + m.Reset(w) + if err := test.checkFunc(test.want, w.String()); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockWriter_Flush(t *testing.T) { + t.Parallel() + type fields struct { + WriteFunc func(p []byte) (n int, err error) + CloseFunc func() error + ResetFunc func(w io.Writer) + FlushFunc func() error + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // 
TODO test cases + /* + { + name: "test_case_1", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ResetFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockWriter{ + WriteFunc: test.fields.WriteFunc, + CloseFunc: test.fields.CloseFunc, + ResetFunc: test.fields.ResetFunc, + FlushFunc: test.fields.FlushFunc, + } + + err := m.Flush() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockGzip_NewWriterLevel(t *testing.T) { + t.Parallel() + type args struct { + level int + } + type fields struct { + NewWriterLevelFunc func(w io.Writer, level int) (Writer, error) + NewReaderFunc func(r io.Reader) (Reader, error) + } + type want struct { + want Writer + wantW string + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, Writer, string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Writer, gotW string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + if !reflect.DeepEqual(gotW, w.wantW) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotW, w.wantW) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + level: 0, + }, + fields: fields { + NewWriterLevelFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + level: 0, + }, + fields: fields { + NewWriterLevelFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockGzip{ + NewWriterLevelFunc: test.fields.NewWriterLevelFunc, + NewReaderFunc: test.fields.NewReaderFunc, + } + w := &bytes.Buffer{} + + got, err := m.NewWriterLevel(w, test.args.level) + if err := test.checkFunc(test.want, got, w.String(), err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockGzip_NewReader(t *testing.T) { + t.Parallel() + type args struct { + r io.Reader + } + type fields struct { + NewWriterLevelFunc func(w io.Writer, level int) (Writer, error) + NewReaderFunc func(r io.Reader) (Reader, error) + } + type want struct { + want Reader + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, Reader, error) error + 
beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Reader, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + r: nil, + }, + fields: fields { + NewWriterLevelFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + r: nil, + }, + fields: fields { + NewWriterLevelFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockGzip{ + NewWriterLevelFunc: test.fields.NewWriterLevelFunc, + NewReaderFunc: test.fields.NewReaderFunc, + } + + got, err := m.NewReader(test.args.r) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/compress/gzip/gzip_test.go b/internal/compress/gzip/gzip_test.go new file mode 100644 index 0000000000..b4005c5f98 --- /dev/null +++ b/internal/compress/gzip/gzip_test.go @@ -0,0 +1,252 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +package gzip + +import ( + "bytes" + "io" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type want struct { + want Gzip + } + type test struct { + name string + want want + checkFunc func(want, Gzip) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got Gzip) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := New() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_compress_NewWriterLevel(t *testing.T) { + t.Parallel() + type args struct { + level int + } + type want struct { + want Writer + wantW string + err error + } + type test struct { + name string + args args + c *compress + want want + checkFunc func(want, Writer, string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Writer, gotW string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + if !reflect.DeepEqual(gotW, w.wantW) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotW, w.wantW) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + level: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + level: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &compress{} + w := &bytes.Buffer{} + + got, err := c.NewWriterLevel(w, test.args.level) + if err := test.checkFunc(test.want, got, w.String(), err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_compress_NewReader(t *testing.T) { + t.Parallel() + type args struct { + r io.Reader + } + type want struct { + want Reader + err error + } + type test struct { + name string + args args + c *compress + want want + checkFunc func(want, Reader, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Reader, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, 
w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &compress{} + + got, err := c.NewReader(test.args.r) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/compress/gzip_option.go b/internal/compress/gzip_option.go index 5404e3d23f..2bb3a4ed6d 100644 --- a/internal/compress/gzip_option.go +++ b/internal/compress/gzip_option.go @@ -25,12 +25,10 @@ import ( // GzipOption represents the functional option for gzipCompressor. type GzipOption func(c *gzipCompressor) error -var ( - defaultGzipOpts = []GzipOption{ - WithGzipGob(), - WithGzipCompressionLevel(gzip.DefaultCompression), - } -) +var defaultGzipOpts = []GzipOption{ + WithGzipGob(), + WithGzipCompressionLevel(gzip.DefaultCompression), +} // WithGzipGob represents the option to set the GobOption to initialize Gob. func WithGzipGob(opts ...GobOption) GzipOption { diff --git a/internal/compress/gzip_test.go b/internal/compress/gzip_test.go index 232b6ec0dd..c12044d067 100644 --- a/internal/compress/gzip_test.go +++ b/internal/compress/gzip_test.go @@ -788,7 +788,6 @@ func Test_gzipReader_Read(t *testing.T) { if err := test.checkFunc(test.want, gotN, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -914,7 +913,6 @@ func Test_gzipReader_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -990,7 +988,6 @@ func Test_gzipWriter_Write(t *testing.T) { if err := test.checkFunc(test.want, gotN, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1116,7 +1113,6 @@ func Test_gzipWriter_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/compress/lz4/lz4_mock_test.go b/internal/compress/lz4/lz4_mock_test.go new file mode 100644 index 0000000000..d664d9becd --- /dev/null +++ b/internal/compress/lz4/lz4_mock_test.go @@ -0,0 +1,764 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +package lz4 + +import ( + "bytes" + "io" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestMockReader_Read(t *testing.T) { + t.Parallel() + type args struct { + p []byte + } + type fields struct { + ReadFunc func(p []byte) (n int, err error) + } + type want struct { + wantN int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotN int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotN, w.wantN) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotN, w.wantN) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + p: nil, + }, + fields: fields { + ReadFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + p: nil, + }, + fields: fields { + ReadFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockReader{ + ReadFunc: test.fields.ReadFunc, + } + + gotN, err := m.Read(test.args.p) + if err := test.checkFunc(test.want, gotN, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockWriter_Write(t *testing.T) { + t.Parallel() + type args struct { + p []byte + } + type fields struct { + WriteFunc func(p []byte) (n int, err error) + CloseFunc func() error + HeaderFunc func() *Header + FlushFunc func() error + } + type want struct { + wantN int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotN int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotN, w.wantN) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotN, w.wantN) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + p: nil, + }, + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + HeaderFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + p: nil, + }, + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + HeaderFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockWriter{ + WriteFunc: test.fields.WriteFunc, + CloseFunc: test.fields.CloseFunc, + HeaderFunc: 
test.fields.HeaderFunc, + FlushFunc: test.fields.FlushFunc, + } + + gotN, err := m.Write(test.args.p) + if err := test.checkFunc(test.want, gotN, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockWriter_Close(t *testing.T) { + t.Parallel() + type fields struct { + WriteFunc func(p []byte) (n int, err error) + CloseFunc func() error + HeaderFunc func() *Header + FlushFunc func() error + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + HeaderFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + HeaderFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockWriter{ + WriteFunc: test.fields.WriteFunc, + CloseFunc: test.fields.CloseFunc, + HeaderFunc: test.fields.HeaderFunc, + FlushFunc: test.fields.FlushFunc, + } + + err := m.Close() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockWriter_Header(t *testing.T) { + t.Parallel() + type fields struct { + WriteFunc func(p []byte) (n int, err error) + CloseFunc func() error + HeaderFunc func() *Header + FlushFunc func() error + } + type want struct { + want *Header + } + type test struct { + name string + fields fields + want want + checkFunc func(want, *Header) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got *Header) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + HeaderFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + HeaderFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockWriter{ + WriteFunc: test.fields.WriteFunc, + CloseFunc: test.fields.CloseFunc, + HeaderFunc: test.fields.HeaderFunc, + FlushFunc: test.fields.FlushFunc, + } + + got := m.Header() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func 
TestMockWriter_Flush(t *testing.T) { + t.Parallel() + type fields struct { + WriteFunc func(p []byte) (n int, err error) + CloseFunc func() error + HeaderFunc func() *Header + FlushFunc func() error + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + HeaderFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + HeaderFunc: nil, + FlushFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockWriter{ + WriteFunc: test.fields.WriteFunc, + CloseFunc: test.fields.CloseFunc, + HeaderFunc: test.fields.HeaderFunc, + FlushFunc: test.fields.FlushFunc, + } + + err := m.Flush() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockLZ4_NewWriter(t *testing.T) { + t.Parallel() + type fields struct { + NewWriterFunc func(w io.Writer) Writer + NewWriterLevelFunc func(w io.Writer, level int) Writer + NewReaderFunc func(r io.Reader) Reader + } + type want struct { + want Writer + wantW string + } + type test struct { + name string + fields fields + want want + checkFunc func(want, Writer, string) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got Writer, gotW string) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + if !reflect.DeepEqual(gotW, w.wantW) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotW, w.wantW) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + NewWriterFunc: nil, + NewWriterLevelFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + NewWriterFunc: nil, + NewWriterLevelFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockLZ4{ + NewWriterFunc: test.fields.NewWriterFunc, + NewWriterLevelFunc: test.fields.NewWriterLevelFunc, + NewReaderFunc: test.fields.NewReaderFunc, + } + w := &bytes.Buffer{} + + got := m.NewWriter(w) + if err := test.checkFunc(test.want, got, w.String()); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockLZ4_NewWriterLevel(t 
*testing.T) { + t.Parallel() + type args struct { + level int + } + type fields struct { + NewWriterFunc func(w io.Writer) Writer + NewWriterLevelFunc func(w io.Writer, level int) Writer + NewReaderFunc func(r io.Reader) Reader + } + type want struct { + want Writer + wantW string + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, Writer, string) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Writer, gotW string) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + if !reflect.DeepEqual(gotW, w.wantW) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotW, w.wantW) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + level: 0, + }, + fields: fields { + NewWriterFunc: nil, + NewWriterLevelFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + level: 0, + }, + fields: fields { + NewWriterFunc: nil, + NewWriterLevelFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockLZ4{ + NewWriterFunc: test.fields.NewWriterFunc, + NewWriterLevelFunc: test.fields.NewWriterLevelFunc, + NewReaderFunc: test.fields.NewReaderFunc, + } + w := &bytes.Buffer{} + + got := m.NewWriterLevel(w, test.args.level) + if err := test.checkFunc(test.want, got, w.String()); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockLZ4_NewReader(t *testing.T) { + t.Parallel() + type args struct { + r io.Reader + } + type fields struct { + NewWriterFunc func(w io.Writer) Writer + NewWriterLevelFunc func(w io.Writer, level int) Writer + NewReaderFunc func(r io.Reader) Reader + } + type want struct { + want Reader + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, Reader) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Reader) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + r: nil, + }, + fields: fields { + NewWriterFunc: nil, + NewWriterLevelFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + r: nil, + }, + fields: fields { + NewWriterFunc: nil, + NewWriterLevelFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockLZ4{ + NewWriterFunc: 
test.fields.NewWriterFunc, + NewWriterLevelFunc: test.fields.NewWriterLevelFunc, + NewReaderFunc: test.fields.NewReaderFunc, + } + + got := m.NewReader(test.args.r) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/compress/lz4/lz4_test.go b/internal/compress/lz4/lz4_test.go new file mode 100644 index 0000000000..53002f25c2 --- /dev/null +++ b/internal/compress/lz4/lz4_test.go @@ -0,0 +1,391 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +package lz4 + +import ( + "bytes" + "io" + "reflect" + "testing" + + lz4 "github.com/pierrec/lz4/v3" + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func Test_writer_Header(t *testing.T) { + t.Parallel() + type fields struct { + Writer *lz4.Writer + } + type want struct { + want *Header + } + type test struct { + name string + fields fields + want want + checkFunc func(want, *Header) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got *Header) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + Writer: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + Writer: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + w := &writer{ + Writer: test.fields.Writer, + } + + got := w.Header() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestNew(t *testing.T) { + t.Parallel() + type want struct { + want LZ4 + } + type test struct { + name string + want want + checkFunc func(want, LZ4) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got LZ4) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc 
== nil { + test.checkFunc = defaultCheckFunc + } + + got := New() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_compress_NewWriterLevel(t *testing.T) { + t.Parallel() + type args struct { + level int + } + type want struct { + want Writer + wantW string + } + type test struct { + name string + args args + c *compress + want want + checkFunc func(want, Writer, string) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Writer, gotW string) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + if !reflect.DeepEqual(gotW, w.wantW) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotW, w.wantW) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + level: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + level: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &compress{} + w := &bytes.Buffer{} + + got := c.NewWriterLevel(w, test.args.level) + if err := test.checkFunc(test.want, got, w.String()); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_compress_NewWriter(t *testing.T) { + t.Parallel() + type want struct { + want Writer + wantW string + } + type test struct { + name string + c *compress + want want + checkFunc func(want, Writer, string) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got Writer, gotW string) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + if !reflect.DeepEqual(gotW, w.wantW) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotW, w.wantW) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &compress{} + w := &bytes.Buffer{} + + got := c.NewWriter(w) + if err := test.checkFunc(test.want, got, w.String()); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_compress_NewReader(t *testing.T) { + t.Parallel() + type args struct { + r io.Reader + } + type want struct { + want Reader + } + type test struct { + name string + args args + c *compress + want want + checkFunc func(want, Reader) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Reader) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + 
return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &compress{} + + got := c.NewReader(test.args.r) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/compress/lz4_option.go b/internal/compress/lz4_option.go index e6ace1c2a4..209faba7aa 100644 --- a/internal/compress/lz4_option.go +++ b/internal/compress/lz4_option.go @@ -24,12 +24,10 @@ import ( // LZ4Option represents the functional option for lz4Compressor. type LZ4Option func(c *lz4Compressor) error -var ( - defaultLZ4Opts = []LZ4Option{ - WithLZ4Gob(), - WithLZ4CompressionLevel(0), - } -) +var defaultLZ4Opts = []LZ4Option{ + WithLZ4Gob(), + WithLZ4CompressionLevel(0), +} // WithLZ4Gob returns the option to set gobc for lz4Compressor. func WithLZ4Gob(opts ...GobOption) LZ4Option { diff --git a/internal/compress/lz4_test.go b/internal/compress/lz4_test.go index 34df6a7417..2c3521a377 100644 --- a/internal/compress/lz4_test.go +++ b/internal/compress/lz4_test.go @@ -120,7 +120,6 @@ func TestNewLZ4(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -320,7 +319,6 @@ func Test_lz4Compressor_CompressVector(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -388,10 +386,10 @@ func Test_E2E_lz4Compressor_CompressVector(t *testing.T) { if err := test.checkFunc(test.want, got, err, l); err != nil { tt.Errorf("error = %v", err) } - }) } } + func Test_lz4Compressor_DecompressVector(t *testing.T) { type args struct { bs []byte @@ -524,7 +522,6 @@ func Test_lz4Compressor_DecompressVector(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -610,7 +607,6 @@ func Test_lz4Compressor_Reader(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -696,7 +692,6 @@ func Test_lz4Compressor_Writer(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -772,7 +767,6 @@ func Test_lz4Reader_Read(t *testing.T) { if err := test.checkFunc(test.want, gotN, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -841,7 +835,6 @@ func Test_lz4Reader_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -917,7 +910,6 @@ func Test_lz4Writer_Write(t *testing.T) { if err := test.checkFunc(test.want, gotN, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1004,7 +996,6 @@ func Test_lz4Writer_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/compress/mock_test.go b/internal/compress/mock_test.go new file mode 100644 index 0000000000..fdb2d5f121 --- /dev/null +++ 
b/internal/compress/mock_test.go @@ -0,0 +1,781 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +package compress + +import ( + "io" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestMockCompressor_CompressVector(t *testing.T) { + t.Parallel() + type args struct { + vector []float32 + } + type fields struct { + CompressVectorFunc func(vector []float32) (bytes []byte, err error) + DecompressVectorFunc func(bytes []byte) (vector []float32, err error) + ReaderFunc func(src io.ReadCloser) (io.ReadCloser, error) + WriterFunc func(dst io.WriteCloser) (io.WriteCloser, error) + } + type want struct { + wantBytes []byte + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, []byte, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotBytes []byte, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotBytes, w.wantBytes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotBytes, w.wantBytes) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + vector: nil, + }, + fields: fields { + CompressVectorFunc: nil, + DecompressVectorFunc: nil, + ReaderFunc: nil, + WriterFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + vector: nil, + }, + fields: fields { + CompressVectorFunc: nil, + DecompressVectorFunc: nil, + ReaderFunc: nil, + WriterFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockCompressor{ + CompressVectorFunc: test.fields.CompressVectorFunc, + DecompressVectorFunc: test.fields.DecompressVectorFunc, + ReaderFunc: test.fields.ReaderFunc, + WriterFunc: test.fields.WriterFunc, + } + + gotBytes, err := m.CompressVector(test.args.vector) + if err := test.checkFunc(test.want, gotBytes, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockCompressor_DecompressVector(t *testing.T) { + t.Parallel() + type args struct { + bytes []byte + } + type fields struct { + CompressVectorFunc func(vector []float32) (bytes []byte, err error) + DecompressVectorFunc func(bytes []byte) (vector []float32, err error) + ReaderFunc func(src io.ReadCloser) (io.ReadCloser, error) + WriterFunc func(dst io.WriteCloser) (io.WriteCloser, error) + } + type want struct { + wantVector 
[]float32 + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, []float32, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotVector []float32, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotVector, w.wantVector) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVector, w.wantVector) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + bytes: nil, + }, + fields: fields { + CompressVectorFunc: nil, + DecompressVectorFunc: nil, + ReaderFunc: nil, + WriterFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + bytes: nil, + }, + fields: fields { + CompressVectorFunc: nil, + DecompressVectorFunc: nil, + ReaderFunc: nil, + WriterFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockCompressor{ + CompressVectorFunc: test.fields.CompressVectorFunc, + DecompressVectorFunc: test.fields.DecompressVectorFunc, + ReaderFunc: test.fields.ReaderFunc, + WriterFunc: test.fields.WriterFunc, + } + + gotVector, err := m.DecompressVector(test.args.bytes) + if err := test.checkFunc(test.want, gotVector, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockCompressor_Reader(t *testing.T) { + t.Parallel() + type args struct { + src io.ReadCloser + } + type fields struct { + CompressVectorFunc func(vector []float32) (bytes []byte, err error) + DecompressVectorFunc func(bytes []byte) (vector []float32, err error) + ReaderFunc func(src io.ReadCloser) (io.ReadCloser, error) + WriterFunc func(dst io.WriteCloser) (io.WriteCloser, error) + } + type want struct { + want io.ReadCloser + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, io.ReadCloser, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got io.ReadCloser, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + src: nil, + }, + fields: fields { + CompressVectorFunc: nil, + DecompressVectorFunc: nil, + ReaderFunc: nil, + WriterFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + src: nil, + }, + fields: fields { + CompressVectorFunc: nil, + DecompressVectorFunc: nil, + ReaderFunc: nil, + WriterFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + 
test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockCompressor{ + CompressVectorFunc: test.fields.CompressVectorFunc, + DecompressVectorFunc: test.fields.DecompressVectorFunc, + ReaderFunc: test.fields.ReaderFunc, + WriterFunc: test.fields.WriterFunc, + } + + got, err := m.Reader(test.args.src) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockCompressor_Writer(t *testing.T) { + t.Parallel() + type args struct { + dst io.WriteCloser + } + type fields struct { + CompressVectorFunc func(vector []float32) (bytes []byte, err error) + DecompressVectorFunc func(bytes []byte) (vector []float32, err error) + ReaderFunc func(src io.ReadCloser) (io.ReadCloser, error) + WriterFunc func(dst io.WriteCloser) (io.WriteCloser, error) + } + type want struct { + want io.WriteCloser + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, io.WriteCloser, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got io.WriteCloser, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + dst: nil, + }, + fields: fields { + CompressVectorFunc: nil, + DecompressVectorFunc: nil, + ReaderFunc: nil, + WriterFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + dst: nil, + }, + fields: fields { + CompressVectorFunc: nil, + DecompressVectorFunc: nil, + ReaderFunc: nil, + WriterFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockCompressor{ + CompressVectorFunc: test.fields.CompressVectorFunc, + DecompressVectorFunc: test.fields.DecompressVectorFunc, + ReaderFunc: test.fields.ReaderFunc, + WriterFunc: test.fields.WriterFunc, + } + + got, err := m.Writer(test.args.dst) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockReadCloser_Read(t *testing.T) { + t.Parallel() + type args struct { + p []byte + } + type fields struct { + ReadFunc func(p []byte) (n int, err error) + CloseFunc func() error + } + type want struct { + want int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: 
"test_case_1", + args: args { + p: nil, + }, + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + p: nil, + }, + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockReadCloser{ + ReadFunc: test.fields.ReadFunc, + CloseFunc: test.fields.CloseFunc, + } + + got, err := m.Read(test.args.p) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockReadCloser_Close(t *testing.T) { + t.Parallel() + type fields struct { + ReadFunc func(p []byte) (n int, err error) + CloseFunc func() error + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockReadCloser{ + ReadFunc: test.fields.ReadFunc, + CloseFunc: test.fields.CloseFunc, + } + + err := m.Close() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockWriteCloser_Write(t *testing.T) { + t.Parallel() + type args struct { + p []byte + } + type fields struct { + WriteFunc func(p []byte) (n int, err error) + CloseFunc func() error + } + type want struct { + want int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + p: nil, + }, + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + p: nil, + }, + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + }, + 
want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockWriteCloser{ + WriteFunc: test.fields.WriteFunc, + CloseFunc: test.fields.CloseFunc, + } + + got, err := m.Write(test.args.p) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockWriteCloser_Close(t *testing.T) { + t.Parallel() + type fields struct { + WriteFunc func(p []byte) (n int, err error) + CloseFunc func() error + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockWriteCloser{ + WriteFunc: test.fields.WriteFunc, + CloseFunc: test.fields.CloseFunc, + } + + err := m.Close() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/client/gateway/vald/rest/option_test.go b/internal/compress/zstd/option_test.go similarity index 84% rename from internal/client/gateway/vald/rest/option_test.go rename to internal/compress/zstd/option_test.go index ce2b1f5250..e162f059e2 100644 --- a/internal/client/gateway/vald/rest/option_test.go +++ b/internal/compress/zstd/option_test.go @@ -13,9 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
// - -// Package rest provides vald REST client functions -package rest +package zstd import ( "testing" @@ -23,10 +21,12 @@ import ( "go.uber.org/goleak" ) -func TestWithAddr(t *testing.T) { +func TestWithEncoderLevel(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { - addr string + level int } type want struct { obj *T @@ -61,7 +61,7 @@ func TestWithAddr(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -73,7 +73,7 @@ func TestWithAddr(t *testing.T) { { name: "test_case_1", args: args { - addr: "", + level: 0, }, want: want { obj: new(T), @@ -87,7 +87,7 @@ func TestWithAddr(t *testing.T) { return test { name: "test_case_2", args: args { - addr: "", + level: 0, }, want: want { obj: new(T), @@ -97,9 +97,11 @@ func TestWithAddr(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -113,22 +115,22 @@ func TestWithAddr(t *testing.T) { test.checkFunc = defaultCheckFunc } - got := WithAddr(test.args.addr) + got := WithEncoderLevel(test.args.level) obj := new(T) if err := test.checkFunc(test.want, obj, got(obj)); err != nil { tt.Errorf("error = %v", err) } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - got := WithAddr(test.args.addr) + got := WithEncoderLevel(test.args.level) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/internal/compress/zstd/zstd_mock.go b/internal/compress/zstd/zstd_mock.go index 005955e143..8346efaace 100644 --- a/internal/compress/zstd/zstd_mock.go +++ b/internal/compress/zstd/zstd_mock.go @@ -43,7 +43,7 @@ func (m *MockEncoder) ReadFrom(r io.Reader) (n int64, err error) { return m.ReadFromFunc(r) } -// MockDecoder represents +// MockDecoder represents. type MockDecoder struct { CloseFunc func() ReadFunc func(p []byte) (int, error) diff --git a/internal/compress/zstd/zstd_mock_test.go b/internal/compress/zstd/zstd_mock_test.go new file mode 100644 index 0000000000..f4898e97b3 --- /dev/null +++ b/internal/compress/zstd/zstd_mock_test.go @@ -0,0 +1,774 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
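// --- Illustrative sketch, not taken from the repository --------------------
// The renamed option test above shows the pattern this patch applies across
// the generated tests: `for _, tc := range tests { test := tc; ... }` with
// tt.Parallel() and goleak.VerifyNone(tt) on the subtest's *testing.T.
// The per-iteration copy matters because, before Go 1.22, the range variable
// is reused across iterations, so parallel closures could all observe its
// final value. A minimal, self-contained demonstration (names are invented):
package main

import (
	"fmt"
	"sync"
)

func main() {
	tests := []string{"test_case_1", "test_case_2", "test_case_3"}

	var wg sync.WaitGroup
	for _, tc := range tests {
		tc := tc // shadow with a per-iteration copy, as the patch does with `test := tc`
		wg.Add(1)
		go func() {
			defer wg.Done()
			// With the copy each goroutine sees its own case;
			// without it they could all see "test_case_3".
			fmt.Println(tc)
		}()
	}
	wg.Wait()
}
// --- end sketch -------------------------------------------------------------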
+// +package zstd + +import ( + "bytes" + "io" + "reflect" + "testing" + + "github.com/klauspost/compress/zstd" + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestMockEncoder_Write(t *testing.T) { + t.Parallel() + type args struct { + p []byte + } + type fields struct { + WriteFunc func(p []byte) (n int, err error) + CloseFunc func() error + ReadFromFunc func(r io.Reader) (n int64, err error) + } + type want struct { + wantN int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotN int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotN, w.wantN) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotN, w.wantN) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + p: nil, + }, + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ReadFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + p: nil, + }, + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ReadFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockEncoder{ + WriteFunc: test.fields.WriteFunc, + CloseFunc: test.fields.CloseFunc, + ReadFromFunc: test.fields.ReadFromFunc, + } + + gotN, err := m.Write(test.args.p) + if err := test.checkFunc(test.want, gotN, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockEncoder_Close(t *testing.T) { + t.Parallel() + type fields struct { + WriteFunc func(p []byte) (n int, err error) + CloseFunc func() error + ReadFromFunc func(r io.Reader) (n int64, err error) + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ReadFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ReadFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockEncoder{ + WriteFunc: test.fields.WriteFunc, + CloseFunc: test.fields.CloseFunc, + ReadFromFunc: test.fields.ReadFromFunc, + } + 
+ err := m.Close() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockEncoder_ReadFrom(t *testing.T) { + t.Parallel() + type args struct { + r io.Reader + } + type fields struct { + WriteFunc func(p []byte) (n int, err error) + CloseFunc func() error + ReadFromFunc func(r io.Reader) (n int64, err error) + } + type want struct { + wantN int64 + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int64, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotN int64, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotN, w.wantN) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotN, w.wantN) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + r: nil, + }, + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ReadFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + r: nil, + }, + fields: fields { + WriteFunc: nil, + CloseFunc: nil, + ReadFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockEncoder{ + WriteFunc: test.fields.WriteFunc, + CloseFunc: test.fields.CloseFunc, + ReadFromFunc: test.fields.ReadFromFunc, + } + + gotN, err := m.ReadFrom(test.args.r) + if err := test.checkFunc(test.want, gotN, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockDecoder_Close(t *testing.T) { + t.Parallel() + type fields struct { + CloseFunc func() + ReadFunc func(p []byte) (int, error) + WriteToFunc func(w io.Writer) (int64, error) + } + type want struct { + } + type test struct { + name string + fields fields + want want + checkFunc func(want) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want) error { + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + CloseFunc: nil, + ReadFunc: nil, + WriteToFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + CloseFunc: nil, + ReadFunc: nil, + WriteToFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockDecoder{ + CloseFunc: test.fields.CloseFunc, + ReadFunc: test.fields.ReadFunc, + WriteToFunc: test.fields.WriteToFunc, + } + + m.Close() + if err := test.checkFunc(test.want); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockDecoder_Read(t *testing.T) { + t.Parallel() + type args struct { + p []byte + } + type 
fields struct { + CloseFunc func() + ReadFunc func(p []byte) (int, error) + WriteToFunc func(w io.Writer) (int64, error) + } + type want struct { + want int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + p: nil, + }, + fields: fields { + CloseFunc: nil, + ReadFunc: nil, + WriteToFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + p: nil, + }, + fields: fields { + CloseFunc: nil, + ReadFunc: nil, + WriteToFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockDecoder{ + CloseFunc: test.fields.CloseFunc, + ReadFunc: test.fields.ReadFunc, + WriteToFunc: test.fields.WriteToFunc, + } + + got, err := m.Read(test.args.p) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockDecoder_WriteTo(t *testing.T) { + t.Parallel() + type fields struct { + CloseFunc func() + ReadFunc func(p []byte) (int, error) + WriteToFunc func(w io.Writer) (int64, error) + } + type want struct { + want int64 + wantW string + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, int64, string, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got int64, gotW string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + if !reflect.DeepEqual(gotW, w.wantW) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotW, w.wantW) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + CloseFunc: nil, + ReadFunc: nil, + WriteToFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + CloseFunc: nil, + ReadFunc: nil, + WriteToFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockDecoder{ + CloseFunc: test.fields.CloseFunc, + ReadFunc: test.fields.ReadFunc, + WriteToFunc: test.fields.WriteToFunc, + } + w := &bytes.Buffer{} + + got, err := 
m.WriteTo(w) + if err := test.checkFunc(test.want, got, w.String(), err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockZstd_NewWriter(t *testing.T) { + t.Parallel() + type args struct { + opts []zstd.EOption + } + type fields struct { + NewWriterFunc func(w io.Writer, opts ...zstd.EOption) (Encoder, error) + NewReaderFunc func(r io.Reader, opts ...zstd.DOption) (Decoder, error) + } + type want struct { + want Encoder + wantW string + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, Encoder, string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Encoder, gotW string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + if !reflect.DeepEqual(gotW, w.wantW) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotW, w.wantW) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + opts: nil, + }, + fields: fields { + NewWriterFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + opts: nil, + }, + fields: fields { + NewWriterFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockZstd{ + NewWriterFunc: test.fields.NewWriterFunc, + NewReaderFunc: test.fields.NewReaderFunc, + } + w := &bytes.Buffer{} + + got, err := m.NewWriter(w, test.args.opts...) 
+ if err := test.checkFunc(test.want, got, w.String(), err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockZstd_NewReader(t *testing.T) { + t.Parallel() + type args struct { + r io.Reader + opts []zstd.DOption + } + type fields struct { + NewWriterFunc func(w io.Writer, opts ...zstd.EOption) (Encoder, error) + NewReaderFunc func(r io.Reader, opts ...zstd.DOption) (Decoder, error) + } + type want struct { + want Decoder + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, Decoder, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Decoder, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + r: nil, + opts: nil, + }, + fields: fields { + NewWriterFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + r: nil, + opts: nil, + }, + fields: fields { + NewWriterFunc: nil, + NewReaderFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockZstd{ + NewWriterFunc: test.fields.NewWriterFunc, + NewReaderFunc: test.fields.NewReaderFunc, + } + + got, err := m.NewReader(test.args.r, test.args.opts...) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/compress/zstd/zstd_test.go b/internal/compress/zstd/zstd_test.go new file mode 100644 index 0000000000..e4d57aedaa --- /dev/null +++ b/internal/compress/zstd/zstd_test.go @@ -0,0 +1,255 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
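// --- Illustrative sketch, not taken from the repository --------------------
// The zstd mock tests above exercise function-field mocks (MockEncoder,
// MockDecoder, MockZstd) whose methods simply delegate to injected funcs.
// A self-contained sketch of the same pattern, using a hypothetical local
// mirror of MockEncoder rather than the real internal type:
package main

import (
	"errors"
	"fmt"
	"io"
)

// encoderMock mirrors the function-field style of MockEncoder (illustration only).
type encoderMock struct {
	WriteFunc    func(p []byte) (int, error)
	CloseFunc    func() error
	ReadFromFunc func(r io.Reader) (int64, error)
}

func (m *encoderMock) Write(p []byte) (int, error)        { return m.WriteFunc(p) }
func (m *encoderMock) Close() error                       { return m.CloseFunc() }
func (m *encoderMock) ReadFrom(r io.Reader) (int64, error) { return m.ReadFromFunc(r) }

func main() {
	wantErr := errors.New("close failed")
	m := &encoderMock{
		WriteFunc:    func(p []byte) (int, error) { return len(p), nil },
		CloseFunc:    func() error { return wantErr },
		ReadFromFunc: func(r io.Reader) (int64, error) { return 0, nil },
	}

	n, _ := m.Write([]byte("abc"))
	err := m.Close()
	fmt.Println(n, errors.Is(err, wantErr)) // 3 true
}
// --- end sketch -------------------------------------------------------------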
+// +package zstd + +import ( + "bytes" + "io" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type want struct { + want Zstd + } + type test struct { + name string + want want + checkFunc func(want, Zstd) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got Zstd) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := New() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_compress_NewWriter(t *testing.T) { + t.Parallel() + type args struct { + opts []EOption + } + type want struct { + want Encoder + wantW string + err error + } + type test struct { + name string + args args + c *compress + want want + checkFunc func(want, Encoder, string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Encoder, gotW string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + if !reflect.DeepEqual(gotW, w.wantW) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotW, w.wantW) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &compress{} + w := &bytes.Buffer{} + + got, err := c.NewWriter(w, test.args.opts...) 
+ if err := test.checkFunc(test.want, got, w.String(), err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_compress_NewReader(t *testing.T) { + t.Parallel() + type args struct { + r io.Reader + opts []DOption + } + type want struct { + want Decoder + err error + } + type test struct { + name string + args args + c *compress + want want + checkFunc func(want, Decoder, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Decoder, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + r: nil, + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + r: nil, + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &compress{} + + got, err := c.NewReader(test.args.r, test.args.opts...) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/compress/zstd_option.go b/internal/compress/zstd_option.go index 2beb8358ab..b581523f67 100644 --- a/internal/compress/zstd_option.go +++ b/internal/compress/zstd_option.go @@ -21,15 +21,13 @@ import ( "github.com/vdaas/vald/internal/compress/zstd" ) -// ZstdOption represents the functional option for zstdCompressor +// ZstdOption represents the functional option for zstdCompressor. type ZstdOption func(c *zstdCompressor) error -var ( - defaultZstdOpts = []ZstdOption{ - WithZstdGob(), - WithZstdCompressionLevel(3), - } -) +var defaultZstdOpts = []ZstdOption{ + WithZstdGob(), + WithZstdCompressionLevel(3), +} // WithZstdGob represents the option to set the GobOption to initialize Gob. 
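// --- Illustrative sketch, not taken from the repository --------------------
// zstd_option.go above keeps the functional-option pattern: ZstdOption is
// `func(c *zstdCompressor) error`, and defaultZstdOpts presumably runs before
// caller-supplied options, as is conventional for this pattern. A self-contained
// sketch of that pattern with hypothetical names:
package main

import "fmt"

type compressor struct {
	level int
}

// Option mirrors the ZstdOption shape: it mutates the target and may fail.
type Option func(c *compressor) error

func WithLevel(level int) Option {
	return func(c *compressor) error {
		if level < 0 {
			return fmt.Errorf("invalid level: %d", level)
		}
		c.level = level
		return nil
	}
}

var defaultOpts = []Option{WithLevel(3)} // analogous to defaultZstdOpts

func New(opts ...Option) (*compressor, error) {
	c := new(compressor)
	for _, opt := range append(defaultOpts, opts...) {
		if err := opt(c); err != nil {
			return nil, err
		}
	}
	return c, nil
}

func main() {
	c, err := New(WithLevel(5)) // caller option overrides the default
	fmt.Println(c.level, err)   // 5 <nil>
}
// --- end sketch -------------------------------------------------------------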
func WithZstdGob(opts ...GobOption) ZstdOption { diff --git a/internal/compress/zstd_test.go b/internal/compress/zstd_test.go index 7329e5cf7b..797392fdad 100644 --- a/internal/compress/zstd_test.go +++ b/internal/compress/zstd_test.go @@ -28,20 +28,18 @@ import ( "go.uber.org/goleak" ) -var ( - zstdCompressorComparatorOptions = []comparator.Option{ - comparator.AllowUnexported(zstdCompressor{}), - comparator.Comparer(func(x, y gobCompressor) bool { - return reflect.DeepEqual(x, y) - }), - comparator.Comparer(func(x, y zstd.EOption) bool { - if (x == nil && y != nil) || (x != nil && y == nil) { - return false - } - return reflect.ValueOf(x).Pointer() == reflect.ValueOf(y).Pointer() - }), - } -) +var zstdCompressorComparatorOptions = []comparator.Option{ + comparator.AllowUnexported(zstdCompressor{}), + comparator.Comparer(func(x, y gobCompressor) bool { + return reflect.DeepEqual(x, y) + }), + comparator.Comparer(func(x, y zstd.EOption) bool { + if (x == nil && y != nil) || (x != nil && y == nil) { + return false + } + return reflect.ValueOf(x).Pointer() == reflect.ValueOf(y).Pointer() + }), +} func TestNewZstd(t *testing.T) { type args struct { @@ -140,7 +138,6 @@ func TestNewZstd(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -328,7 +325,6 @@ func Test_zstdCompressor_CompressVector(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -568,7 +564,6 @@ func Test_zstdCompressor_DecompressVector(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -676,7 +671,6 @@ func Test_zstdCompressor_Reader(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -741,7 +735,8 @@ func Test_zstdCompressor_Writer(t *testing.T) { want: &zstdWriter{ dst: nil, w: e, - }}, + }, + }, } }(), { @@ -784,7 +779,6 @@ func Test_zstdCompressor_Writer(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -876,7 +870,6 @@ func Test_zstdReader_Read(t *testing.T) { if err := test.checkFunc(test.want, gotN, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -953,7 +946,6 @@ func Test_zstdReader_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1045,7 +1037,6 @@ func Test_zstdWriter_Write(t *testing.T) { if err := test.checkFunc(test.want, gotN, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1168,7 +1159,6 @@ func Test_zstdWriter_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/backoff_test.go b/internal/config/backoff_test.go index fea816fb07..29de9f1e59 100644 --- a/internal/config/backoff_test.go +++ b/internal/config/backoff_test.go @@ -117,7 +117,6 @@ func TestBackoff_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -214,7 +213,6 @@ func TestBackoff_Opts(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/backup_test.go b/internal/config/backup_test.go index 7034abb313..d4a4d96a14 100644 --- a/internal/config/backup_test.go +++ b/internal/config/backup_test.go @@ -100,7 +100,6 @@ func TestBackupManager_Bind(t *testing.T) { if err := 
test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/blob_test.go b/internal/config/blob_test.go index 15f8686212..3ad9aeed38 100644 --- a/internal/config/blob_test.go +++ b/internal/config/blob_test.go @@ -82,7 +82,6 @@ func TestBlobStorageType_String(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -153,7 +152,6 @@ func TestAtoBST(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -235,7 +233,6 @@ func TestBlob_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -373,7 +370,6 @@ func TestS3Config_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/cassandra.go b/internal/config/cassandra.go index f2df49b253..e2863e826d 100644 --- a/internal/config/cassandra.go +++ b/internal/config/cassandra.go @@ -67,7 +67,7 @@ type Cassandra struct { VKTable string `json:"vk_table" yaml:"vk_table"` // backup manager - MetaTable string `json:"meta_table" yaml:"meta_table"` + VectorBackupTable string `json:"vector_backup_table" yaml:"vector_backup_table"` } type PoolConfig struct { @@ -138,7 +138,7 @@ func (c *Cassandra) Bind() *Cassandra { c.KVTable = GetActualValue(c.KVTable) c.VKTable = GetActualValue(c.VKTable) - c.MetaTable = GetActualValue(c.MetaTable) + c.VectorBackupTable = GetActualValue(c.VectorBackupTable) return c } diff --git a/internal/config/cassandra_test.go b/internal/config/cassandra_test.go index 028a991f6b..2cb1f3c4ab 100644 --- a/internal/config/cassandra_test.go +++ b/internal/config/cassandra_test.go @@ -23,11 +23,11 @@ import ( "github.com/vdaas/vald/internal/db/nosql/cassandra" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestCassandra_Bind(t *testing.T) { + t.Parallel() type fields struct { Hosts []string CQLVersion string @@ -38,11 +38,13 @@ func TestCassandra_Bind(t *testing.T) { Keyspace string NumConns int Consistency string + SerialConsistency string Username string Password string PoolConfig *PoolConfig RetryPolicy *RetryPolicy ReconnectionPolicy *ReconnectionPolicy + HostFilter *HostFilter SocketKeepalive string MaxPreparedStmts int MaxRoutingKeyInfo int @@ -63,7 +65,7 @@ func TestCassandra_Bind(t *testing.T) { WriteCoalesceWaitTime string KVTable string VKTable string - MetaTable string + VectorBackupTable string } type want struct { want *Cassandra @@ -97,11 +99,13 @@ func TestCassandra_Bind(t *testing.T) { Keyspace: "", NumConns: 0, Consistency: "", + SerialConsistency: "", Username: "", Password: "", PoolConfig: PoolConfig{}, RetryPolicy: RetryPolicy{}, ReconnectionPolicy: ReconnectionPolicy{}, + HostFilter: HostFilter{}, SocketKeepalive: "", MaxPreparedStmts: 0, MaxRoutingKeyInfo: 0, @@ -122,7 +126,7 @@ func TestCassandra_Bind(t *testing.T) { WriteCoalesceWaitTime: "", KVTable: "", VKTable: "", - MetaTable: "", + VectorBackupTable: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -144,11 +148,13 @@ func TestCassandra_Bind(t *testing.T) { Keyspace: "", NumConns: 0, Consistency: "", + SerialConsistency: "", Username: "", Password: "", PoolConfig: PoolConfig{}, RetryPolicy: RetryPolicy{}, ReconnectionPolicy: ReconnectionPolicy{}, + HostFilter: HostFilter{}, SocketKeepalive: "", MaxPreparedStmts: 0, MaxRoutingKeyInfo: 0, @@ -169,7 +175,7 @@ func TestCassandra_Bind(t *testing.T) { 
WriteCoalesceWaitTime: "", KVTable: "", VKTable: "", - MetaTable: "", + VectorBackupTable: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -178,9 +184,11 @@ func TestCassandra_Bind(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -200,11 +208,13 @@ func TestCassandra_Bind(t *testing.T) { Keyspace: test.fields.Keyspace, NumConns: test.fields.NumConns, Consistency: test.fields.Consistency, + SerialConsistency: test.fields.SerialConsistency, Username: test.fields.Username, Password: test.fields.Password, PoolConfig: test.fields.PoolConfig, RetryPolicy: test.fields.RetryPolicy, ReconnectionPolicy: test.fields.ReconnectionPolicy, + HostFilter: test.fields.HostFilter, SocketKeepalive: test.fields.SocketKeepalive, MaxPreparedStmts: test.fields.MaxPreparedStmts, MaxRoutingKeyInfo: test.fields.MaxRoutingKeyInfo, @@ -225,19 +235,19 @@ func TestCassandra_Bind(t *testing.T) { WriteCoalesceWaitTime: test.fields.WriteCoalesceWaitTime, KVTable: test.fields.KVTable, VKTable: test.fields.VKTable, - MetaTable: test.fields.MetaTable, + VectorBackupTable: test.fields.VectorBackupTable, } got := c.Bind() if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestCassandra_Opts(t *testing.T) { + t.Parallel() type fields struct { Hosts []string CQLVersion string @@ -248,11 +258,13 @@ func TestCassandra_Opts(t *testing.T) { Keyspace string NumConns int Consistency string + SerialConsistency string Username string Password string PoolConfig *PoolConfig RetryPolicy *RetryPolicy ReconnectionPolicy *ReconnectionPolicy + HostFilter *HostFilter SocketKeepalive string MaxPreparedStmts int MaxRoutingKeyInfo int @@ -273,7 +285,7 @@ func TestCassandra_Opts(t *testing.T) { WriteCoalesceWaitTime string KVTable string VKTable string - MetaTable string + VectorBackupTable string } type want struct { wantOpts []cassandra.Option @@ -311,11 +323,13 @@ func TestCassandra_Opts(t *testing.T) { Keyspace: "", NumConns: 0, Consistency: "", + SerialConsistency: "", Username: "", Password: "", PoolConfig: PoolConfig{}, RetryPolicy: RetryPolicy{}, ReconnectionPolicy: ReconnectionPolicy{}, + HostFilter: HostFilter{}, SocketKeepalive: "", MaxPreparedStmts: 0, MaxRoutingKeyInfo: 0, @@ -336,7 +350,7 @@ func TestCassandra_Opts(t *testing.T) { WriteCoalesceWaitTime: "", KVTable: "", VKTable: "", - MetaTable: "", + VectorBackupTable: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -358,11 +372,13 @@ func TestCassandra_Opts(t *testing.T) { Keyspace: "", NumConns: 0, Consistency: "", + SerialConsistency: "", Username: "", Password: "", PoolConfig: PoolConfig{}, RetryPolicy: RetryPolicy{}, ReconnectionPolicy: ReconnectionPolicy{}, + HostFilter: HostFilter{}, SocketKeepalive: "", MaxPreparedStmts: 0, MaxRoutingKeyInfo: 0, @@ -383,7 +399,7 @@ func TestCassandra_Opts(t *testing.T) { WriteCoalesceWaitTime: "", KVTable: "", VKTable: "", - MetaTable: "", + VectorBackupTable: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -392,9 +408,11 @@ func TestCassandra_Opts(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -414,11 +432,13 @@ func TestCassandra_Opts(t *testing.T) { Keyspace: 
test.fields.Keyspace, NumConns: test.fields.NumConns, Consistency: test.fields.Consistency, + SerialConsistency: test.fields.SerialConsistency, Username: test.fields.Username, Password: test.fields.Password, PoolConfig: test.fields.PoolConfig, RetryPolicy: test.fields.RetryPolicy, ReconnectionPolicy: test.fields.ReconnectionPolicy, + HostFilter: test.fields.HostFilter, SocketKeepalive: test.fields.SocketKeepalive, MaxPreparedStmts: test.fields.MaxPreparedStmts, MaxRoutingKeyInfo: test.fields.MaxRoutingKeyInfo, @@ -439,14 +459,13 @@ func TestCassandra_Opts(t *testing.T) { WriteCoalesceWaitTime: test.fields.WriteCoalesceWaitTime, KVTable: test.fields.KVTable, VKTable: test.fields.VKTable, - MetaTable: test.fields.MetaTable, + VectorBackupTable: test.fields.VectorBackupTable, } gotOpts, err := cfg.Opts() if err := test.checkFunc(test.want, gotOpts, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/client_test.go b/internal/config/client_test.go index f65da05eed..64b8aa699a 100644 --- a/internal/config/client_test.go +++ b/internal/config/client_test.go @@ -96,7 +96,6 @@ func TestClient_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/compress_test.go b/internal/config/compress_test.go index 4b8cc6b1ff..4bb732dd29 100644 --- a/internal/config/compress_test.go +++ b/internal/config/compress_test.go @@ -82,7 +82,6 @@ func Test_compressAlgorithm_String(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -153,7 +152,6 @@ func TestCompressAlgorithm(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -231,7 +229,6 @@ func TestCompressCore_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -313,7 +310,6 @@ func TestCompressor_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -395,7 +391,6 @@ func TestCompressorRegisterer_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/config.go b/internal/config/config.go index c2b9d18717..a750775208 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -19,13 +19,13 @@ package config import ( "bytes" - "io/ioutil" "os" "path/filepath" "strings" "unsafe" "github.com/vdaas/vald/internal/encoding/json" + "github.com/vdaas/vald/internal/io/ioutil" yaml "gopkg.in/yaml.v2" ) @@ -74,7 +74,7 @@ func (c *GlobalConfig) UnmarshalJSON(data []byte) (err error) { // New returns config struct or error when decode the configuration file to actually *Config struct. 
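// --- Illustrative sketch, not taken from the repository --------------------
// config.Read in the hunk below opens the configuration file read-only (now
// written with the 0o600 octal literal style) and decodes it into the supplied
// struct. A standalone approximation of that kind of loader, assuming
// gopkg.in/yaml.v2 as imported by config.go; the struct and helper names here
// are hypothetical:
package main

import (
	"fmt"
	"os"

	yaml "gopkg.in/yaml.v2"
)

type serverConfig struct {
	Host string `yaml:"host"`
	Port int    `yaml:"port"`
}

func readConfig(path string, cfg interface{}) error {
	f, err := os.OpenFile(path, os.O_RDONLY, 0o600)
	if err != nil {
		return err
	}
	defer f.Close()
	return yaml.NewDecoder(f).Decode(cfg)
}

func main() {
	var cfg serverConfig
	if err := readConfig("config.yaml", &cfg); err != nil {
		fmt.Println("read error:", err)
		return
	}
	fmt.Printf("%+v\n", cfg)
}
// --- end sketch -------------------------------------------------------------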
func Read(path string, cfg interface{}) error { - f, err := os.OpenFile(path, os.O_RDONLY, 0600) + f, err := os.OpenFile(path, os.O_RDONLY, 0o600) if err != nil { return err } @@ -100,13 +100,7 @@ func GetActualValue(val string) (res string) { } res = os.ExpandEnv(val) if strings.HasPrefix(res, fileValuePrefix) { - path := strings.TrimPrefix(res, fileValuePrefix) - file, err := os.OpenFile(path, os.O_RDONLY, 0600) - defer file.Close() - if err != nil { - return - } - body, err := ioutil.ReadAll(file) + body, err := ioutil.ReadFile(strings.TrimPrefix(res, fileValuePrefix)) if err != nil { return } @@ -122,7 +116,7 @@ func GetActualValues(vals []string) []string { return vals } -// checkPrefixAndSuffix checks if the str has prefix and suffix +// checkPrefixAndSuffix checks if the str has prefix and suffix. func checkPrefixAndSuffix(str, pref, suf string) bool { return strings.HasPrefix(str, pref) && strings.HasSuffix(str, suf) } diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 5cc97be770..fcddc3b4f4 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -102,7 +102,6 @@ func TestGlobalConfig_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -194,7 +193,6 @@ func TestGlobalConfig_UnmarshalJSON(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -268,7 +266,6 @@ func TestRead(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -339,7 +336,6 @@ func TestGetActualValue(t *testing.T) { if err := test.checkFunc(test.want, gotRes); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -410,7 +406,6 @@ func TestGetActualValues(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -487,7 +482,6 @@ func Test_checkPrefixAndSuffix(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -558,7 +552,6 @@ func TestToRawYaml(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/debug_test.go b/internal/config/debug_test.go index 41361ea768..9abc0bdb8e 100644 --- a/internal/config/debug_test.go +++ b/internal/config/debug_test.go @@ -103,7 +103,6 @@ func TestDebug_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/discoverer_test.go b/internal/config/discoverer_test.go index 35f793d162..1f86fec1d9 100644 --- a/internal/config/discoverer_test.go +++ b/internal/config/discoverer_test.go @@ -100,7 +100,6 @@ func TestDiscoverer_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -189,7 +188,6 @@ func TestDiscovererClient_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/filter.go b/internal/config/filter.go index 5483a286c6..3537a22f08 100644 --- a/internal/config/filter.go +++ b/internal/config/filter.go @@ -21,9 +21,36 @@ type EgressFilter struct { Client *GRPCClient `json:"client" yaml:"client"` } +type IngressFilter struct { + Client *GRPCClient `json:"client,omitempty" yaml:"client"` + Search []string `json:"search,omitempty" yaml:"search"` + Insert []string `json:"insert,omitempty" yaml:"insert"` + 
Update []string `json:"update,omitempty" yaml:"update"` + Upsert []string `json:"upsert,omitempty" yaml:"upsert"` +} + func (e *EgressFilter) Bind() *EgressFilter { if e.Client != nil { e.Client.Bind() } return e } + +func (i *IngressFilter) Bind() *IngressFilter { + if i.Client != nil { + i.Client.Bind() + } + if i.Search != nil { + i.Search = GetActualValues(i.Search) + } + if i.Insert != nil { + i.Insert = GetActualValues(i.Insert) + } + if i.Update != nil { + i.Update = GetActualValues(i.Update) + } + if i.Upsert != nil { + i.Upsert = GetActualValues(i.Upsert) + } + return i +} diff --git a/internal/config/filter_test.go b/internal/config/filter_test.go index d7ae275275..91e270eddf 100644 --- a/internal/config/filter_test.go +++ b/internal/config/filter_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestEgressFilter_Bind(t *testing.T) { @@ -92,7 +93,95 @@ func TestEgressFilter_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } + }) + } +} + +func TestIngressFilter_Bind(t *testing.T) { + type fields struct { + Client *GRPCClient + Search []string + Insert []string + Update []string + Upsert []string + } + type want struct { + want *IngressFilter + } + type test struct { + name string + fields fields + want want + checkFunc func(want, *IngressFilter) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got *IngressFilter) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got = %v, want %v", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + Client: GRPCClient{}, + Search: nil, + Insert: nil, + Update: nil, + Upsert: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + Client: GRPCClient{}, + Search: nil, + Insert: nil, + Update: nil, + Upsert: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + i := &IngressFilter{ + Client: test.fields.Client, + Search: test.fields.Search, + Insert: test.fields.Insert, + Update: test.fields.Update, + Upsert: test.fields.Upsert, + } + + got := i.Bind() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } }) } } diff --git a/internal/config/gateway_test.go b/internal/config/gateway_test.go index efc4d1843d..789d6df5bd 100644 --- a/internal/config/gateway_test.go +++ b/internal/config/gateway_test.go @@ -128,7 +128,6 @@ func TestGateway_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/grpc_test.go b/internal/config/grpc_test.go index fc15bfc439..2ce88d2ed1 100644 --- a/internal/config/grpc_test.go +++ b/internal/config/grpc_test.go @@ -80,7 +80,6 @@ func Test_newGRPCClientConfig(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -177,7 +176,6 @@ func TestGRPCClient_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -258,7 +256,6 
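The new IngressFilter section carries the ingress-filter client settings plus per-RPC filter address lists, and its Bind passes each non-nil list through GetActualValues so the addresses can be supplied as environment-variable references. Below is a minimal self-contained sketch of that flow: it mirrors the Bind logic with a local type, reduces GetActualValues to plain env expansion (the real helper also resolves file-prefixed values), and the addresses are hypothetical.

```go
// Minimal sketch of the IngressFilter.Bind flow added in internal/config/filter.go.
package main

import (
	"fmt"
	"os"
)

// ingressFilter mirrors the shape of config.IngressFilter for illustration only.
type ingressFilter struct {
	Search []string
	Insert []string
	Update []string
	Upsert []string
}

// getActualValues stands in for config.GetActualValues, reduced to env expansion.
func getActualValues(vals []string) []string {
	res := make([]string, 0, len(vals))
	for _, v := range vals {
		res = append(res, os.ExpandEnv(v))
	}
	return res
}

// bind mirrors IngressFilter.Bind: each non-nil list is passed through value expansion.
func (i *ingressFilter) bind() *ingressFilter {
	if i.Search != nil {
		i.Search = getActualValues(i.Search)
	}
	if i.Insert != nil {
		i.Insert = getActualValues(i.Insert)
	}
	if i.Update != nil {
		i.Update = getActualValues(i.Update)
	}
	if i.Upsert != nil {
		i.Upsert = getActualValues(i.Upsert)
	}
	return i
}

func main() {
	os.Setenv("FILTER_HOST", "ingress-filter.default.svc.cluster.local") // hypothetical address
	f := (&ingressFilter{
		Search: []string{"${FILTER_HOST}:8081"},
		Insert: []string{"${FILTER_HOST}:8082"},
	}).bind()
	fmt.Println(f.Search, f.Insert) // prints the expanded addresses
}
```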
@@ func TestGRPCClientKeepalive_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -343,7 +340,6 @@ func TestCallOption_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -456,7 +452,6 @@ func TestDialOption_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -553,7 +548,6 @@ func TestGRPCClient_Opts(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/index_test.go b/internal/config/index_test.go index c0f72eacf4..6d676f053d 100644 --- a/internal/config/index_test.go +++ b/internal/config/index_test.go @@ -132,7 +132,6 @@ func TestIndexer_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/lb.go b/internal/config/lb.go new file mode 100644 index 0000000000..e494c5bd7a --- /dev/null +++ b/internal/config/lb.go @@ -0,0 +1,55 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package config providers configuration type and load configuration logic +package config + +type LB struct { + // AgentPort represent agent port number + AgentPort int `json:"agent_port" yaml:"agent_port"` + + // AgentName represent agents meta_name for service discovery + AgentName string `json:"agent_name" yaml:"agent_name"` + + // AgentNamespace represent agent namespace location + AgentNamespace string `json:"agent_namespace" yaml:"agent_namespace"` + + // AgentDNS represent agents dns A record for service discovery + AgentDNS string `json:"agent_dns" yaml:"agent_dns"` + + // NodeName represents node name + NodeName string `json:"node_name" yaml:"node_name"` + + // IndexReplica represents index replication count + IndexReplica int `json:"index_replica" yaml:"index_replica"` + + // Discoverer represent agent discoverer service configuration + Discoverer *DiscovererClient `json:"discoverer" yaml:"discoverer"` +} + +func (g *LB) Bind() *LB { + g.AgentName = GetActualValue(g.AgentName) + g.AgentNamespace = GetActualValue(g.AgentNamespace) + + g.AgentDNS = GetActualValue(g.AgentDNS) + + g.NodeName = GetActualValue(g.NodeName) + + if g.Discoverer != nil { + g.Discoverer = g.Discoverer.Bind() + } + return g +} diff --git a/internal/config/lb_test.go b/internal/config/lb_test.go new file mode 100644 index 0000000000..16bab0be1c --- /dev/null +++ b/internal/config/lb_test.go @@ -0,0 +1,123 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package config providers configuration type and load configuration logic +package config + +import ( + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestLB_Bind(t *testing.T) { + type fields struct { + AgentPort int + AgentName string + AgentNamespace string + AgentDNS string + NodeName string + IndexReplica int + Discoverer *DiscovererClient + } + type want struct { + want *LB + } + type test struct { + name string + fields fields + want want + checkFunc func(want, *LB) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got *LB) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got = %v, want %v", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + AgentPort: 0, + AgentName: "", + AgentNamespace: "", + AgentDNS: "", + NodeName: "", + IndexReplica: 0, + Discoverer: DiscovererClient{}, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + AgentPort: 0, + AgentName: "", + AgentNamespace: "", + AgentDNS: "", + NodeName: "", + IndexReplica: 0, + Discoverer: DiscovererClient{}, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + g := &LB{ + AgentPort: test.fields.AgentPort, + AgentName: test.fields.AgentName, + AgentNamespace: test.fields.AgentNamespace, + AgentDNS: test.fields.AgentDNS, + NodeName: test.fields.NodeName, + IndexReplica: test.fields.IndexReplica, + Discoverer: test.fields.Discoverer, + } + + got := g.Bind() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/config/log_test.go b/internal/config/log_test.go index 06859ad4e2..44944236a5 100644 --- a/internal/config/log_test.go +++ b/internal/config/log_test.go @@ -118,7 +118,6 @@ func TestLogging_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/meta_test.go b/internal/config/meta_test.go index 6f566a09e7..359d547515 100644 --- a/internal/config/meta_test.go +++ b/internal/config/meta_test.go @@ -112,7 +112,6 @@ func TestMeta_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/mysql_test.go b/internal/config/mysql_test.go index e767869287..1dc11854a2 100644 --- a/internal/config/mysql_test.go +++ b/internal/config/mysql_test.go @@ -22,7 +22,9 @@ import ( "reflect" "testing" + "github.com/vdaas/vald/internal/db/rdb/mysql" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestMySQL_Bind(t *testing.T) { @@ -248,3 +250,138 @@ func 
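The new LB section collects the gateway-lb settings: agent discovery parameters, node name, index replica count, and the discoverer client. The sketch below shows how such a section could be declared in YAML and decoded with the same gopkg.in/yaml.v2 path the config package uses; the keys follow the struct tags in lb.go, the values are hypothetical, and a local mirror struct stands in because internal packages are not importable outside the vald module.

```go
// Sketch of decoding an LB-shaped config section; values are illustrative only.
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// lb mirrors the yaml tags of config.LB for illustration.
type lb struct {
	AgentPort      int    `yaml:"agent_port"`
	AgentName      string `yaml:"agent_name"`
	AgentNamespace string `yaml:"agent_namespace"`
	AgentDNS       string `yaml:"agent_dns"`
	NodeName       string `yaml:"node_name"`
	IndexReplica   int    `yaml:"index_replica"`
}

const conf = `
agent_port: 8081
agent_name: vald-agent-ngt
agent_namespace: vald
agent_dns: vald-agent-ngt.vald.svc.cluster.local
index_replica: 3
`

func main() {
	var cfg lb
	if err := yaml.Unmarshal([]byte(conf), &cfg); err != nil {
		panic(err)
	}
	// In the real code path, cfg.Bind() would then expand any environment-variable
	// references in the string fields via GetActualValue.
	fmt.Printf("%+v\n", cfg)
}
```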
TestMySQL_Bind(t *testing.T) { }) } } + +func TestMySQL_Opts(t *testing.T) { + t.Parallel() + type fields struct { + DB string + Host string + Port int + User string + Pass string + Name string + Charset string + Timezone string + InitialPingTimeLimit string + InitialPingDuration string + ConnMaxLifeTime string + MaxOpenConns int + MaxIdleConns int + TLS *TLS + TCP *TCP + } + type want struct { + want []mysql.Option + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, []mysql.Option, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got []mysql.Option, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + DB: "", + Host: "", + Port: 0, + User: "", + Pass: "", + Name: "", + Charset: "", + Timezone: "", + InitialPingTimeLimit: "", + InitialPingDuration: "", + ConnMaxLifeTime: "", + MaxOpenConns: 0, + MaxIdleConns: 0, + TLS: TLS{}, + TCP: TCP{}, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + DB: "", + Host: "", + Port: 0, + User: "", + Pass: "", + Name: "", + Charset: "", + Timezone: "", + InitialPingTimeLimit: "", + InitialPingDuration: "", + ConnMaxLifeTime: "", + MaxOpenConns: 0, + MaxIdleConns: 0, + TLS: TLS{}, + TCP: TCP{}, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MySQL{ + DB: test.fields.DB, + Host: test.fields.Host, + Port: test.fields.Port, + User: test.fields.User, + Pass: test.fields.Pass, + Name: test.fields.Name, + Charset: test.fields.Charset, + Timezone: test.fields.Timezone, + InitialPingTimeLimit: test.fields.InitialPingTimeLimit, + InitialPingDuration: test.fields.InitialPingDuration, + ConnMaxLifeTime: test.fields.ConnMaxLifeTime, + MaxOpenConns: test.fields.MaxOpenConns, + MaxIdleConns: test.fields.MaxIdleConns, + TLS: test.fields.TLS, + TCP: test.fields.TCP, + } + + got, err := m.Opts() + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/config/ngt_test.go b/internal/config/ngt_test.go index 328023e560..2533d8a3b9 100644 --- a/internal/config/ngt_test.go +++ b/internal/config/ngt_test.go @@ -186,7 +186,6 @@ func TestNGT_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/observability_test.go b/internal/config/observability_test.go index 155fb929cb..4df6a13fba 100644 --- a/internal/config/observability_test.go +++ b/internal/config/observability_test.go @@ -114,7 +114,6 @@ func TestObservability_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -192,7 +191,6 @@ func TestCollector_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } 
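TestMySQL_Opts (and the Redis counterpart further down) follow the repository's generated table-driven skeleton: parallel subtests, goroutine-leak verification, optional before/after hooks, and a defaultCheckFunc that individual cases can override. The sketch below shows that skeleton for a hypothetical function, illustrating how a concrete case slots into the TODO placeholders; fmt.Errorf stands in for the internal errors package.

```go
// Minimal sketch of the table-driven test skeleton used throughout this diff.
package example

import (
	"fmt"
	"testing"

	"go.uber.org/goleak"
)

// sum is a hypothetical function under test.
func sum(a, b int) int { return a + b }

func TestSum(t *testing.T) {
	t.Parallel()
	type args struct{ a, b int }
	type want struct{ want int }
	type test struct {
		name      string
		args      args
		want      want
		checkFunc func(want, int) error
	}
	defaultCheckFunc := func(w want, got int) error {
		if got != w.want {
			return fmt.Errorf("got = %v, want %v", got, w.want)
		}
		return nil
	}
	tests := []test{
		{name: "returns 3 when adding 1 and 2", args: args{a: 1, b: 2}, want: want{want: 3}},
	}
	for _, test := range tests {
		test := test // capture the loop variable for the parallel subtest
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			defer goleak.VerifyNone(tt)
			if test.checkFunc == nil {
				test.checkFunc = defaultCheckFunc
			}
			if err := test.checkFunc(test.want, sum(test.args.a, test.args.b)); err != nil {
				tt.Errorf("error = %v", err)
			}
		})
	}
}
```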
@@ -278,7 +276,6 @@ func TestStackdriver_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/redis_test.go b/internal/config/redis_test.go index 55e9007468..f11d2acbbd 100644 --- a/internal/config/redis_test.go +++ b/internal/config/redis_test.go @@ -21,7 +21,9 @@ import ( "reflect" "testing" + "github.com/vdaas/vald/internal/db/kvs/redis" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestRedis_Bind(t *testing.T) { @@ -196,7 +198,189 @@ func TestRedis_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } + }) + } +} + +func TestRedis_Opts(t *testing.T) { + t.Parallel() + type fields struct { + Addrs []string + DB int + DialTimeout string + IdleCheckFrequency string + IdleTimeout string + InitialPingTimeLimit string + InitialPingDuration string + KeyPref string + MaxConnAge string + MaxRedirects int + MaxRetries int + MaxRetryBackoff string + MinIdleConns int + MinRetryBackoff string + Password string + PoolSize int + PoolTimeout string + ReadOnly bool + ReadTimeout string + RouteByLatency bool + RouteRandomly bool + TLS *TLS + TCP *TCP + WriteTimeout string + KVPrefix string + VKPrefix string + PrefixDelimiter string + } + type want struct { + wantOpts []redis.Option + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, []redis.Option, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, gotOpts []redis.Option, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotOpts, w.wantOpts) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOpts, w.wantOpts) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + Addrs: nil, + DB: 0, + DialTimeout: "", + IdleCheckFrequency: "", + IdleTimeout: "", + InitialPingTimeLimit: "", + InitialPingDuration: "", + KeyPref: "", + MaxConnAge: "", + MaxRedirects: 0, + MaxRetries: 0, + MaxRetryBackoff: "", + MinIdleConns: 0, + MinRetryBackoff: "", + Password: "", + PoolSize: 0, + PoolTimeout: "", + ReadOnly: false, + ReadTimeout: "", + RouteByLatency: false, + RouteRandomly: false, + TLS: TLS{}, + TCP: TCP{}, + WriteTimeout: "", + KVPrefix: "", + VKPrefix: "", + PrefixDelimiter: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + Addrs: nil, + DB: 0, + DialTimeout: "", + IdleCheckFrequency: "", + IdleTimeout: "", + InitialPingTimeLimit: "", + InitialPingDuration: "", + KeyPref: "", + MaxConnAge: "", + MaxRedirects: 0, + MaxRetries: 0, + MaxRetryBackoff: "", + MinIdleConns: 0, + MinRetryBackoff: "", + Password: "", + PoolSize: 0, + PoolTimeout: "", + ReadOnly: false, + ReadTimeout: "", + RouteByLatency: false, + RouteRandomly: false, + TLS: TLS{}, + TCP: TCP{}, + WriteTimeout: "", + KVPrefix: "", + VKPrefix: "", + PrefixDelimiter: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &Redis{ + 
Addrs: test.fields.Addrs, + DB: test.fields.DB, + DialTimeout: test.fields.DialTimeout, + IdleCheckFrequency: test.fields.IdleCheckFrequency, + IdleTimeout: test.fields.IdleTimeout, + InitialPingTimeLimit: test.fields.InitialPingTimeLimit, + InitialPingDuration: test.fields.InitialPingDuration, + KeyPref: test.fields.KeyPref, + MaxConnAge: test.fields.MaxConnAge, + MaxRedirects: test.fields.MaxRedirects, + MaxRetries: test.fields.MaxRetries, + MaxRetryBackoff: test.fields.MaxRetryBackoff, + MinIdleConns: test.fields.MinIdleConns, + MinRetryBackoff: test.fields.MinRetryBackoff, + Password: test.fields.Password, + PoolSize: test.fields.PoolSize, + PoolTimeout: test.fields.PoolTimeout, + ReadOnly: test.fields.ReadOnly, + ReadTimeout: test.fields.ReadTimeout, + RouteByLatency: test.fields.RouteByLatency, + RouteRandomly: test.fields.RouteRandomly, + TLS: test.fields.TLS, + TCP: test.fields.TCP, + WriteTimeout: test.fields.WriteTimeout, + KVPrefix: test.fields.KVPrefix, + VKPrefix: test.fields.VKPrefix, + PrefixDelimiter: test.fields.PrefixDelimiter, + } + + gotOpts, err := r.Opts() + if err := test.checkFunc(test.want, gotOpts, err); err != nil { + tt.Errorf("error = %v", err) + } }) } } diff --git a/internal/config/server_test.go b/internal/config/server_test.go index 7b33e4c70e..4cd49f530c 100644 --- a/internal/config/server_test.go +++ b/internal/config/server_test.go @@ -117,7 +117,6 @@ func TestServers_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -214,7 +213,6 @@ func TestServers_GetGRPCStreamConcurrency(t *testing.T) { if err := test.checkFunc(test.want, gotC); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -307,7 +305,6 @@ func TestHTTP_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -424,7 +421,6 @@ func TestGRPC_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -513,7 +509,6 @@ func TestGRPCKeepalive_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -614,7 +609,6 @@ func TestServer_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -715,7 +709,6 @@ func TestServer_Opts(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/sidecar_test.go b/internal/config/sidecar_test.go index 6145cd0918..67792846f0 100644 --- a/internal/config/sidecar_test.go +++ b/internal/config/sidecar_test.go @@ -126,7 +126,6 @@ func TestAgentSidecar_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/tcp_test.go b/internal/config/tcp_test.go index b39cdfdc21..10271a0bcc 100644 --- a/internal/config/tcp_test.go +++ b/internal/config/tcp_test.go @@ -101,7 +101,6 @@ func TestDNS_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -182,7 +181,6 @@ func TestDialer_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -263,7 +261,6 @@ func TestTCP_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -344,7 +341,6 @@ func TestTCP_Opts(t *testing.T) { if err := test.checkFunc(test.want, got); err 
!= nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/tls_test.go b/internal/config/tls_test.go index 9a59e0c44d..7b1f0a984c 100644 --- a/internal/config/tls_test.go +++ b/internal/config/tls_test.go @@ -125,7 +125,6 @@ func TestTLS_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -217,7 +216,6 @@ func TestTLS_Opts(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/config/transport_test.go b/internal/config/transport_test.go index a69a60f24b..21f2d526d5 100644 --- a/internal/config/transport_test.go +++ b/internal/config/transport_test.go @@ -132,7 +132,6 @@ func TestRoundTripper_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -209,7 +208,6 @@ func TestTransport_Bind(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/core/algorithm/algorithm.go b/internal/core/algorithm/algorithm.go new file mode 100644 index 0000000000..adea389fa8 --- /dev/null +++ b/internal/core/algorithm/algorithm.go @@ -0,0 +1,27 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package algorithm defines vald's core ann algorithm interface & constraint. +package algorithm + +import "math" + +const ( + // MaximumVectorDimensionSize is maximum value of vector dimension. + MaximumVectorDimensionSize = math.MaxInt64 + // MinimumVectorDimensionSize is minimum value of vector dimension. 
+ MinimumVectorDimensionSize = 2 +) diff --git a/internal/core/ngt/Makefile b/internal/core/algorithm/ngt/Makefile similarity index 100% rename from internal/core/ngt/Makefile rename to internal/core/algorithm/ngt/Makefile diff --git a/internal/core/ngt/assets/index/grp b/internal/core/algorithm/ngt/assets/index/grp old mode 100755 new mode 100644 similarity index 100% rename from internal/core/ngt/assets/index/grp rename to internal/core/algorithm/ngt/assets/index/grp diff --git a/internal/core/ngt/assets/index/obj b/internal/core/algorithm/ngt/assets/index/obj old mode 100755 new mode 100644 similarity index 100% rename from internal/core/ngt/assets/index/obj rename to internal/core/algorithm/ngt/assets/index/obj diff --git a/internal/core/ngt/assets/index/prf b/internal/core/algorithm/ngt/assets/index/prf old mode 100755 new mode 100644 similarity index 100% rename from internal/core/ngt/assets/index/prf rename to internal/core/algorithm/ngt/assets/index/prf diff --git a/internal/core/ngt/assets/index/tre b/internal/core/algorithm/ngt/assets/index/tre old mode 100755 new mode 100644 similarity index 100% rename from internal/core/ngt/assets/index/tre rename to internal/core/algorithm/ngt/assets/index/tre diff --git a/internal/core/ngt/assets/test.ssv b/internal/core/algorithm/ngt/assets/test.ssv old mode 100755 new mode 100644 similarity index 100% rename from internal/core/ngt/assets/test.ssv rename to internal/core/algorithm/ngt/assets/test.ssv diff --git a/internal/core/ngt/model.go b/internal/core/algorithm/ngt/model.go similarity index 93% rename from internal/core/ngt/model.go rename to internal/core/algorithm/ngt/model.go index f518f7a8b9..e37838011b 100644 --- a/internal/core/ngt/model.go +++ b/internal/core/algorithm/ngt/model.go @@ -17,7 +17,7 @@ // Package ngt provides implementation of Go API for https://github.com/yahoojapan/NGT package ngt -// SearchResult is struct for comfortable use in Go +// SearchResult is struct for comfortable use in Go. type SearchResult struct { ID uint32 Distance float32 diff --git a/internal/core/ngt/ngt.go b/internal/core/algorithm/ngt/ngt.go similarity index 83% rename from internal/core/ngt/ngt.go rename to internal/core/algorithm/ngt/ngt.go index b65ba45561..cce92b848d 100644 --- a/internal/core/ngt/ngt.go +++ b/internal/core/algorithm/ngt/ngt.go @@ -23,18 +23,20 @@ package ngt #include */ import "C" + import ( "os" "reflect" "sync" "unsafe" + "github.com/vdaas/vald/internal/core/algorithm" "github.com/vdaas/vald/internal/errors" ) type ( - // NGT is core interface + // NGT is core interface. NGT interface { // Search returns search result as []SearchResult Search(vec []float32, size int, epsilon, radius float32) ([]SearchResult, error) @@ -94,59 +96,67 @@ type ( } ) -// ObjectType is alias of object type in NGT +// ObjectType is alias of object type in NGT. type objectType int -// DistanceType is alias of distance type in NGT +// DistanceType is alias of distance type in NGT. type distanceType int const ( // ------------------------------------------------------------- // Object Type Definition // ------------------------------------------------------------- - // ObjectNone is unknown object type + // ObjectNone is unknown object type. ObjectNone objectType = iota - // Uint8 is 8bit unsigned integer + // Uint8 is 8bit unsigned integer. Uint8 - // Float is 32bit floating point number + // Float is 32bit floating point number. 
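The new internal/core/algorithm package centralizes the vector dimension bounds, while the relocated NGT package keeps its own 1<<16 ceiling on top of the shared minimum. A sketch of a validation helper built from those constants follows; validateDimension is hypothetical, and the snippet only compiles inside the vald module since internal packages cannot be imported elsewhere.

```go
// Hedged sketch: combining the shared minimum with an algorithm-specific ceiling.
package main

import (
	"fmt"

	"github.com/vdaas/vald/internal/core/algorithm"
)

// ngtVectorDimensionSizeLimit mirrors the constant renamed in ngt.go.
const ngtVectorDimensionSizeLimit = 1 << 16

// validateDimension is a hypothetical helper, not part of the diff.
func validateDimension(size int) error {
	if size < algorithm.MinimumVectorDimensionSize || size > ngtVectorDimensionSizeLimit {
		return fmt.Errorf("invalid dimension size %d: must be within [%d, %d]",
			size, algorithm.MinimumVectorDimensionSize, ngtVectorDimensionSizeLimit)
	}
	return nil
}

func main() {
	for _, d := range []int{1, 128, 1 << 17} {
		fmt.Println(d, validateDimension(d))
	}
}
```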
Float - // ------------------------------------------------------------- + // -------------------------------------------------------------. // ------------------------------------------------------------- // Distance Type Definition // ------------------------------------------------------------- - // DistanceNone is unknown distance type + // DistanceNone is unknown distance type. DistanceNone distanceType = iota - 1 - // L1 is l1 norm + // L1 is l1 norm. L1 - // L2 is l2 norm + // L2 is l2 norm. L2 - // Angle is angle distance + // Angle is angle distance. Angle - // Hamming is hamming distance + // Hamming is hamming distance. Hamming - // Cosine is cosine distance + // Cosine is cosine distance. Cosine - // NormalizedAngle is angle distance with normalization + // NormalizedAngle is angle distance with normalization. NormalizedAngle - // NormalizedCosine is cosine distance with normalization + // NormalizedCosine is cosine distance with normalization. NormalizedCosine - // Jaccard is jaccard distance + // Jaccard is jaccard distance. Jaccard - // ------------------------------------------------------------- + // -------------------------------------------------------------. + // ------------------------------------------------------------- // ErrorCode is false + // -------------------------------------------------------------. ErrorCode = C._Bool(false) + // -------------------------------------------------------------. - dimensionLimit = 1 << 16 + // ------------------------------------------------------------- + // dimension constraints + // -------------------------------------------------------------. + ngtVectorDimensionSizeLimit = 1 << 16 + minimumDimensionSize = algorithm.MinimumVectorDimensionSize + // -------------------------------------------------------------. ) -// New returns NGT instance with recreating empty index file +// New returns NGT instance with recreating empty index file. func New(opts ...Option) (NGT, error) { return gen(false, opts...) } -// Load returns NGT instance from existing index file +// Load returns NGT instance from existing index file. func Load(opts ...Option) (NGT, error) { return gen(true, opts...) } @@ -275,8 +285,12 @@ func (n *ngt) loadObjectSpace() error { return nil } -// Search returns search result as []SearchResult +// Search returns search result as []SearchResult. func (n *ngt) Search(vec []float32, size int, epsilon, radius float32) ([]SearchResult, error) { + if len(vec) != int(n.dimension) { + return nil, errors.ErrIncompatibleDimensionSize(len(vec), int(n.dimension)) + } + results := C.ngt_create_empty_results(n.ebuf) defer C.ngt_destroy_results(results) @@ -336,6 +350,10 @@ func (n *ngt) Search(vec []float32, size int, epsilon, radius float32) ([]Search // Insert returns NGT object id. // This only stores not indexing, you must call CreateIndex and SaveIndex. 
func (n *ngt) Insert(vec []float32) (uint, error) { + dim := int(n.dimension) + if len(vec) != dim { + return 0, errors.ErrIncompatibleDimensionSize(len(vec), dim) + } n.mu.Lock() id := C.ngt_insert_index_as_float(n.index, (*C.float)(&vec[0]), C.uint32_t(n.dimension), n.ebuf) n.mu.Unlock() @@ -373,14 +391,20 @@ func (n *ngt) BulkInsert(vecs [][]float32) ([]uint, []error) { ids := make([]uint, 0, len(vecs)) errs := make([]error, 0, len(vecs)) + dim := int(n.dimension) var id uint n.mu.Lock() for _, vec := range vecs { - // n.mu.Lock() - id = uint(C.ngt_insert_index_as_float(n.index, (*C.float)(&vec[0]), C.uint32_t(n.dimension), n.ebuf)) - // n.mu.Unlock() - if id == 0 { - errs = append(errs, n.newGoError(n.ebuf)) + id = 0 + if len(vec) != dim { + errs = append(errs, errors.ErrIncompatibleDimensionSize(len(vec), dim)) + } else { + // n.mu.Lock() + id = uint(C.ngt_insert_index_as_float(n.index, (*C.float)(&vec[0]), C.uint32_t(n.dimension), n.ebuf)) + // n.mu.Unlock() + if id == 0 { + errs = append(errs, n.newGoError(n.ebuf)) + } } ids = append(ids, id) } @@ -508,8 +532,8 @@ func (n *ngt) GetVector(id uint) ([]float32, error) { if results == nil { return nil, n.newGoError(n.ebuf) } - ret = (*[dimensionLimit]float32)(unsafe.Pointer(results))[:dimension:dimension] - // for _, elem := range (*[dimensionLimit]C.float)(unsafe.Pointer(results))[:dimension:dimension]{ + ret = (*[ngtVectorDimensionSizeLimit]float32)(unsafe.Pointer(results))[:dimension:dimension] + // for _, elem := range (*[ngtVectorDimensionSizeLimit]C.float)(unsafe.Pointer(results))[:dimension:dimension]{ // ret = append(ret, float32(elem)) // } case Uint8: @@ -520,7 +544,7 @@ func (n *ngt) GetVector(id uint) ([]float32, error) { return nil, n.newGoError(n.ebuf) } ret = make([]float32, 0, dimension) - for _, elem := range (*[dimensionLimit]C.uint8_t)(unsafe.Pointer(results))[:dimension:dimension] { + for _, elem := range (*[ngtVectorDimensionSizeLimit]C.uint8_t)(unsafe.Pointer(results))[:dimension:dimension] { ret = append(ret, float32(elem)) } default: diff --git a/internal/core/ngt/option.go b/internal/core/algorithm/ngt/option.go similarity index 96% rename from internal/core/ngt/option.go rename to internal/core/algorithm/ngt/option.go index 87e54b5d9b..f4950b60c3 100644 --- a/internal/core/ngt/option.go +++ b/internal/core/algorithm/ngt/option.go @@ -22,6 +22,7 @@ package ngt #include */ import "C" + import ( "strings" @@ -38,7 +39,7 @@ var ( defaultOpts = []Option{ WithIndexPath("/tmp/ngt-" + string(fastime.FormattedNow())), - WithDimension(0), + WithDimension(minimumDimensionSize), WithDefaultRadius(DefaultRadius), WithDefaultEpsilon(DefaultEpsilon), WithDefaultPoolSize(DefaultPoolSize), @@ -76,13 +77,16 @@ func WithBulkInsertChunkSize(size int) Option { func WithDimension(size int) Option { return func(n *ngt) error { - if size > dimensionLimit { - return errors.ErrDimensionLimitExceed(size, dimensionLimit) + if size > ngtVectorDimensionSizeLimit || size < minimumDimensionSize { + return errors.ErrInvalidDimensionSize(size, ngtVectorDimensionSizeLimit) } + if C.ngt_set_property_dimension(n.prop, C.int32_t(size), n.ebuf) == ErrorCode { return errors.ErrFailedToSetDimension(n.newGoError(n.ebuf)) } + n.dimension = C.int32_t(size) + return nil } } diff --git a/internal/core/ngt/util.go b/internal/core/algorithm/ngt/util.go similarity index 100% rename from internal/core/ngt/util.go rename to internal/core/algorithm/ngt/util.go diff --git a/internal/core/converter/tensorflow/option.go 
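Search, Insert, and BulkInsert now reject vectors whose length differs from the configured dimension before calling into the C API, and WithDimension both enforces the [minimumDimensionSize, ngtVectorDimensionSizeLimit] range and records the value on the struct. Below is a pure-Go sketch of the BulkInsert-style pre-check, with the cgo insert call replaced by a stub and illustrative names, so the error-collection shape can be seen without NGT installed.

```go
// Pure-Go sketch of the dimension pre-checks added in ngt.go; names are illustrative.
package main

import "fmt"

type index struct {
	dimension int
}

// errIncompatibleDimensionSize stands in for errors.ErrIncompatibleDimensionSize.
func errIncompatibleDimensionSize(got, want int) error {
	return fmt.Errorf("incompatible dimension size: got %d, configured %d", got, want)
}

// bulkInsert mirrors the new flow: vectors of the wrong length collect an error and
// an id of 0, valid ones are handed to the (stubbed) native insert call.
func (n *index) bulkInsert(vecs [][]float32) ([]uint, []error) {
	ids := make([]uint, 0, len(vecs))
	errs := make([]error, 0, len(vecs))
	for i, vec := range vecs {
		var id uint
		if len(vec) != n.dimension {
			errs = append(errs, errIncompatibleDimensionSize(len(vec), n.dimension))
		} else {
			id = uint(i + 1) // stub for C.ngt_insert_index_as_float
		}
		ids = append(ids, id)
	}
	return ids, errs
}

func main() {
	n := &index{dimension: 3}
	ids, errs := n.bulkInsert([][]float32{{1, 2, 3}, {1, 2}})
	fmt.Println(ids, errs) // second vector is rejected before reaching the native index
}
```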
b/internal/core/converter/tensorflow/option.go index 70ed8e32ec..75a7371151 100644 --- a/internal/core/converter/tensorflow/option.go +++ b/internal/core/converter/tensorflow/option.go @@ -24,14 +24,12 @@ import ( // Option is tensorflow configure. type Option func(*tensorflow) -var ( - defaultOpts = []Option{ - withLoadFunc(tf.LoadSavedModel), // set to default - WithOperations(), // set to default - WithSessionOptions(nil), // set to default - WithNdim(0), // set to default - } -) +var defaultOpts = []Option{ + withLoadFunc(tf.LoadSavedModel), // set to default + WithOperations(), // set to default + WithSessionOptions(nil), // set to default + WithNdim(0), // set to default +} // WithSessionOptions returns Option that sets options. func WithSessionOptions(opts *SessionOptions) Option { diff --git a/internal/core/converter/tensorflow/option_test.go b/internal/core/converter/tensorflow/option_test.go index 8a132da335..9e32df0c4f 100644 --- a/internal/core/converter/tensorflow/option_test.go +++ b/internal/core/converter/tensorflow/option_test.go @@ -1074,3 +1074,117 @@ func TestWithNdim(t *testing.T) { }) } } + +func Test_withLoadFunc(t *testing.T) { + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + loadFunc func(exportDir string, tags []string, options *SessionOptions) (*tf.SavedModel, error) + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got error = %v, want %v", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got = %v, want %v", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got = %v, want %v", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + loadFunc: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + loadFunc: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := withLoadFunc(test.args.loadFunc) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := withLoadFunc(test.args.loadFunc) + obj := new(T) + got(obj) + if err := 
test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/internal/db/kvs/redis/hook.go b/internal/db/kvs/redis/hook.go index 1d88cfbd60..972ffc9162 100644 --- a/internal/db/kvs/redis/hook.go +++ b/internal/db/kvs/redis/hook.go @@ -20,5 +20,7 @@ import ( redis "github.com/go-redis/redis/v7" ) -type Hook = redis.Hook -type Cmder = redis.Cmder +type ( + Hook = redis.Hook + Cmder = redis.Cmder +) diff --git a/internal/db/kvs/redis/option.go b/internal/db/kvs/redis/option.go index a62c123d08..7d61f6843f 100644 --- a/internal/db/kvs/redis/option.go +++ b/internal/db/kvs/redis/option.go @@ -31,12 +31,10 @@ import ( // Option represents the functional option for redisClient. type Option func(*redisClient) error -var ( - defaultOpts = []Option{ - WithInitialPingDuration("30ms"), - WithInitialPingTimeLimit("5m"), - } -) +var defaultOpts = []Option{ + WithInitialPingDuration("30ms"), + WithInitialPingTimeLimit("5m"), +} // WithDialer returns the option to set the dialer. func WithDialer(der tcp.Dialer) Option { @@ -362,7 +360,7 @@ func WithInitialPingDuration(dur string) Option { } } -// WithHooks returns the option to add hooks +// WithHooks returns the option to add hooks. func WithHooks(hooks ...Hook) Option { return func(r *redisClient) error { if hooks == nil { diff --git a/internal/db/kvs/redis/redis.go b/internal/db/kvs/redis/redis.go index b43de02302..777eb60453 100644 --- a/internal/db/kvs/redis/redis.go +++ b/internal/db/kvs/redis/redis.go @@ -29,10 +29,8 @@ import ( "github.com/vdaas/vald/internal/net/tcp" ) -var ( - // Nil is a type alias of redis.Nil. - Nil = redis.Nil -) +// Nil is a type alias of redis.Nil. +var Nil = redis.Nil // Connector is an interface to connect to Redis servers. type Connector interface { diff --git a/internal/db/kvs/redis/redis_test.go b/internal/db/kvs/redis/redis_test.go index cb26287842..5b8c94bfd2 100644 --- a/internal/db/kvs/redis/redis_test.go +++ b/internal/db/kvs/redis/redis_test.go @@ -35,14 +35,12 @@ import ( "go.uber.org/goleak" ) -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper"), - goleak.IgnoreTopFunction("github.com/go-redis/redis/v7.(*ClusterClient).reaper"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. 
+var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), + goleak.IgnoreTopFunction("github.com/go-redis/redis/v7/internal/pool.(*ConnPool).reaper"), + goleak.IgnoreTopFunction("github.com/go-redis/redis/v7.(*ClusterClient).reaper"), +} func TestMain(m *testing.M) { log.Init() @@ -631,7 +629,6 @@ func Test_redisClient_newSentinelClient(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -878,7 +875,6 @@ func Test_redisClient_newClusterClient(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1037,7 +1033,6 @@ func Test_redisClient_Connect(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/db/nosql/cassandra/cassandra_mock.go b/internal/db/nosql/cassandra/cassandra_mock.go index 8a741d4e01..4896ad6787 100644 --- a/internal/db/nosql/cassandra/cassandra_mock.go +++ b/internal/db/nosql/cassandra/cassandra_mock.go @@ -1,3 +1,18 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// package cassandra import "github.com/gocql/gocql" diff --git a/internal/db/nosql/cassandra/cassandra_test.go b/internal/db/nosql/cassandra/cassandra_test.go index 90ca6e7622..9833da1216 100644 --- a/internal/db/nosql/cassandra/cassandra_test.go +++ b/internal/db/nosql/cassandra/cassandra_test.go @@ -34,58 +34,56 @@ import ( "go.uber.org/goleak" ) -var ( - // default comparator option for client - clientComparatorOpts = []comparator.Option{ - comparator.AllowUnexported(client{}), - comparator.AllowUnexported(gocql.ClusterConfig{}), - comparator.Comparer(func(x, y retryPolicy) bool { - return reflect.DeepEqual(x, y) - }), - comparator.Comparer(func(x, y reconnectionPolicy) bool { - return reflect.DeepEqual(x, y) - }), - comparator.Comparer(func(x, y poolConfig) bool { - return reflect.DeepEqual(x, y) - }), - comparator.Comparer(func(x, y hostFilter) bool { - return reflect.DeepEqual(x, y) - }), - comparator.Comparer(func(x, y gocql.PoolConfig) bool { - return reflect.DeepEqual(x, y) - }), - comparator.Comparer(func(x, y gocql.HostSelectionPolicy) bool { - return reflect.DeepEqual(x, y) - }), - comparator.Comparer(func(x, y func(h *gocql.HostInfo) (gocql.Authenticator, error)) bool { - if (x == nil && y != nil) || (x != nil && y == nil) { - return false - } - if x == nil && y == nil { - return true - } - return reflect.ValueOf(x).Pointer() == reflect.ValueOf(y).Pointer() - }), - comparator.Comparer(func(x, y gocql.HostFilter) bool { - if (x == nil && y != nil) || (x != nil && y == nil) { - return false - } - if x == nil && y == nil { - return true - } +// default comparator option for client +var clientComparatorOpts = []comparator.Option{ + comparator.AllowUnexported(client{}), + comparator.AllowUnexported(gocql.ClusterConfig{}), + 
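The shared goleakIgnoreOptions slice exists because fastime and the go-redis connection reaper keep goroutines running after a test body finishes, which goleak would otherwise report as leaks. A minimal sketch of how those ignore options are applied follows; the test and the background work it alludes to are hypothetical.

```go
// Sketch of applying the goleak ignore options declared in redis_test.go.
package example

import (
	"testing"

	"go.uber.org/goleak"
)

var goleakIgnoreOptions = []goleak.Option{
	// Background goroutine started by an external dependency; not a leak in our code.
	goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"),
}

func TestSomethingWithBackgroundWork(t *testing.T) {
	// Without the ignore options, VerifyNone would fail because of the timer goroutine.
	defer goleak.VerifyNone(t, goleakIgnoreOptions...)

	// ... exercise code that starts fastime internally ...
}
```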
comparator.Comparer(func(x, y retryPolicy) bool { + return reflect.DeepEqual(x, y) + }), + comparator.Comparer(func(x, y reconnectionPolicy) bool { + return reflect.DeepEqual(x, y) + }), + comparator.Comparer(func(x, y poolConfig) bool { + return reflect.DeepEqual(x, y) + }), + comparator.Comparer(func(x, y hostFilter) bool { + return reflect.DeepEqual(x, y) + }), + comparator.Comparer(func(x, y gocql.PoolConfig) bool { + return reflect.DeepEqual(x, y) + }), + comparator.Comparer(func(x, y gocql.HostSelectionPolicy) bool { + return reflect.DeepEqual(x, y) + }), + comparator.Comparer(func(x, y func(h *gocql.HostInfo) (gocql.Authenticator, error)) bool { + if (x == nil && y != nil) || (x != nil && y == nil) { + return false + } + if x == nil && y == nil { + return true + } + return reflect.ValueOf(x).Pointer() == reflect.ValueOf(y).Pointer() + }), + comparator.Comparer(func(x, y gocql.HostFilter) bool { + if (x == nil && y != nil) || (x != nil && y == nil) { + return false + } + if x == nil && y == nil { + return true + } - switch x.(type) { - case gocql.HostFilterFunc: - return true - } - return reflect.ValueOf(x).Pointer() == reflect.ValueOf(y).Pointer() - }), + switch x.(type) { + case gocql.HostFilterFunc: + return true + } + return reflect.ValueOf(x).Pointer() == reflect.ValueOf(y).Pointer() + }), - comparator.Comparer(func(x, y tls.Config) bool { - return reflect.DeepEqual(x, y) - }), - } -) + comparator.Comparer(func(x, y tls.Config) bool { + return reflect.DeepEqual(x, y) + }), +} func TestMain(m *testing.M) { log.Init() @@ -1149,7 +1147,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1343,7 +1340,6 @@ func Test_client_Open(t *testing.T) { if err := test.checkFunc(c, test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1503,7 +1499,6 @@ func Test_client_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1680,7 +1675,6 @@ func Test_client_Query(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1762,7 +1756,6 @@ func TestSelect(t *testing.T) { if err := test.checkFunc(test.want, gotStmt, gotNames); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1834,7 +1827,6 @@ func TestDelete(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1891,7 +1883,6 @@ func TestInsert(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1946,7 +1937,6 @@ func TestUpdate(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1994,7 +1984,6 @@ func TestBatch(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -2049,7 +2038,6 @@ func TestEq(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -2104,7 +2092,6 @@ func TestIn(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -2159,7 +2146,6 @@ func TestContains(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -2333,7 +2319,6 @@ func TestWrapErrorWithKeys(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git 
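clientComparatorOpts works around the fact that function-typed fields such as gocql's AuthProvider and HostFilter cannot be compared structurally: the comparers treat both-nil as equal and otherwise fall back to pointer identity. The sketch below expresses the same trick against go-cmp directly, on the assumption that the internal comparator package is a thin wrapper over it; the authFunc type and values are made up for illustration.

```go
// Sketch of comparing structs that hold function-typed fields via pointer identity.
package main

import (
	"fmt"
	"reflect"

	"github.com/google/go-cmp/cmp"
)

type authFunc func(host string) (string, error)

type clusterConfig struct {
	Authenticator authFunc
}

func main() {
	f := func(host string) (string, error) { return "token", nil }

	opts := []cmp.Option{
		cmp.Comparer(func(x, y authFunc) bool {
			if x == nil && y == nil {
				return true
			}
			if x == nil || y == nil {
				return false
			}
			// Function values cannot be compared structurally; use pointer identity.
			return reflect.ValueOf(x).Pointer() == reflect.ValueOf(y).Pointer()
		}),
	}

	a := clusterConfig{Authenticator: f}
	b := clusterConfig{Authenticator: f}
	fmt.Println(cmp.Equal(a, b, opts...)) // true: both fields hold the same function value
}
```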
a/internal/db/nosql/cassandra/conviction_test.go b/internal/db/nosql/cassandra/conviction_test.go index 773ad49049..c32ea7713e 100644 --- a/internal/db/nosql/cassandra/conviction_test.go +++ b/internal/db/nosql/cassandra/conviction_test.go @@ -70,7 +70,6 @@ func TestNewConvictionPolicy(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -133,7 +132,6 @@ func Test_convictionPolicy_AddFailure(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/db/nosql/cassandra/observer.go b/internal/db/nosql/cassandra/observer.go index be7a8c0ec9..01713d3b63 100644 --- a/internal/db/nosql/cassandra/observer.go +++ b/internal/db/nosql/cassandra/observer.go @@ -20,8 +20,10 @@ import ( "github.com/gocql/gocql" ) -type QueryObserver = gocql.QueryObserver -type ObservedQuery = gocql.ObservedQuery +type ( + QueryObserver = gocql.QueryObserver + ObservedQuery = gocql.ObservedQuery +) type BatchObserver = gocql.BatchObserver diff --git a/internal/db/nosql/cassandra/option.go b/internal/db/nosql/cassandra/option.go index fe8e15fae6..5da6d28d45 100644 --- a/internal/db/nosql/cassandra/option.go +++ b/internal/db/nosql/cassandra/option.go @@ -34,38 +34,36 @@ import ( // https://pkg.go.dev/github.com/gocql/gocql?tab=doc#ClusterConfig type Option func(*client) error -var ( - defaultOpts = []Option{ - WithCQLVersion("3.0.0"), - WithConnectTimeout("600ms"), - WithConsistency(cQuorumKey), - WithDCAwareRouting(false), - WithDefaultIdempotence(false), - WithDefaultTimestamp(true), - WithDisableInitialHostLookup(false), - WithDisableNodeStatusEvents(false), - WithDisableSkipMetadata(false), - WithDisableTopologyEvents(false), - WithEnableHostVerification(false), - WithIgnorePeerAddr(false), - WithMaxPreparedStmts(1000), - WithMaxRoutingKeyInfo(1000), - WithMaxWaitSchemaAgreement("1m"), - WithNonLocalReplicasFallback(false), - WithNumConns(2), - WithPageSize(5000), - WithPort(9042), - WithProtoVersion(0), - WithReconnectInterval("1m"), - WithSerialConsistency(scLocalSerialKey), - WithShuffleReplicas(false), - WithTimeout("600ms"), - WithTokenAwareHostPolicy(true), - WithWriteCoalesceWaitTime("200µs"), - } -) - -// WithHosts returns the option to set the hosts +var defaultOpts = []Option{ + WithCQLVersion("3.0.0"), + WithConnectTimeout("600ms"), + WithConsistency(cQuorumKey), + WithDCAwareRouting(false), + WithDefaultIdempotence(false), + WithDefaultTimestamp(true), + WithDisableInitialHostLookup(false), + WithDisableNodeStatusEvents(false), + WithDisableSkipMetadata(false), + WithDisableTopologyEvents(false), + WithEnableHostVerification(false), + WithIgnorePeerAddr(false), + WithMaxPreparedStmts(1000), + WithMaxRoutingKeyInfo(1000), + WithMaxWaitSchemaAgreement("1m"), + WithNonLocalReplicasFallback(false), + WithNumConns(2), + WithPageSize(5000), + WithPort(9042), + WithProtoVersion(0), + WithReconnectInterval("1m"), + WithSerialConsistency(scLocalSerialKey), + WithShuffleReplicas(false), + WithTimeout("600ms"), + WithTokenAwareHostPolicy(true), + WithWriteCoalesceWaitTime("200µs"), +} + +// WithHosts returns the option to set the hosts. func WithHosts(hosts ...string) Option { return func(c *client) error { if len(hosts) == 0 { @@ -80,7 +78,7 @@ func WithHosts(hosts ...string) Option { } } -// WithDialer returns the option to set the dialer +// WithDialer returns the option to set the dialer. 
func WithDialer(der gocql.Dialer) Option { return func(c *client) error { if der == nil { @@ -91,7 +89,7 @@ func WithDialer(der gocql.Dialer) Option { } } -// WithCQLVersion returns the option to set the CQL version +// WithCQLVersion returns the option to set the CQL version. func WithCQLVersion(version string) Option { return func(c *client) error { if len(version) == 0 { @@ -102,7 +100,7 @@ func WithCQLVersion(version string) Option { } } -// WithProtoVersion returns the option to set the proto version +// WithProtoVersion returns the option to set the proto version. func WithProtoVersion(version int) Option { return func(c *client) error { if version < 0 { @@ -113,7 +111,7 @@ func WithProtoVersion(version int) Option { } } -// WithTimeout returns the option to set the cassandra connect timeout time +// WithTimeout returns the option to set the cassandra connect timeout time. func WithTimeout(dur string) Option { return func(c *client) error { if len(dur) == 0 { @@ -128,7 +126,7 @@ func WithTimeout(dur string) Option { } } -// WithConnectTimeout returns the option to set the cassandra initial connection timeout +// WithConnectTimeout returns the option to set the cassandra initial connection timeout. func WithConnectTimeout(dur string) Option { return func(c *client) error { if len(dur) == 0 { @@ -144,7 +142,7 @@ func WithConnectTimeout(dur string) Option { } } -// WithPort returns the option to set the port number +// WithPort returns the option to set the port number. func WithPort(port int) Option { return func(c *client) error { if port <= 0 || port > math.MaxUint16 { @@ -155,7 +153,7 @@ func WithPort(port int) Option { } } -// WithKeyspace returns the option to set the keyspace +// WithKeyspace returns the option to set the keyspace. func WithKeyspace(keyspace string) Option { return func(c *client) error { if len(keyspace) == 0 { @@ -166,7 +164,7 @@ func WithKeyspace(keyspace string) Option { } } -// WithNumConns returns the option to set the number of connection per host +// WithNumConns returns the option to set the number of connection per host. func WithNumConns(numConns int) Option { return func(c *client) error { if numConns < 0 { @@ -201,7 +199,7 @@ var ( } ) -// WithConsistency returns the option to set the cassandra consistency level +// WithConsistency returns the option to set the cassandra consistency level. func WithConsistency(consistency string) Option { return func(c *client) error { if len(consistency) == 0 { @@ -225,7 +223,7 @@ var ( } ) -// WithSerialConsistency returns the option to set the cassandra serial consistency level +// WithSerialConsistency returns the option to set the cassandra serial consistency level. func WithSerialConsistency(consistency string) Option { return func(c *client) error { if len(consistency) == 0 { @@ -240,7 +238,7 @@ func WithSerialConsistency(consistency string) Option { } } -// WithCompressor returns the option to set the compressor +// WithCompressor returns the option to set the compressor. func WithCompressor(compressor gocql.Compressor) Option { return func(c *client) error { if compressor == nil { @@ -251,7 +249,7 @@ func WithCompressor(compressor gocql.Compressor) Option { } } -// WithUsername returns the option to set the username +// WithUsername returns the option to set the username. 
func WithUsername(username string) Option { return func(c *client) error { if len(username) == 0 { @@ -262,7 +260,7 @@ func WithUsername(username string) Option { } } -// WithPassword returns the option to set the password +// WithPassword returns the option to set the password. func WithPassword(password string) Option { return func(c *client) error { if len(password) == 0 { @@ -273,7 +271,7 @@ func WithPassword(password string) Option { } } -// WithAuthProvider returns the option to set the auth provider +// WithAuthProvider returns the option to set the auth provider. func WithAuthProvider(authProvider func(h *gocql.HostInfo) (gocql.Authenticator, error)) Option { return func(c *client) error { if authProvider == nil { @@ -284,7 +282,7 @@ func WithAuthProvider(authProvider func(h *gocql.HostInfo) (gocql.Authenticator, } } -// WithRetryPolicyNumRetries returns the option to set the number of retries +// WithRetryPolicyNumRetries returns the option to set the number of retries. func WithRetryPolicyNumRetries(n int) Option { return func(c *client) error { if n < 0 { @@ -295,7 +293,7 @@ func WithRetryPolicyNumRetries(n int) Option { } } -// WithRetryPolicyMinDuration returns the option to set the retry min duration +// WithRetryPolicyMinDuration returns the option to set the retry min duration. func WithRetryPolicyMinDuration(minDuration string) Option { return func(c *client) error { if len(minDuration) == 0 { @@ -310,7 +308,7 @@ func WithRetryPolicyMinDuration(minDuration string) Option { } } -// WithRetryPolicyMaxDuration returns the option to set the retry max duration +// WithRetryPolicyMaxDuration returns the option to set the retry max duration. func WithRetryPolicyMaxDuration(maxDuration string) Option { return func(c *client) error { if len(maxDuration) == 0 { @@ -325,7 +323,7 @@ func WithRetryPolicyMaxDuration(maxDuration string) Option { } } -// WithReconnectionPolicyInitialInterval returns the option to set the reconnect initial interval +// WithReconnectionPolicyInitialInterval returns the option to set the reconnect initial interval. func WithReconnectionPolicyInitialInterval(initialInterval string) Option { return func(c *client) error { if len(initialInterval) == 0 { @@ -340,7 +338,7 @@ func WithReconnectionPolicyInitialInterval(initialInterval string) Option { } } -// WithReconnectionPolicyMaxRetries returns the option to set the reconnect max retries +// WithReconnectionPolicyMaxRetries returns the option to set the reconnect max retries. func WithReconnectionPolicyMaxRetries(maxRetries int) Option { return func(c *client) error { if maxRetries < 0 { @@ -351,7 +349,7 @@ func WithReconnectionPolicyMaxRetries(maxRetries int) Option { } } -// WithSocketKeepalive returns the option to set the socket keepalive time +// WithSocketKeepalive returns the option to set the socket keepalive time. func WithSocketKeepalive(socketKeepalive string) Option { return func(c *client) error { if len(socketKeepalive) == 0 { @@ -366,7 +364,7 @@ func WithSocketKeepalive(socketKeepalive string) Option { } } -// WithMaxPreparedStmts returns the option to set the max prepared statement +// WithMaxPreparedStmts returns the option to set the max prepared statement. 
func WithMaxPreparedStmts(maxPreparedStmts int) Option { return func(c *client) error { if maxPreparedStmts < 0 { @@ -377,7 +375,7 @@ func WithMaxPreparedStmts(maxPreparedStmts int) Option { } } -// WithMaxRoutingKeyInfo returns the option to set the max routing key info +// WithMaxRoutingKeyInfo returns the option to set the max routing key info. func WithMaxRoutingKeyInfo(maxRoutingKeyInfo int) Option { return func(c *client) error { if maxRoutingKeyInfo < 0 { @@ -388,7 +386,7 @@ func WithMaxRoutingKeyInfo(maxRoutingKeyInfo int) Option { } } -// WithPageSize returns the option to set the page size +// WithPageSize returns the option to set the page size. func WithPageSize(pageSize int) Option { return func(c *client) error { if pageSize < 0 { @@ -399,7 +397,7 @@ func WithPageSize(pageSize int) Option { } } -// WithTLS returns the option to set the TLS config +// WithTLS returns the option to set the TLS config. func WithTLS(tls *tls.Config) Option { return func(c *client) error { if tls == nil { @@ -410,7 +408,7 @@ func WithTLS(tls *tls.Config) Option { } } -// WithTLSCertPath returns the option to set the TLS cert path +// WithTLSCertPath returns the option to set the TLS cert path. func WithTLSCertPath(certPath string) Option { return func(c *client) error { if len(certPath) == 0 { @@ -421,7 +419,7 @@ func WithTLSCertPath(certPath string) Option { } } -// WithTLSKeyPath returns the option to set the TLS key path +// WithTLSKeyPath returns the option to set the TLS key path. func WithTLSKeyPath(keyPath string) Option { return func(c *client) error { if len(keyPath) == 0 { @@ -432,7 +430,7 @@ func WithTLSKeyPath(keyPath string) Option { } } -// WithTLSCAPath returns the option to set the TLS CA path +// WithTLSCAPath returns the option to set the TLS CA path. func WithTLSCAPath(caPath string) Option { return func(c *client) error { if len(caPath) == 0 { @@ -443,7 +441,7 @@ func WithTLSCAPath(caPath string) Option { } } -// WithEnableHostVerification returns the option to set the host verification enable flag +// WithEnableHostVerification returns the option to set the host verification enable flag. func WithEnableHostVerification(enableHostVerification bool) Option { return func(c *client) error { c.enableHostVerification = enableHostVerification @@ -451,7 +449,7 @@ func WithEnableHostVerification(enableHostVerification bool) Option { } } -// WithDefaultTimestamp returns the option to set the default timestamp enable flag +// WithDefaultTimestamp returns the option to set the default timestamp enable flag. func WithDefaultTimestamp(defaultTimestamp bool) Option { return func(c *client) error { c.defaultTimestamp = defaultTimestamp @@ -459,7 +457,7 @@ func WithDefaultTimestamp(defaultTimestamp bool) Option { } } -// WithDC returns the option to set the data center name +// WithDC returns the option to set the data center name. func WithDC(name string) Option { return func(c *client) error { if len(name) == 0 { @@ -470,7 +468,7 @@ func WithDC(name string) Option { } } -// WithDCAwareRouting returns the option to set the data center aware routing enable flag +// WithDCAwareRouting returns the option to set the data center aware routing enable flag. 
func WithDCAwareRouting(dcAwareRouting bool) Option { return func(c *client) error { c.poolConfig.enableDCAwareRouting = dcAwareRouting @@ -478,7 +476,7 @@ func WithDCAwareRouting(dcAwareRouting bool) Option { } } -// WithNonLocalReplicasFallback returns the option to set the non local replicas fallback enable flag +// WithNonLocalReplicasFallback returns the option to set the non local replicas fallback enable flag. func WithNonLocalReplicasFallback(nonLocalReplicasFallBack bool) Option { return func(c *client) error { c.poolConfig.enableNonLocalReplicasFallback = nonLocalReplicasFallBack @@ -486,7 +484,7 @@ func WithNonLocalReplicasFallback(nonLocalReplicasFallBack bool) Option { } } -// WithShuffleReplicas returns the option to set the shuffle replicas enable flag +// WithShuffleReplicas returns the option to set the shuffle replicas enable flag. func WithShuffleReplicas(shuffleReplicas bool) Option { return func(c *client) error { c.poolConfig.enableShuffleReplicas = shuffleReplicas @@ -494,7 +492,7 @@ func WithShuffleReplicas(shuffleReplicas bool) Option { } } -// WithTokenAwareHostPolicy returns the option to set the token aware host policy enable flag +// WithTokenAwareHostPolicy returns the option to set the token aware host policy enable flag. func WithTokenAwareHostPolicy(tokenAwareHostPolicy bool) Option { return func(c *client) error { c.poolConfig.enableTokenAwareHostPolicy = tokenAwareHostPolicy @@ -502,7 +500,7 @@ func WithTokenAwareHostPolicy(tokenAwareHostPolicy bool) Option { } } -// WithMaxWaitSchemaAgreement returns the option to set the max wait schema agreement +// WithMaxWaitSchemaAgreement returns the option to set the max wait schema agreement. func WithMaxWaitSchemaAgreement(maxWaitSchemaAgreement string) Option { return func(c *client) error { if len(maxWaitSchemaAgreement) == 0 { @@ -517,7 +515,7 @@ func WithMaxWaitSchemaAgreement(maxWaitSchemaAgreement string) Option { } } -// WithReconnectInterval returns the option to set the reconnect interval +// WithReconnectInterval returns the option to set the reconnect interval. func WithReconnectInterval(reconnectInterval string) Option { return func(c *client) error { if len(reconnectInterval) == 0 { @@ -532,7 +530,7 @@ func WithReconnectInterval(reconnectInterval string) Option { } } -// WithIgnorePeerAddr returns the option to set ignore peer address flag +// WithIgnorePeerAddr returns the option to set ignore peer address flag. func WithIgnorePeerAddr(ignorePeerAddr bool) Option { return func(c *client) error { c.ignorePeerAddr = ignorePeerAddr @@ -540,7 +538,7 @@ func WithIgnorePeerAddr(ignorePeerAddr bool) Option { } } -// WithDisableInitialHostLookup returns the option to set disable initial host lookup flag +// WithDisableInitialHostLookup returns the option to set disable initial host lookup flag. func WithDisableInitialHostLookup(disableInitialHostLookup bool) Option { return func(c *client) error { c.disableInitialHostLookup = disableInitialHostLookup @@ -548,7 +546,7 @@ func WithDisableInitialHostLookup(disableInitialHostLookup bool) Option { } } -// WithDisableNodeStatusEvents returns the option to set disable node status events flag +// WithDisableNodeStatusEvents returns the option to set disable node status events flag. 
func WithDisableNodeStatusEvents(disableNodeStatusEvents bool) Option { return func(c *client) error { c.disableNodeStatusEvents = disableNodeStatusEvents @@ -556,7 +554,7 @@ func WithDisableNodeStatusEvents(disableNodeStatusEvents bool) Option { } } -// WithDisableTopologyEvents returns the option to set disable topology events flag +// WithDisableTopologyEvents returns the option to set disable topology events flag. func WithDisableTopologyEvents(disableTopologyEvents bool) Option { return func(c *client) error { c.disableTopologyEvents = disableTopologyEvents @@ -564,7 +562,7 @@ func WithDisableTopologyEvents(disableTopologyEvents bool) Option { } } -// WithDisableSchemaEvents returns the option to set disable schema events flag +// WithDisableSchemaEvents returns the option to set disable schema events flag. func WithDisableSchemaEvents(disableSchemaEvents bool) Option { return func(c *client) error { c.disableSchemaEvents = disableSchemaEvents @@ -572,7 +570,7 @@ func WithDisableSchemaEvents(disableSchemaEvents bool) Option { } } -// WithDisableSkipMetadata returns the option to set disable skip metadata flag +// WithDisableSkipMetadata returns the option to set disable skip metadata flag. func WithDisableSkipMetadata(disableSkipMetadata bool) Option { return func(c *client) error { c.disableSkipMetadata = disableSkipMetadata @@ -580,7 +578,7 @@ func WithDisableSkipMetadata(disableSkipMetadata bool) Option { } } -// WithQueryObserver returns the option to set query observer +// WithQueryObserver returns the option to set query observer. func WithQueryObserver(obs QueryObserver) Option { return func(c *client) error { if obs == nil { @@ -592,7 +590,7 @@ func WithQueryObserver(obs QueryObserver) Option { } } -// WithBatchObserver returns the option to set batch observer +// WithBatchObserver returns the option to set batch observer. func WithBatchObserver(obs BatchObserver) Option { return func(c *client) error { if obs == nil { @@ -604,7 +602,7 @@ func WithBatchObserver(obs BatchObserver) Option { } } -// WithConnectObserver returns the option to set connect observer +// WithConnectObserver returns the option to set connect observer. func WithConnectObserver(obs ConnectObserver) Option { return func(c *client) error { if obs == nil { @@ -616,7 +614,7 @@ func WithConnectObserver(obs ConnectObserver) Option { } } -// WithFrameHeaderObserver returns the option to set FrameHeader observer +// WithFrameHeaderObserver returns the option to set FrameHeader observer. func WithFrameHeaderObserver(obs FrameHeaderObserver) Option { return func(c *client) error { if obs == nil { @@ -628,7 +626,7 @@ func WithFrameHeaderObserver(obs FrameHeaderObserver) Option { } } -// WithDefaultIdempotence returns the option to set default idempotence flag +// WithDefaultIdempotence returns the option to set default idempotence flag. func WithDefaultIdempotence(defaultIdempotence bool) Option { return func(c *client) error { c.defaultIdempotence = defaultIdempotence @@ -636,7 +634,7 @@ func WithDefaultIdempotence(defaultIdempotence bool) Option { } } -// WithWriteCoalesceWaitTime returns the option to set the write coalesce wait time +// WithWriteCoalesceWaitTime returns the option to set the write coalesce wait time. 
func WithWriteCoalesceWaitTime(writeCoalesceWaitTime string) Option { return func(c *client) error { if len(writeCoalesceWaitTime) == 0 { @@ -651,7 +649,7 @@ func WithWriteCoalesceWaitTime(writeCoalesceWaitTime string) Option { } } -// WithHostFilter returns the option to set the host filter enable flag +// WithHostFilter returns the option to set the host filter enable flag. func WithHostFilter(flg bool) Option { return func(c *client) error { c.hostFilter.enable = flg @@ -659,7 +657,7 @@ func WithHostFilter(flg bool) Option { } } -// WithDCHostFilter returns the option to set the DC host filter +// WithDCHostFilter returns the option to set the DC host filter. func WithDCHostFilter(dc string) Option { return func(c *client) error { if len(dc) == 0 { @@ -673,7 +671,7 @@ func WithDCHostFilter(dc string) Option { } } -// WithWhiteListHostFilter returns the option to set the white list host filter +// WithWhiteListHostFilter returns the option to set the white list host filter. func WithWhiteListHostFilter(list []string) Option { return func(c *client) error { if len(list) <= 0 { diff --git a/internal/db/nosql/cassandra/option_test.go b/internal/db/nosql/cassandra/option_test.go index a0990266ea..4e398fdafa 100644 --- a/internal/db/nosql/cassandra/option_test.go +++ b/internal/db/nosql/cassandra/option_test.go @@ -46,12 +46,10 @@ type frameHeaderObserverImpl struct{} func (frameHeaderObserverImpl) ObserveFrameHeader(context.Context, gocql.ObservedFrameHeader) {} -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. +var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), +} func TestWithHosts(t *testing.T) { type T = client @@ -1062,6 +1060,7 @@ func TestWithSerialConsistency(t *testing.T) { }) } } + func TestWithCompressor(t *testing.T) { type T = client type args struct { @@ -3609,6 +3608,7 @@ func TestWithConnectObserver(t *testing.T) { }) } } + func TestWithFrameHeaderObserver(t *testing.T) { type T = client type args struct { @@ -3681,6 +3681,7 @@ func TestWithFrameHeaderObserver(t *testing.T) { }) } } + func TestWithDefaultIdempotence(t *testing.T) { type T = client type args struct { diff --git a/internal/db/rdb/mysql/dbr/connection_test.go b/internal/db/rdb/mysql/dbr/connection_test.go new file mode 100644 index 0000000000..7a7a0cbf30 --- /dev/null +++ b/internal/db/rdb/mysql/dbr/connection_test.go @@ -0,0 +1,359 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package dbr + +import ( + "reflect" + "testing" + "time" + + dbr "github.com/gocraft/dbr/v2" + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func Test_connection_NewSession(t *testing.T) { + t.Parallel() + type args struct { + event EventReceiver + } + type fields struct { + Connection *dbr.Connection + } + type want struct { + want Session + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, Session) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Session) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + event: nil, + }, + fields: fields { + Connection: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + event: nil, + }, + fields: fields { + Connection: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + conn := &connection{ + Connection: test.fields.Connection, + } + + got := conn.NewSession(test.args.event) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_connection_SetConnMaxLifetime(t *testing.T) { + t.Parallel() + type args struct { + d time.Duration + } + type fields struct { + Connection *dbr.Connection + } + type want struct { + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want) error { + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + d: nil, + }, + fields: fields { + Connection: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + d: nil, + }, + fields: fields { + Connection: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + conn := &connection{ + Connection: test.fields.Connection, + } + + conn.SetConnMaxLifetime(test.args.d) + if err := test.checkFunc(test.want); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_connection_SetMaxIdleConns(t *testing.T) { + t.Parallel() + type args struct { + n int + } + type fields struct { + Connection *dbr.Connection + } + type want struct { + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want) error { + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", 
+ args: args { + n: 0, + }, + fields: fields { + Connection: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + n: 0, + }, + fields: fields { + Connection: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + conn := &connection{ + Connection: test.fields.Connection, + } + + conn.SetMaxIdleConns(test.args.n) + if err := test.checkFunc(test.want); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_connection_SetMaxOpenConns(t *testing.T) { + t.Parallel() + type args struct { + n int + } + type fields struct { + Connection *dbr.Connection + } + type want struct { + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want) error { + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + n: 0, + }, + fields: fields { + Connection: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + n: 0, + }, + fields: fields { + Connection: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + conn := &connection{ + Connection: test.fields.Connection, + } + + conn.SetMaxOpenConns(test.args.n) + if err := test.checkFunc(test.want); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/db/rdb/mysql/dbr/dbr_mock_test.go b/internal/db/rdb/mysql/dbr/dbr_mock_test.go new file mode 100644 index 0000000000..cc150e1327 --- /dev/null +++ b/internal/db/rdb/mysql/dbr/dbr_mock_test.go @@ -0,0 +1,2591 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package dbr + +import ( + "context" + "database/sql" + "reflect" + "testing" + "time" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestMockDBR_Open(t *testing.T) { + t.Parallel() + type args struct { + driver string + dsn string + log EventReceiver + } + type fields struct { + OpenFunc func(driver, dsn string, log EventReceiver) (Connection, error) + EqFunc func(col string, val interface{}) Builder + } + type want struct { + want Connection + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, Connection, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Connection, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + driver: "", + dsn: "", + log: nil, + }, + fields: fields { + OpenFunc: nil, + EqFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + driver: "", + dsn: "", + log: nil, + }, + fields: fields { + OpenFunc: nil, + EqFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + d := &MockDBR{ + OpenFunc: test.fields.OpenFunc, + EqFunc: test.fields.EqFunc, + } + + got, err := d.Open(test.args.driver, test.args.dsn, test.args.log) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockDBR_Eq(t *testing.T) { + t.Parallel() + type args struct { + col string + val interface{} + } + type fields struct { + OpenFunc func(driver, dsn string, log EventReceiver) (Connection, error) + EqFunc func(col string, val interface{}) Builder + } + type want struct { + want Builder + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, Builder) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Builder) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + col: "", + val: nil, + }, + fields: fields { + OpenFunc: nil, + EqFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + col: "", + val: nil, + }, + fields: fields { + OpenFunc: nil, + EqFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = 
defaultCheckFunc + } + d := &MockDBR{ + OpenFunc: test.fields.OpenFunc, + EqFunc: test.fields.EqFunc, + } + + got := d.Eq(test.args.col, test.args.val) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockSession_Select(t *testing.T) { + t.Parallel() + type args struct { + column []string + } + type fields struct { + SelectFunc func(column ...string) SelectStmt + BeginFunc func() (Tx, error) + CloseFunc func() error + PingContextFunc func(ctx context.Context) error + } + type want struct { + want SelectStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, SelectStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got SelectStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + column: nil, + }, + fields: fields { + SelectFunc: nil, + BeginFunc: nil, + CloseFunc: nil, + PingContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + column: nil, + }, + fields: fields { + SelectFunc: nil, + BeginFunc: nil, + CloseFunc: nil, + PingContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &MockSession{ + SelectFunc: test.fields.SelectFunc, + BeginFunc: test.fields.BeginFunc, + CloseFunc: test.fields.CloseFunc, + PingContextFunc: test.fields.PingContextFunc, + } + + got := s.Select(test.args.column...) 
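+			// The TODO cases above could be filled in along these lines, assuming
+			// MockSession.Select simply delegates to the injected SelectFunc and that
+			// *MockSelect satisfies SelectStmt (both are assumptions, not confirmed by
+			// this file); this is a minimal sketch only:
+			//
+			//	func() test {
+			//		stmt := new(MockSelect)
+			//		return test{
+			//			name:   "returns the SelectStmt produced by SelectFunc",
+			//			args:   args{column: []string{"uuid"}},
+			//			fields: fields{SelectFunc: func(_ ...string) SelectStmt { return stmt }},
+			//			want:   want{want: stmt},
+			//		}
+			//	}(),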
+ if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockSession_Begin(t *testing.T) { + t.Parallel() + type fields struct { + SelectFunc func(column ...string) SelectStmt + BeginFunc func() (Tx, error) + CloseFunc func() error + PingContextFunc func(ctx context.Context) error + } + type want struct { + want Tx + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, Tx, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got Tx, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + SelectFunc: nil, + BeginFunc: nil, + CloseFunc: nil, + PingContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + SelectFunc: nil, + BeginFunc: nil, + CloseFunc: nil, + PingContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &MockSession{ + SelectFunc: test.fields.SelectFunc, + BeginFunc: test.fields.BeginFunc, + CloseFunc: test.fields.CloseFunc, + PingContextFunc: test.fields.PingContextFunc, + } + + got, err := s.Begin() + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockSession_Close(t *testing.T) { + t.Parallel() + type fields struct { + SelectFunc func(column ...string) SelectStmt + BeginFunc func() (Tx, error) + CloseFunc func() error + PingContextFunc func(ctx context.Context) error + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + SelectFunc: nil, + BeginFunc: nil, + CloseFunc: nil, + PingContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + SelectFunc: nil, + BeginFunc: nil, + CloseFunc: nil, + PingContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &MockSession{ + SelectFunc: test.fields.SelectFunc, + BeginFunc: test.fields.BeginFunc, + CloseFunc: test.fields.CloseFunc, + PingContextFunc: test.fields.PingContextFunc, + } + 
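+			// The TODO cases above could be filled in along these lines, assuming
+			// MockSession.Close simply returns the result of the injected CloseFunc;
+			// the sentinel error below is illustrative only:
+			//
+			//	func() test {
+			//		err := errors.Errorf("close error")
+			//		return test{
+			//			name:   "returns the error produced by CloseFunc",
+			//			fields: fields{CloseFunc: func() error { return err }},
+			//			want:   want{err: err},
+			//		}
+			//	}(),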
+ err := s.Close() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockSession_PingContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + SelectFunc func(column ...string) SelectStmt + BeginFunc func() (Tx, error) + CloseFunc func() error + PingContextFunc func(ctx context.Context) error + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + SelectFunc: nil, + BeginFunc: nil, + CloseFunc: nil, + PingContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + SelectFunc: nil, + BeginFunc: nil, + CloseFunc: nil, + PingContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &MockSession{ + SelectFunc: test.fields.SelectFunc, + BeginFunc: test.fields.BeginFunc, + CloseFunc: test.fields.CloseFunc, + PingContextFunc: test.fields.PingContextFunc, + } + + err := s.PingContext(test.args.ctx) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockTx_Commit(t *testing.T) { + t.Parallel() + type fields struct { + CommitFunc func() error + RollbackFunc func() error + RollbackUnlessCommittedFunc func() + InsertBySqlFunc func(query string, value ...interface{}) InsertStmt + InsertIntoFunc func(table string) InsertStmt + SelectFunc func(column ...string) SelectStmt + DeleteFromFunc func(table string) DeleteStmt + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if 
test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + t := &MockTx{ + CommitFunc: test.fields.CommitFunc, + RollbackFunc: test.fields.RollbackFunc, + RollbackUnlessCommittedFunc: test.fields.RollbackUnlessCommittedFunc, + InsertBySqlFunc: test.fields.InsertBySqlFunc, + InsertIntoFunc: test.fields.InsertIntoFunc, + SelectFunc: test.fields.SelectFunc, + DeleteFromFunc: test.fields.DeleteFromFunc, + } + + err := t.Commit() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockTx_Rollback(t *testing.T) { + t.Parallel() + type fields struct { + CommitFunc func() error + RollbackFunc func() error + RollbackUnlessCommittedFunc func() + InsertBySqlFunc func(query string, value ...interface{}) InsertStmt + InsertIntoFunc func(table string) InsertStmt + SelectFunc func(column ...string) SelectStmt + DeleteFromFunc func(table string) DeleteStmt + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + t := &MockTx{ + CommitFunc: test.fields.CommitFunc, + RollbackFunc: test.fields.RollbackFunc, + RollbackUnlessCommittedFunc: test.fields.RollbackUnlessCommittedFunc, + InsertBySqlFunc: test.fields.InsertBySqlFunc, + InsertIntoFunc: test.fields.InsertIntoFunc, + SelectFunc: test.fields.SelectFunc, + DeleteFromFunc: test.fields.DeleteFromFunc, + } + + err := t.Rollback() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockTx_RollbackUnlessCommitted(t *testing.T) { + t.Parallel() + type fields struct { + CommitFunc func() error + RollbackFunc func() error + RollbackUnlessCommittedFunc func() + InsertBySqlFunc func(query string, value ...interface{}) InsertStmt + InsertIntoFunc func(table string) InsertStmt + SelectFunc func(column ...string) SelectStmt + DeleteFromFunc func(table string) DeleteStmt + } + type want struct { + } + type test struct { + name string + fields fields + want want + checkFunc func(want) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want) error { + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + 
fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + t := &MockTx{ + CommitFunc: test.fields.CommitFunc, + RollbackFunc: test.fields.RollbackFunc, + RollbackUnlessCommittedFunc: test.fields.RollbackUnlessCommittedFunc, + InsertBySqlFunc: test.fields.InsertBySqlFunc, + InsertIntoFunc: test.fields.InsertIntoFunc, + SelectFunc: test.fields.SelectFunc, + DeleteFromFunc: test.fields.DeleteFromFunc, + } + + t.RollbackUnlessCommitted() + if err := test.checkFunc(test.want); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockTx_InsertBySql(t *testing.T) { + t.Parallel() + type args struct { + query string + value []interface{} + } + type fields struct { + CommitFunc func() error + RollbackFunc func() error + RollbackUnlessCommittedFunc func() + InsertBySqlFunc func(query string, value ...interface{}) InsertStmt + InsertIntoFunc func(table string) InsertStmt + SelectFunc func(column ...string) SelectStmt + DeleteFromFunc func(table string) DeleteStmt + } + type want struct { + want InsertStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, InsertStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got InsertStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + query: "", + value: nil, + }, + fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + query: "", + value: nil, + }, + fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + t := &MockTx{ + CommitFunc: test.fields.CommitFunc, + RollbackFunc: test.fields.RollbackFunc, + RollbackUnlessCommittedFunc: test.fields.RollbackUnlessCommittedFunc, + InsertBySqlFunc: test.fields.InsertBySqlFunc, + InsertIntoFunc: 
test.fields.InsertIntoFunc, + SelectFunc: test.fields.SelectFunc, + DeleteFromFunc: test.fields.DeleteFromFunc, + } + + got := t.InsertBySql(test.args.query, test.args.value...) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockTx_InsertInto(t *testing.T) { + t.Parallel() + type args struct { + table string + } + type fields struct { + CommitFunc func() error + RollbackFunc func() error + RollbackUnlessCommittedFunc func() + InsertBySqlFunc func(query string, value ...interface{}) InsertStmt + InsertIntoFunc func(table string) InsertStmt + SelectFunc func(column ...string) SelectStmt + DeleteFromFunc func(table string) DeleteStmt + } + type want struct { + want InsertStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, InsertStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got InsertStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + table: "", + }, + fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + table: "", + }, + fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + t := &MockTx{ + CommitFunc: test.fields.CommitFunc, + RollbackFunc: test.fields.RollbackFunc, + RollbackUnlessCommittedFunc: test.fields.RollbackUnlessCommittedFunc, + InsertBySqlFunc: test.fields.InsertBySqlFunc, + InsertIntoFunc: test.fields.InsertIntoFunc, + SelectFunc: test.fields.SelectFunc, + DeleteFromFunc: test.fields.DeleteFromFunc, + } + + got := t.InsertInto(test.args.table) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockTx_Select(t *testing.T) { + t.Parallel() + type args struct { + column []string + } + type fields struct { + CommitFunc func() error + RollbackFunc func() error + RollbackUnlessCommittedFunc func() + InsertBySqlFunc func(query string, value ...interface{}) InsertStmt + InsertIntoFunc func(table string) InsertStmt + SelectFunc func(column ...string) SelectStmt + DeleteFromFunc func(table string) DeleteStmt + } + type want struct { + want SelectStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, SelectStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got SelectStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + 
name: "test_case_1", + args: args { + column: nil, + }, + fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + column: nil, + }, + fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + t := &MockTx{ + CommitFunc: test.fields.CommitFunc, + RollbackFunc: test.fields.RollbackFunc, + RollbackUnlessCommittedFunc: test.fields.RollbackUnlessCommittedFunc, + InsertBySqlFunc: test.fields.InsertBySqlFunc, + InsertIntoFunc: test.fields.InsertIntoFunc, + SelectFunc: test.fields.SelectFunc, + DeleteFromFunc: test.fields.DeleteFromFunc, + } + + got := t.Select(test.args.column...) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockTx_DeleteFrom(t *testing.T) { + t.Parallel() + type args struct { + table string + } + type fields struct { + CommitFunc func() error + RollbackFunc func() error + RollbackUnlessCommittedFunc func() + InsertBySqlFunc func(query string, value ...interface{}) InsertStmt + InsertIntoFunc func(table string) InsertStmt + SelectFunc func(column ...string) SelectStmt + DeleteFromFunc func(table string) DeleteStmt + } + type want struct { + want DeleteStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, DeleteStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got DeleteStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + table: "", + }, + fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + table: "", + }, + fields: fields { + CommitFunc: nil, + RollbackFunc: nil, + RollbackUnlessCommittedFunc: nil, + InsertBySqlFunc: nil, + InsertIntoFunc: nil, + SelectFunc: nil, + DeleteFromFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + t := &MockTx{ + CommitFunc: test.fields.CommitFunc, + RollbackFunc: test.fields.RollbackFunc, + RollbackUnlessCommittedFunc: 
test.fields.RollbackUnlessCommittedFunc, + InsertBySqlFunc: test.fields.InsertBySqlFunc, + InsertIntoFunc: test.fields.InsertIntoFunc, + SelectFunc: test.fields.SelectFunc, + DeleteFromFunc: test.fields.DeleteFromFunc, + } + + got := t.DeleteFrom(test.args.table) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockConn_NewSession(t *testing.T) { + t.Parallel() + type args struct { + event EventReceiver + } + type fields struct { + NewSessionFunc func(event EventReceiver) Session + SetConnMaxLifetimeFunc func(d time.Duration) + SetMaxIdleConnsFunc func(n int) + SetMaxOpenConnsFunc func(n int) + } + type want struct { + want Session + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, Session) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Session) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + event: nil, + }, + fields: fields { + NewSessionFunc: nil, + SetConnMaxLifetimeFunc: nil, + SetMaxIdleConnsFunc: nil, + SetMaxOpenConnsFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + event: nil, + }, + fields: fields { + NewSessionFunc: nil, + SetConnMaxLifetimeFunc: nil, + SetMaxIdleConnsFunc: nil, + SetMaxOpenConnsFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &MockConn{ + NewSessionFunc: test.fields.NewSessionFunc, + SetConnMaxLifetimeFunc: test.fields.SetConnMaxLifetimeFunc, + SetMaxIdleConnsFunc: test.fields.SetMaxIdleConnsFunc, + SetMaxOpenConnsFunc: test.fields.SetMaxOpenConnsFunc, + } + + got := c.NewSession(test.args.event) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockConn_SetConnMaxLifetime(t *testing.T) { + t.Parallel() + type args struct { + d time.Duration + } + type fields struct { + NewSessionFunc func(event EventReceiver) Session + SetConnMaxLifetimeFunc func(d time.Duration) + SetMaxIdleConnsFunc func(n int) + SetMaxOpenConnsFunc func(n int) + } + type want struct { + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want) error { + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + d: nil, + }, + fields: fields { + NewSessionFunc: nil, + SetConnMaxLifetimeFunc: nil, + SetMaxIdleConnsFunc: nil, + SetMaxOpenConnsFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + d: nil, + }, + fields: fields { + NewSessionFunc: nil, + SetConnMaxLifetimeFunc: nil, + SetMaxIdleConnsFunc: nil, + SetMaxOpenConnsFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + 
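+	// For void setters such as SetConnMaxLifetime, the TODO cases above can be
+	// filled in by capturing the value forwarded to the injected
+	// SetConnMaxLifetimeFunc and asserting it in checkFunc. A minimal sketch,
+	// assuming MockConn.SetConnMaxLifetime only calls that func:
+	//
+	//	func() test {
+	//		var got time.Duration
+	//		return test{
+	//			name:   "forwards the duration to SetConnMaxLifetimeFunc",
+	//			args:   args{d: time.Second},
+	//			fields: fields{SetConnMaxLifetimeFunc: func(d time.Duration) { got = d }},
+	//			checkFunc: func(_ want) error {
+	//				if got != time.Second {
+	//					return errors.Errorf("got: %v, want: %v", got, time.Second)
+	//				}
+	//				return nil
+	//			},
+	//		}
+	//	}(),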
+ for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &MockConn{ + NewSessionFunc: test.fields.NewSessionFunc, + SetConnMaxLifetimeFunc: test.fields.SetConnMaxLifetimeFunc, + SetMaxIdleConnsFunc: test.fields.SetMaxIdleConnsFunc, + SetMaxOpenConnsFunc: test.fields.SetMaxOpenConnsFunc, + } + + c.SetConnMaxLifetime(test.args.d) + if err := test.checkFunc(test.want); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockConn_SetMaxIdleConns(t *testing.T) { + t.Parallel() + type args struct { + n int + } + type fields struct { + NewSessionFunc func(event EventReceiver) Session + SetConnMaxLifetimeFunc func(d time.Duration) + SetMaxIdleConnsFunc func(n int) + SetMaxOpenConnsFunc func(n int) + } + type want struct { + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want) error { + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + n: 0, + }, + fields: fields { + NewSessionFunc: nil, + SetConnMaxLifetimeFunc: nil, + SetMaxIdleConnsFunc: nil, + SetMaxOpenConnsFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + n: 0, + }, + fields: fields { + NewSessionFunc: nil, + SetConnMaxLifetimeFunc: nil, + SetMaxIdleConnsFunc: nil, + SetMaxOpenConnsFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &MockConn{ + NewSessionFunc: test.fields.NewSessionFunc, + SetConnMaxLifetimeFunc: test.fields.SetConnMaxLifetimeFunc, + SetMaxIdleConnsFunc: test.fields.SetMaxIdleConnsFunc, + SetMaxOpenConnsFunc: test.fields.SetMaxOpenConnsFunc, + } + + c.SetMaxIdleConns(test.args.n) + if err := test.checkFunc(test.want); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockConn_SetMaxOpenConns(t *testing.T) { + t.Parallel() + type args struct { + n int + } + type fields struct { + NewSessionFunc func(event EventReceiver) Session + SetConnMaxLifetimeFunc func(d time.Duration) + SetMaxIdleConnsFunc func(n int) + SetMaxOpenConnsFunc func(n int) + } + type want struct { + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want) error { + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + n: 0, + }, + fields: fields { + NewSessionFunc: nil, + SetConnMaxLifetimeFunc: nil, + SetMaxIdleConnsFunc: nil, + SetMaxOpenConnsFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + n: 0, + }, + fields: fields { + NewSessionFunc: nil, + SetConnMaxLifetimeFunc: 
nil, + SetMaxIdleConnsFunc: nil, + SetMaxOpenConnsFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &MockConn{ + NewSessionFunc: test.fields.NewSessionFunc, + SetConnMaxLifetimeFunc: test.fields.SetConnMaxLifetimeFunc, + SetMaxIdleConnsFunc: test.fields.SetMaxIdleConnsFunc, + SetMaxOpenConnsFunc: test.fields.SetMaxOpenConnsFunc, + } + + c.SetMaxOpenConns(test.args.n) + if err := test.checkFunc(test.want); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockSelect_From(t *testing.T) { + t.Parallel() + type args struct { + table interface{} + } + type fields struct { + FromFunc func(table interface{}) SelectStmt + WhereFunc func(query interface{}, value ...interface{}) SelectStmt + LimitFunc func(n uint64) SelectStmt + LoadContextFunc func(ctx context.Context, value interface{}) (int, error) + } + type want struct { + want SelectStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, SelectStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got SelectStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + table: nil, + }, + fields: fields { + FromFunc: nil, + WhereFunc: nil, + LimitFunc: nil, + LoadContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + table: nil, + }, + fields: fields { + FromFunc: nil, + WhereFunc: nil, + LimitFunc: nil, + LoadContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &MockSelect{ + FromFunc: test.fields.FromFunc, + WhereFunc: test.fields.WhereFunc, + LimitFunc: test.fields.LimitFunc, + LoadContextFunc: test.fields.LoadContextFunc, + } + + got := s.From(test.args.table) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockSelect_Where(t *testing.T) { + t.Parallel() + type args struct { + query interface{} + value []interface{} + } + type fields struct { + FromFunc func(table interface{}) SelectStmt + WhereFunc func(query interface{}, value ...interface{}) SelectStmt + LimitFunc func(n uint64) SelectStmt + LoadContextFunc func(ctx context.Context, value interface{}) (int, error) + } + type want struct { + want SelectStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, SelectStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got SelectStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", 
got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + query: nil, + value: nil, + }, + fields: fields { + FromFunc: nil, + WhereFunc: nil, + LimitFunc: nil, + LoadContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + query: nil, + value: nil, + }, + fields: fields { + FromFunc: nil, + WhereFunc: nil, + LimitFunc: nil, + LoadContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &MockSelect{ + FromFunc: test.fields.FromFunc, + WhereFunc: test.fields.WhereFunc, + LimitFunc: test.fields.LimitFunc, + LoadContextFunc: test.fields.LoadContextFunc, + } + + got := s.Where(test.args.query, test.args.value...) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockSelect_Limit(t *testing.T) { + t.Parallel() + type args struct { + n uint64 + } + type fields struct { + FromFunc func(table interface{}) SelectStmt + WhereFunc func(query interface{}, value ...interface{}) SelectStmt + LimitFunc func(n uint64) SelectStmt + LoadContextFunc func(ctx context.Context, value interface{}) (int, error) + } + type want struct { + want SelectStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, SelectStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got SelectStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + n: 0, + }, + fields: fields { + FromFunc: nil, + WhereFunc: nil, + LimitFunc: nil, + LoadContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + n: 0, + }, + fields: fields { + FromFunc: nil, + WhereFunc: nil, + LimitFunc: nil, + LoadContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &MockSelect{ + FromFunc: test.fields.FromFunc, + WhereFunc: test.fields.WhereFunc, + LimitFunc: test.fields.LimitFunc, + LoadContextFunc: test.fields.LoadContextFunc, + } + + got := s.Limit(test.args.n) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockSelect_LoadContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + value interface{} + } + type fields struct { + FromFunc func(table interface{}) SelectStmt + WhereFunc func(query interface{}, value ...interface{}) SelectStmt + LimitFunc func(n uint64) SelectStmt + LoadContextFunc func(ctx 
context.Context, value interface{}) (int, error) + } + type want struct { + want int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + value: nil, + }, + fields: fields { + FromFunc: nil, + WhereFunc: nil, + LimitFunc: nil, + LoadContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + value: nil, + }, + fields: fields { + FromFunc: nil, + WhereFunc: nil, + LimitFunc: nil, + LoadContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &MockSelect{ + FromFunc: test.fields.FromFunc, + WhereFunc: test.fields.WhereFunc, + LimitFunc: test.fields.LimitFunc, + LoadContextFunc: test.fields.LoadContextFunc, + } + + got, err := s.LoadContext(test.args.ctx, test.args.value) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockInsert_Columns(t *testing.T) { + t.Parallel() + type args struct { + column []string + } + type fields struct { + ColumnsFunc func(column ...string) InsertStmt + ExecContextFunc func(ctx context.Context) (sql.Result, error) + RecordFunc func(structValue interface{}) InsertStmt + } + type want struct { + want InsertStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, InsertStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got InsertStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + column: nil, + }, + fields: fields { + ColumnsFunc: nil, + ExecContextFunc: nil, + RecordFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + column: nil, + }, + fields: fields { + ColumnsFunc: nil, + ExecContextFunc: nil, + RecordFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &MockInsert{ + ColumnsFunc: test.fields.ColumnsFunc, + ExecContextFunc: test.fields.ExecContextFunc, + RecordFunc: test.fields.RecordFunc, + } + + got := 
s.Columns(test.args.column...) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockInsert_ExecContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + ColumnsFunc func(column ...string) InsertStmt + ExecContextFunc func(ctx context.Context) (sql.Result, error) + RecordFunc func(structValue interface{}) InsertStmt + } + type want struct { + want sql.Result + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, sql.Result, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got sql.Result, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + ColumnsFunc: nil, + ExecContextFunc: nil, + RecordFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + ColumnsFunc: nil, + ExecContextFunc: nil, + RecordFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &MockInsert{ + ColumnsFunc: test.fields.ColumnsFunc, + ExecContextFunc: test.fields.ExecContextFunc, + RecordFunc: test.fields.RecordFunc, + } + + got, err := s.ExecContext(test.args.ctx) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockInsert_Record(t *testing.T) { + t.Parallel() + type args struct { + structValue interface{} + } + type fields struct { + ColumnsFunc func(column ...string) InsertStmt + ExecContextFunc func(ctx context.Context) (sql.Result, error) + RecordFunc func(structValue interface{}) InsertStmt + } + type want struct { + want InsertStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, InsertStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got InsertStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + structValue: nil, + }, + fields: fields { + ColumnsFunc: nil, + ExecContextFunc: nil, + RecordFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + structValue: nil, + }, + fields: fields { + ColumnsFunc: nil, + ExecContextFunc: nil, + RecordFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + 
test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &MockInsert{ + ColumnsFunc: test.fields.ColumnsFunc, + ExecContextFunc: test.fields.ExecContextFunc, + RecordFunc: test.fields.RecordFunc, + } + + got := s.Record(test.args.structValue) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockDelete_ExecContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + ExecContextFunc func(ctx context.Context) (sql.Result, error) + WhereFunc func(query interface{}, value ...interface{}) DeleteStmt + } + type want struct { + want sql.Result + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, sql.Result, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got sql.Result, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + ExecContextFunc: nil, + WhereFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + ExecContextFunc: nil, + WhereFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &MockDelete{ + ExecContextFunc: test.fields.ExecContextFunc, + WhereFunc: test.fields.WhereFunc, + } + + got, err := s.ExecContext(test.args.ctx) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockDelete_Where(t *testing.T) { + t.Parallel() + type args struct { + query interface{} + value []interface{} + } + type fields struct { + ExecContextFunc func(ctx context.Context) (sql.Result, error) + WhereFunc func(query interface{}, value ...interface{}) DeleteStmt + } + type want struct { + want DeleteStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, DeleteStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got DeleteStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + query: nil, + value: nil, + }, + fields: fields { + ExecContextFunc: nil, + WhereFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + query: nil, + value: nil, + }, + fields: fields { + ExecContextFunc: nil, + WhereFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + 
+ for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &MockDelete{ + ExecContextFunc: test.fields.ExecContextFunc, + WhereFunc: test.fields.WhereFunc, + } + + got := s.Where(test.args.query, test.args.value...) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/db/rdb/mysql/dbr/dbr_test.go b/internal/db/rdb/mysql/dbr/dbr_test.go new file mode 100644 index 0000000000..1fd62c6e5c --- /dev/null +++ b/internal/db/rdb/mysql/dbr/dbr_test.go @@ -0,0 +1,251 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package dbr + +import ( + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type want struct { + want DBR + } + type test struct { + name string + want want + checkFunc func(want, DBR) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got DBR) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := New() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_db_Open(t *testing.T) { + t.Parallel() + type args struct { + driver string + dsn string + log EventReceiver + } + type want struct { + want Connection + err error + } + type test struct { + name string + args args + d *db + want want + checkFunc func(want, Connection, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Connection, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + driver: "", + dsn: "", + log: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO 
test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + driver: "", + dsn: "", + log: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + d := &db{} + + got, err := d.Open(test.args.driver, test.args.dsn, test.args.log) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_db_Eq(t *testing.T) { + t.Parallel() + type args struct { + col string + val interface{} + } + type want struct { + want Builder + } + type test struct { + name string + args args + d *db + want want + checkFunc func(want, Builder) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Builder) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + col: "", + val: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + col: "", + val: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + d := &db{} + + got := d.Eq(test.args.col, test.args.val) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/db/rdb/mysql/dbr/delete_test.go b/internal/db/rdb/mysql/dbr/delete_test.go new file mode 100644 index 0000000000..6dfb3382fb --- /dev/null +++ b/internal/db/rdb/mysql/dbr/delete_test.go @@ -0,0 +1,207 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package dbr + +import ( + "context" + "database/sql" + "reflect" + "testing" + + dbr "github.com/gocraft/dbr/v2" + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func Test_deleteStmt_ExecContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + DeleteStmt *dbr.DeleteStmt + } + type want struct { + want sql.Result + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, sql.Result, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got sql.Result, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + DeleteStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + DeleteStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + stmt := &deleteStmt{ + DeleteStmt: test.fields.DeleteStmt, + } + + got, err := stmt.ExecContext(test.args.ctx) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_deleteStmt_Where(t *testing.T) { + t.Parallel() + type args struct { + query interface{} + value []interface{} + } + type fields struct { + DeleteStmt *dbr.DeleteStmt + } + type want struct { + want DeleteStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, DeleteStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got DeleteStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + query: nil, + value: nil, + }, + fields: fields { + DeleteStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + query: nil, + value: nil, + }, + fields: fields { + DeleteStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + stmt := &deleteStmt{ + DeleteStmt: test.fields.DeleteStmt, + } + + got := stmt.Where(test.args.query, test.args.value...) 
+ if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/db/rdb/mysql/dbr/insert_test.go b/internal/db/rdb/mysql/dbr/insert_test.go new file mode 100644 index 0000000000..886f04c0ac --- /dev/null +++ b/internal/db/rdb/mysql/dbr/insert_test.go @@ -0,0 +1,290 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package dbr + +import ( + "context" + "database/sql" + "reflect" + "testing" + + dbr "github.com/gocraft/dbr/v2" + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func Test_insertStmt_Columns(t *testing.T) { + t.Parallel() + type args struct { + column []string + } + type fields struct { + InsertStmt *dbr.InsertStmt + } + type want struct { + want InsertStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, InsertStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got InsertStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + column: nil, + }, + fields: fields { + InsertStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + column: nil, + }, + fields: fields { + InsertStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + stmt := &insertStmt{ + InsertStmt: test.fields.InsertStmt, + } + + got := stmt.Columns(test.args.column...) 
+ if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_insertStmt_ExecContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + InsertStmt *dbr.InsertStmt + } + type want struct { + want sql.Result + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, sql.Result, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got sql.Result, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + InsertStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + InsertStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + stmt := &insertStmt{ + InsertStmt: test.fields.InsertStmt, + } + + got, err := stmt.ExecContext(test.args.ctx) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_insertStmt_Record(t *testing.T) { + t.Parallel() + type args struct { + structValue interface{} + } + type fields struct { + InsertStmt *dbr.InsertStmt + } + type want struct { + want InsertStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, InsertStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got InsertStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + structValue: nil, + }, + fields: fields { + InsertStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + structValue: nil, + }, + fields: fields { + InsertStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + stmt := &insertStmt{ + InsertStmt: test.fields.InsertStmt, + } + + got := stmt.Record(test.args.structValue) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/db/rdb/mysql/dbr/select_test.go b/internal/db/rdb/mysql/dbr/select_test.go new file mode 100644 index 0000000000..4476ac507a --- /dev/null +++ 
b/internal/db/rdb/mysql/dbr/select_test.go @@ -0,0 +1,381 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package dbr + +import ( + "context" + "reflect" + "testing" + + dbr "github.com/gocraft/dbr/v2" + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func Test_selectStmt_From(t *testing.T) { + t.Parallel() + type args struct { + table interface{} + } + type fields struct { + SelectStmt *dbr.SelectStmt + } + type want struct { + want SelectStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, SelectStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got SelectStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + table: nil, + }, + fields: fields { + SelectStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + table: nil, + }, + fields: fields { + SelectStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + stmt := &selectStmt{ + SelectStmt: test.fields.SelectStmt, + } + + got := stmt.From(test.args.table) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_selectStmt_Where(t *testing.T) { + t.Parallel() + type args struct { + query interface{} + value []interface{} + } + type fields struct { + SelectStmt *dbr.SelectStmt + } + type want struct { + want SelectStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, SelectStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got SelectStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + query: nil, + value: nil, + }, + fields: fields { + SelectStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + query: nil, + value: nil, + }, + fields: fields { + SelectStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + 
tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + stmt := &selectStmt{ + SelectStmt: test.fields.SelectStmt, + } + + got := stmt.Where(test.args.query, test.args.value...) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_selectStmt_Limit(t *testing.T) { + t.Parallel() + type args struct { + n uint64 + } + type fields struct { + SelectStmt *dbr.SelectStmt + } + type want struct { + want SelectStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, SelectStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got SelectStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + n: 0, + }, + fields: fields { + SelectStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + n: 0, + }, + fields: fields { + SelectStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + stmt := &selectStmt{ + SelectStmt: test.fields.SelectStmt, + } + + got := stmt.Limit(test.args.n) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_selectStmt_LoadContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + value interface{} + } + type fields struct { + SelectStmt *dbr.SelectStmt + } + type want struct { + want int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + value: nil, + }, + fields: fields { + SelectStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + value: nil, + }, + fields: fields { + SelectStmt: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + stmt := &selectStmt{ + SelectStmt: test.fields.SelectStmt, + } 
+ + got, err := stmt.LoadContext(test.args.ctx, test.args.value) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/db/rdb/mysql/dbr/session_test.go b/internal/db/rdb/mysql/dbr/session_test.go new file mode 100644 index 0000000000..c380dadd5d --- /dev/null +++ b/internal/db/rdb/mysql/dbr/session_test.go @@ -0,0 +1,431 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package dbr + +import ( + "context" + "reflect" + "testing" + + dbr "github.com/gocraft/dbr/v2" + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNewSession(t *testing.T) { + t.Parallel() + type args struct { + conn Connection + event EventReceiver + } + type want struct { + want Session + } + type test struct { + name string + args args + want want + checkFunc func(want, Session) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Session) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + conn: nil, + event: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + conn: nil, + event: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := NewSession(test.args.conn, test.args.event) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_session_Select(t *testing.T) { + t.Parallel() + type args struct { + column []string + } + type fields struct { + Session *dbr.Session + } + type want struct { + want SelectStmt + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, SelectStmt) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got SelectStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + column: nil, + }, + fields: fields { + Session: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + column: nil, + }, + fields: fields { + Session: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), 
+ */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + sess := &session{ + Session: test.fields.Session, + } + + got := sess.Select(test.args.column...) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_session_Begin(t *testing.T) { + t.Parallel() + type fields struct { + Session *dbr.Session + } + type want struct { + want Tx + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, Tx, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got Tx, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + Session: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + Session: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + sess := &session{ + Session: test.fields.Session, + } + + got, err := sess.Begin() + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_session_Close(t *testing.T) { + t.Parallel() + type fields struct { + Session *dbr.Session + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + Session: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + Session: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + sess := &session{ + Session: test.fields.Session, + } + + err := sess.Close() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_session_PingContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + Session *dbr.Session + } + type want struct { + err error + } + type 
test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + Session: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + Session: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + sess := &session{ + Session: test.fields.Session, + } + + err := sess.PingContext(test.args.ctx) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/pkg/gateway/vald/handler/grpc/checklist_test.go b/internal/db/rdb/mysql/dbr/tx_test.go similarity index 65% rename from pkg/gateway/vald/handler/grpc/checklist_test.go rename to internal/db/rdb/mysql/dbr/tx_test.go index f1b4798237..f21c38e13e 100644 --- a/pkg/gateway/vald/handler/grpc/checklist_test.go +++ b/internal/db/rdb/mysql/dbr/tx_test.go @@ -14,43 +14,36 @@ // limitations under the License. // -package grpc +package dbr import ( "reflect" - "sync" - "sync/atomic" "testing" - "unsafe" + dbr "github.com/gocraft/dbr/v2" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) -func Test_checkList_Exists(t *testing.T) { - type args struct { - key string - } +func Test_tx_Commit(t *testing.T) { + t.Parallel() type fields struct { - mu sync.Mutex - read atomic.Value - dirty map[string]*entryCheckList - misses int + Tx *dbr.Tx } type want struct { - want bool + err error } type test struct { name string - args args fields fields want want - checkFunc func(want, bool) error - beforeFunc func(args) - afterFunc func(args) + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() } - defaultCheckFunc := func(w want, got bool) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } return nil } @@ -59,14 +52,8 @@ func Test_checkList_Exists(t *testing.T) { /* { name: "test_case_1", - args: args { - key: "", - }, fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + Tx: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -78,14 +65,8 @@ func Test_checkList_Exists(t *testing.T) { func() test { return test { name: "test_case_2", - args: args { - key: "", - }, fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + Tx: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -94,55 +75,52 @@ func Test_checkList_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc 
!= nil { - test.beforeFunc(test.args) + test.beforeFunc() } if test.afterFunc != nil { - defer test.afterFunc(test.args) + defer test.afterFunc() } if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &checkList{ - mu: test.fields.mu, - read: test.fields.read, - dirty: test.fields.dirty, - misses: test.fields.misses, + t := &tx{ + Tx: test.fields.Tx, } - got := m.Exists(test.args.key) - if err := test.checkFunc(test.want, got); err != nil { + err := t.Commit() + if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_checkList_Check(t *testing.T) { - type args struct { - key string - } +func Test_tx_Rollback(t *testing.T) { + t.Parallel() type fields struct { - mu sync.Mutex - read atomic.Value - dirty map[string]*entryCheckList - misses int + Tx *dbr.Tx } type want struct { + err error } type test struct { name string - args args fields fields want want - checkFunc func(want) error - beforeFunc func(args) - afterFunc func(args) + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() } - defaultCheckFunc := func(w want) error { + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } return nil } tests := []test{ @@ -150,14 +128,8 @@ func Test_checkList_Check(t *testing.T) { /* { name: "test_case_1", - args: args { - key: "", - }, fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + Tx: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -169,14 +141,8 @@ func Test_checkList_Check(t *testing.T) { func() test { return test { name: "test_case_2", - args: args { - key: "", - }, fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + Tx: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -185,55 +151,48 @@ func Test_checkList_Check(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { - test.beforeFunc(test.args) + test.beforeFunc() } if test.afterFunc != nil { - defer test.afterFunc(test.args) + defer test.afterFunc() } if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &checkList{ - mu: test.fields.mu, - read: test.fields.read, - dirty: test.fields.dirty, - misses: test.fields.misses, + t := &tx{ + Tx: test.fields.Tx, } - m.Check(test.args.key) - if err := test.checkFunc(test.want); err != nil { + err := t.Rollback() + if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } }) } } -func Test_entryCheckList_tryStore(t *testing.T) { - type args struct { - i *struct{} - } +func Test_tx_RollbackUnlessCommitted(t *testing.T) { + t.Parallel() type fields struct { - p unsafe.Pointer + Tx *dbr.Tx } type want struct { - want bool } type test struct { name string - args args fields fields want want - checkFunc func(want, bool) error - beforeFunc func(args) - afterFunc func(args) + checkFunc func(want) error + beforeFunc func() + afterFunc func() } - defaultCheckFunc := func(w want, got bool) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } + defaultCheckFunc := func(w want) error { return nil } tests := []test{ @@ -241,11 +200,8 @@ func Test_entryCheckList_tryStore(t *testing.T) { /* { name: "test_case_1", - args: args { - i: struct{}{}, - }, fields: fields { - p: nil, + Tx: nil, 
}, want: want{}, checkFunc: defaultCheckFunc, @@ -257,11 +213,8 @@ func Test_entryCheckList_tryStore(t *testing.T) { func() test { return test { name: "test_case_2", - args: args { - i: struct{}{}, - }, fields: fields { - p: nil, + Tx: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -270,48 +223,56 @@ func Test_entryCheckList_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { - test.beforeFunc(test.args) + test.beforeFunc() } if test.afterFunc != nil { - defer test.afterFunc(test.args) + defer test.afterFunc() } if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - e := &entryCheckList{ - p: test.fields.p, + t := &tx{ + Tx: test.fields.Tx, } - got := e.tryStore(test.args.i) - if err := test.checkFunc(test.want, got); err != nil { + t.RollbackUnlessCommitted() + if err := test.checkFunc(test.want); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_entryCheckList_unexpungeLocked(t *testing.T) { +func Test_tx_InsertBySql(t *testing.T) { + t.Parallel() + type args struct { + query string + value []interface{} + } type fields struct { - p unsafe.Pointer + Tx *dbr.Tx } type want struct { - wantWasExpunged bool + want InsertStmt } type test struct { name string + args args fields fields want want - checkFunc func(want, bool) error - beforeFunc func() - afterFunc func() + checkFunc func(want, InsertStmt) error + beforeFunc func(args) + afterFunc func(args) } - defaultCheckFunc := func(w want, gotWasExpunged bool) error { - if !reflect.DeepEqual(gotWasExpunged, w.wantWasExpunged) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotWasExpunged, w.wantWasExpunged) + defaultCheckFunc := func(w want, got InsertStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -320,8 +281,12 @@ func Test_entryCheckList_unexpungeLocked(t *testing.T) { /* { name: "test_case_1", + args: args { + query: "", + value: nil, + }, fields: fields { - p: nil, + Tx: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -333,8 +298,12 @@ func Test_entryCheckList_unexpungeLocked(t *testing.T) { func() test { return test { name: "test_case_2", + args: args { + query: "", + value: nil, + }, fields: fields { - p: nil, + Tx: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -343,48 +312,56 @@ func Test_entryCheckList_unexpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { - test.beforeFunc() + test.beforeFunc(test.args) } if test.afterFunc != nil { - defer test.afterFunc() + defer test.afterFunc(test.args) } if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - e := &entryCheckList{ - p: test.fields.p, + t := &tx{ + Tx: test.fields.Tx, } - gotWasExpunged := e.unexpungeLocked() - if err := test.checkFunc(test.want, gotWasExpunged); err != nil { + got := t.InsertBySql(test.args.query, test.args.value...) 
+ if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_checkList_missLocked(t *testing.T) { +func Test_tx_InsertInto(t *testing.T) { + t.Parallel() + type args struct { + table string + } type fields struct { - mu sync.Mutex - read atomic.Value - dirty map[string]*entryCheckList - misses int + Tx *dbr.Tx } type want struct { + want InsertStmt } type test struct { name string + args args fields fields want want - checkFunc func(want) error - beforeFunc func() - afterFunc func() + checkFunc func(want, InsertStmt) error + beforeFunc func(args) + afterFunc func(args) } - defaultCheckFunc := func(w want) error { + defaultCheckFunc := func(w want, got InsertStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } return nil } tests := []test{ @@ -392,11 +369,11 @@ func Test_checkList_missLocked(t *testing.T) { /* { name: "test_case_1", + args: args { + table: "", + }, fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + Tx: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -408,11 +385,11 @@ func Test_checkList_missLocked(t *testing.T) { func() test { return test { name: "test_case_2", + args: args { + table: "", + }, fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + Tx: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -421,50 +398,56 @@ func Test_checkList_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { - test.beforeFunc() + test.beforeFunc(test.args) } if test.afterFunc != nil { - defer test.afterFunc() + defer test.afterFunc(test.args) } if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &checkList{ - mu: test.fields.mu, - read: test.fields.read, - dirty: test.fields.dirty, - misses: test.fields.misses, + t := &tx{ + Tx: test.fields.Tx, } - m.missLocked() - if err := test.checkFunc(test.want); err != nil { + got := t.InsertInto(test.args.table) + if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } }) } } -func Test_checkList_dirtyLocked(t *testing.T) { +func Test_tx_Select(t *testing.T) { + t.Parallel() + type args struct { + column []string + } type fields struct { - mu sync.Mutex - read atomic.Value - dirty map[string]*entryCheckList - misses int + Tx *dbr.Tx } type want struct { + want SelectStmt } type test struct { name string + args args fields fields want want - checkFunc func(want) error - beforeFunc func() - afterFunc func() + checkFunc func(want, SelectStmt) error + beforeFunc func(args) + afterFunc func(args) } - defaultCheckFunc := func(w want) error { + defaultCheckFunc := func(w want, got SelectStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } return nil } tests := []test{ @@ -472,11 +455,11 @@ func Test_checkList_dirtyLocked(t *testing.T) { /* { name: "test_case_1", + args: args { + column: nil, + }, fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + Tx: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -488,11 +471,11 @@ func Test_checkList_dirtyLocked(t *testing.T) { func() test { return test { name: "test_case_2", + args: args { + column: nil, + }, fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + Tx: nil, }, want: want{}, 
checkFunc: defaultCheckFunc, @@ -501,50 +484,55 @@ func Test_checkList_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { - test.beforeFunc() + test.beforeFunc(test.args) } if test.afterFunc != nil { - defer test.afterFunc() + defer test.afterFunc(test.args) } if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &checkList{ - mu: test.fields.mu, - read: test.fields.read, - dirty: test.fields.dirty, - misses: test.fields.misses, + t := &tx{ + Tx: test.fields.Tx, } - m.dirtyLocked() - if err := test.checkFunc(test.want); err != nil { + got := t.Select(test.args.column...) + if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } }) } } -func Test_entryCheckList_tryExpungeLocked(t *testing.T) { +func Test_tx_DeleteFrom(t *testing.T) { + t.Parallel() + type args struct { + table string + } type fields struct { - p unsafe.Pointer + Tx *dbr.Tx } type want struct { - wantIsExpunged bool + want DeleteStmt } type test struct { name string + args args fields fields want want - checkFunc func(want, bool) error - beforeFunc func() - afterFunc func() + checkFunc func(want, DeleteStmt) error + beforeFunc func(args) + afterFunc func(args) } - defaultCheckFunc := func(w want, gotIsExpunged bool) error { - if !reflect.DeepEqual(gotIsExpunged, w.wantIsExpunged) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotIsExpunged, w.wantIsExpunged) + defaultCheckFunc := func(w want, got DeleteStmt) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -553,8 +541,11 @@ func Test_entryCheckList_tryExpungeLocked(t *testing.T) { /* { name: "test_case_1", + args: args { + table: "", + }, fields: fields { - p: nil, + Tx: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -566,8 +557,11 @@ func Test_entryCheckList_tryExpungeLocked(t *testing.T) { func() test { return test { name: "test_case_2", + args: args { + table: "", + }, fields: fields { - p: nil, + Tx: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -576,26 +570,28 @@ func Test_entryCheckList_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { - test.beforeFunc() + test.beforeFunc(test.args) } if test.afterFunc != nil { - defer test.afterFunc() + defer test.afterFunc(test.args) } if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - e := &entryCheckList{ - p: test.fields.p, + t := &tx{ + Tx: test.fields.Tx, } - gotIsExpunged := e.tryExpungeLocked() - if err := test.checkFunc(test.want, gotIsExpunged); err != nil { + got := t.DeleteFrom(test.args.table) + if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/db/rdb/mysql/get.go b/internal/db/rdb/mysql/get.go index ff708df520..9507257241 100644 --- a/internal/db/rdb/mysql/get.go +++ b/internal/db/rdb/mysql/get.go @@ -19,6 +19,6 @@ package mysql import "context" type Getter interface { - GetMeta(ctx context.Context, uuid string) (MetaVector, error) + GetVector(ctx context.Context, uuid string) (Vector, error) GetIPs(ctx context.Context, uuid string) ([]string, error) } diff --git a/internal/db/rdb/mysql/model.go b/internal/db/rdb/mysql/model.go index 
7e75065c32..cab1318f0a 100644 --- a/internal/db/rdb/mysql/model.go +++ b/internal/db/rdb/mysql/model.go @@ -16,28 +16,22 @@ package mysql -import ( - dbr "github.com/gocraft/dbr/v2" -) - -// MetaVector is an interface to handle metadata keep in MySQL. -type MetaVector interface { +// Vector is an interface to handle vector keep in MySQL. +type Vector interface { GetUUID() string GetVector() []byte - GetMeta() string GetIPs() []string } -type metaVector struct { - meta meta +type vector struct { + data data podIPs []podIP } -type meta struct { - ID int64 `db:"id"` - UUID string `db:"uuid"` - Vector []byte `db:"vector"` - Meta dbr.NullString `db:"meta"` +type data struct { + ID int64 `db:"id"` + UUID string `db:"uuid"` + Vector []byte `db:"vector"` } type podIP struct { @@ -45,20 +39,17 @@ type podIP struct { IP string `db:"ip"` } -// GetUUID returns UUID of metaVector. -func (m *metaVector) GetUUID() string { return m.meta.UUID } - -// GetVector returns Vector of metaVector. -func (m *metaVector) GetVector() []byte { return m.meta.Vector } +// GetUUID returns UUID of Vector. +func (v *vector) GetUUID() string { return v.data.UUID } -// GetMeta returns meta.String of metaVector. -func (m *metaVector) GetMeta() string { return m.meta.Meta.String } +// GetVector returns Vector of Vector. +func (v *vector) GetVector() []byte { return v.data.Vector } -// GetIPs returns all podIPs which are Vald Agent Pods' IP indexed meta's vector. -func (m *metaVector) GetIPs() []string { - ips := make([]string, 0, len(m.podIPs)) +// GetIPs returns all podIPs which are Vald Agent Pods' IP indexed vector's vector. +func (v *vector) GetIPs() []string { + ips := make([]string, 0, len(v.podIPs)) - for _, ip := range m.podIPs { + for _, ip := range v.podIPs { ips = append(ips, ip.IP) } diff --git a/internal/db/rdb/mysql/model_test.go b/internal/db/rdb/mysql/model_test.go index b90b298eff..bd1d322162 100644 --- a/internal/db/rdb/mysql/model_test.go +++ b/internal/db/rdb/mysql/model_test.go @@ -17,18 +17,16 @@ package mysql import ( - "database/sql" "reflect" "testing" - dbr "github.com/gocraft/dbr/v2" "github.com/vdaas/vald/internal/errors" "go.uber.org/goleak" ) -func Test_metaVector_GetUUID(t *testing.T) { +func Test_vector_GetUUID(t *testing.T) { type fields struct { - meta meta + data data } type want struct { want string @@ -49,9 +47,9 @@ func Test_metaVector_GetUUID(t *testing.T) { } tests := []test{ { - name: "returns UUID when UUID of meta is not empty", + name: "returns UUID when UUID of vector is not empty", fields: fields{ - meta: meta{ + data: data{ UUID: "vald-vector-01", }, }, @@ -60,9 +58,9 @@ func Test_metaVector_GetUUID(t *testing.T) { }, }, { - name: "returns UUID when UUID of meta is empty string", + name: "returns UUID when UUID of vector is empty string", fields: fields{ - meta: meta{ + data: data{ UUID: "", }, }, @@ -84,22 +82,21 @@ func Test_metaVector_GetUUID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &metaVector{ - meta: test.fields.meta, + m := &vector{ + data: test.fields.data, } got := m.GetUUID() if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_metaVector_GetVector(t *testing.T) { +func Test_vector_GetVector(t *testing.T) { type fields struct { - meta meta + data data } type want struct { want []byte @@ -122,9 +119,9 @@ func Test_metaVector_GetVector(t *testing.T) { func() test { v := []byte("vdaas/vald") return test{ - name: "returns Vector when Vector of meta is not empty", + name: 
"returns Vector when Vector of vector is not empty", fields: fields{ - meta: meta{ + data: data{ Vector: v, }, }, @@ -135,7 +132,7 @@ func Test_metaVector_GetVector(t *testing.T) { }(), func() test { return test{ - name: "returns Vector when Vector of meta is empty", + name: "returns Vector when Vector of vector is empty", want: want{ want: nil, }, @@ -155,91 +152,19 @@ func Test_metaVector_GetVector(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &metaVector{ - meta: test.fields.meta, + m := &vector{ + data: test.fields.data, } got := m.GetVector() if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - - }) - } -} - -func Test_metaVector_GetMeta(t *testing.T) { - type fields struct { - meta meta - } - type want struct { - want string - } - type test struct { - name string - fields fields - want want - checkFunc func(want, string) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got string) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "returns MetaString when MetaString is not empty", - fields: fields{ - meta: meta{ - Meta: dbr.NullString{ - sql.NullString{ - String: "vdaas/vald", - Valid: false, - }, - }, - }, - }, - want: want{ - want: "vdaas/vald", - }, - }, - { - name: "returns MetaString when MetaString is empty", - want: want{ - want: "", - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt, goleakIgnoreOptions...) - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - m := &metaVector{ - meta: test.fields.meta, - } - - got := m.GetMeta() - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) } } -func Test_metaVector_GetIPs(t *testing.T) { +func Test_vector_GetIPs(t *testing.T) { type fields struct { podIPs []podIP } @@ -302,7 +227,7 @@ func Test_metaVector_GetIPs(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &metaVector{ + m := &vector{ podIPs: test.fields.podIPs, } @@ -310,7 +235,6 @@ func Test_metaVector_GetIPs(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/db/rdb/mysql/mysql.go b/internal/db/rdb/mysql/mysql.go index a0f77b4954..a897cbc3e2 100644 --- a/internal/db/rdb/mysql/mysql.go +++ b/internal/db/rdb/mysql/mysql.go @@ -33,14 +33,13 @@ import ( ) const ( - metaVectorTableName = "meta_vector" - podIPTableName = "pod_ip" - idColumnName = "id" - uuidColumnName = "uuid" - vectorColumnName = "vector" - metaColumnName = "meta" - ipColumnName = "ip" - asterisk = "*" + vectorTableName = "backup_vector" + podIPTableName = "pod_ip" + idColumnName = "id" + uuidColumnName = "uuid" + vectorColumnName = "vector" + ipColumnName = "ip" + asterisk = "*" ) // MySQL represents the interface to handle MySQL operation. @@ -167,49 +166,51 @@ func (m *mySQLClient) Ping(ctx context.Context) (err error) { // Close closes the connection of MySQL database. // If the connection is already closed or closing conncection is failed, it returns error. 
-func (m *mySQLClient) Close(ctx context.Context) error { +func (m *mySQLClient) Close(ctx context.Context) (err error) { if m.connected.Load().(bool) { - m.session.Close() - m.connected.Store(false) + err = m.session.Close() + if err == nil { + m.connected.Store(false) + } } return nil } -// GetMeta gets the metadata and podIPs which have index of metadata's vector. -func (m *mySQLClient) GetMeta(ctx context.Context, uuid string) (MetaVector, error) { +// GetVector gets the vector data and podIPs which have index of vector. +func (m *mySQLClient) GetVector(ctx context.Context, uuid string) (Vector, error) { if !m.connected.Load().(bool) { return nil, errors.ErrMySQLConnectionClosed } - var meta *meta - _, err := m.session.Select(asterisk).From(metaVectorTableName).Where(m.dbr.Eq(uuidColumnName, uuid)).Limit(1).LoadContext(ctx, &meta) + var data *data + _, err := m.session.Select(asterisk).From(vectorTableName).Where(m.dbr.Eq(uuidColumnName, uuid)).Limit(1).LoadContext(ctx, &data) if err != nil { return nil, err } - if meta == nil { + if data == nil { return nil, errors.ErrRequiredElementNotFoundByUUID(uuid) } var podIPs []podIP - _, err = m.session.Select(asterisk).From(podIPTableName).Where(m.dbr.Eq(idColumnName, meta.ID)).LoadContext(ctx, &podIPs) + _, err = m.session.Select(asterisk).From(podIPTableName).Where(m.dbr.Eq(idColumnName, data.ID)).LoadContext(ctx, &podIPs) if err != nil { return nil, err } - return &metaVector{ - meta: *meta, + return &vector{ + data: *data, podIPs: podIPs, }, nil } -// GetIPs gets the pod ips which have index of requested uuids' metadata's vector. +// GetIPs gets the pod ips which have index of requested uuids' vector data's vector. func (m *mySQLClient) GetIPs(ctx context.Context, uuid string) ([]string, error) { if !m.connected.Load().(bool) { return nil, errors.ErrMySQLConnectionClosed } var id int64 - _, err := m.session.Select(idColumnName).From(metaVectorTableName).Where(m.dbr.Eq(uuidColumnName, uuid)).Limit(1).LoadContext(ctx, &id) + _, err := m.session.Select(idColumnName).From(vectorTableName).Where(m.dbr.Eq(uuidColumnName, uuid)).Limit(1).LoadContext(ctx, &id) if err != nil { return nil, err } @@ -231,16 +232,16 @@ func (m *mySQLClient) GetIPs(ctx context.Context, uuid string) ([]string, error) return ips, nil } -func validateMeta(meta MetaVector) error { - if len(meta.GetVector()) == 0 { +func validateVector(vec Vector) error { + if len(vec.GetVector()) == 0 { return errors.ErrRequiredMemberNotFilled("vector") } return nil } -// SetMeta records metadata at meta_vector table and set of (podIP, uuid) at podIPtable through same transaction. +// SetVector records vector data at backup_vector table and set of (podIP, uuid) at podIPtable through same transaction. // If error occurs it will rollback by defer function. -func (m *mySQLClient) SetMeta(ctx context.Context, mv MetaVector) error { +func (m *mySQLClient) SetVector(ctx context.Context, vec Vector) error { if !m.connected.Load().(bool) { return errors.ErrMySQLConnectionClosed } @@ -251,28 +252,26 @@ func (m *mySQLClient) SetMeta(ctx context.Context, mv MetaVector) error { } defer tx.RollbackUnlessCommitted() - err = validateMeta(mv) + err = validateVector(vec) if err != nil { return err } - _, err = tx.InsertBySql("INSERT INTO meta_vector(uuid, vector, meta) VALUES (?, ?, ?) 
ON DUPLICATE KEY UPDATE vector = ?, meta = ?", - mv.GetUUID(), - mv.GetVector(), - mv.GetMeta(), - mv.GetVector(), - mv.GetMeta()).ExecContext(ctx) + _, err = tx.InsertBySql("INSERT INTO "+vectorTableName+"(uuid, vector) VALUES (?, ?) ON DUPLICATE KEY UPDATE vector = ?", + vec.GetUUID(), + vec.GetVector(), + vec.GetVector()).ExecContext(ctx) if err != nil { return err } var id int64 - _, err = tx.Select(idColumnName).From(metaVectorTableName).Where(m.dbr.Eq(uuidColumnName, mv.GetUUID())).Limit(1).LoadContext(ctx, &id) + _, err = tx.Select(idColumnName).From(vectorTableName).Where(m.dbr.Eq(uuidColumnName, vec.GetUUID())).Limit(1).LoadContext(ctx, &id) if err != nil { return err } if id == 0 { - return errors.ErrRequiredElementNotFoundByUUID(mv.GetUUID()) + return errors.ErrRequiredElementNotFoundByUUID(vec.GetUUID()) } _, err = tx.DeleteFrom(podIPTableName).Where(m.dbr.Eq(idColumnName, id)).ExecContext(ctx) @@ -281,7 +280,7 @@ func (m *mySQLClient) SetMeta(ctx context.Context, mv MetaVector) error { } stmt := tx.InsertInto(podIPTableName).Columns(idColumnName, ipColumnName) - for _, ip := range mv.GetIPs() { + for _, ip := range vec.GetIPs() { stmt.Record(&podIP{ID: id, IP: ip}) } _, err = stmt.ExecContext(ctx) @@ -292,8 +291,8 @@ func (m *mySQLClient) SetMeta(ctx context.Context, mv MetaVector) error { return tx.Commit() } -// SetMetas records multiple metadata like as SetMeta(). -func (m *mySQLClient) SetMetas(ctx context.Context, metas ...MetaVector) error { +// SetVectors records multiple vector data like as SetVector(). +func (m *mySQLClient) SetVectors(ctx context.Context, vecs ...Vector) error { if !m.connected.Load().(bool) { return errors.ErrMySQLConnectionClosed } @@ -304,31 +303,29 @@ func (m *mySQLClient) SetMetas(ctx context.Context, metas ...MetaVector) error { } defer tx.RollbackUnlessCommitted() - for _, meta := range metas { - err = validateMeta(meta) + for _, vec := range vecs { + err = validateVector(vec) if err != nil { return err } - _, err = tx.InsertBySql("INSERT INTO meta_vector(uuid, vector, meta) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE vector = ?, meta = ?", - meta.GetUUID(), - meta.GetVector(), - meta.GetMeta(), - meta.GetVector(), - meta.GetMeta()).ExecContext(ctx) + _, err = tx.InsertBySql("INSERT INTO "+vectorTableName+"(uuid, vector) VALUES (?, ?) 
ON DUPLICATE KEY UPDATE vector = ?", + vec.GetUUID(), + vec.GetVector(), + vec.GetVector()).ExecContext(ctx) if err != nil { return err } } - for _, meta := range metas { + for _, vec := range vecs { var id int64 - _, err = tx.Select(idColumnName).From(metaVectorTableName).Where(m.dbr.Eq(uuidColumnName, meta.GetUUID())).Limit(1).LoadContext(ctx, &id) + _, err = tx.Select(idColumnName).From(vectorTableName).Where(m.dbr.Eq(uuidColumnName, vec.GetUUID())).Limit(1).LoadContext(ctx, &id) if err != nil { return err } if id == 0 { - return errors.ErrRequiredElementNotFoundByUUID(meta.GetUUID()) + return errors.ErrRequiredElementNotFoundByUUID(vec.GetUUID()) } _, err = tx.DeleteFrom(podIPTableName).Where(m.dbr.Eq(idColumnName, id)).ExecContext(ctx) @@ -337,7 +334,7 @@ func (m *mySQLClient) SetMetas(ctx context.Context, metas ...MetaVector) error { } stmt := tx.InsertInto(podIPTableName).Columns(idColumnName, ipColumnName) - for _, ip := range meta.GetIPs() { + for _, ip := range vec.GetIPs() { stmt.Record(&podIP{ID: id, IP: ip}) } _, err = stmt.ExecContext(ctx) @@ -349,7 +346,7 @@ func (m *mySQLClient) SetMetas(ctx context.Context, metas ...MetaVector) error { return tx.Commit() } -func (m *mySQLClient) deleteMeta(ctx context.Context, val interface{}) error { +func (m *mySQLClient) deleteVector(ctx context.Context, val interface{}) error { if !m.connected.Load().(bool) { return errors.ErrMySQLConnectionClosed } @@ -363,7 +360,7 @@ func (m *mySQLClient) deleteMeta(ctx context.Context, val interface{}) error { } defer tx.RollbackUnlessCommitted() - _, err = tx.DeleteFrom(metaVectorTableName).Where(m.dbr.Eq(uuidColumnName, val)).ExecContext(ctx) + _, err = tx.DeleteFrom(vectorTableName).Where(m.dbr.Eq(uuidColumnName, val)).ExecContext(ctx) if err != nil { return err } @@ -375,14 +372,14 @@ func (m *mySQLClient) deleteMeta(ctx context.Context, val interface{}) error { return tx.Commit() } -// DeleteMeta deletes metadata from meta_vector table and podIPs from pod_ip table using meta's uuid. -func (m *mySQLClient) DeleteMeta(ctx context.Context, uuid string) error { - return m.deleteMeta(ctx, uuid) +// DeleteVector deletes vector data from backup_vector table and podIPs from pod_ip table using vector's uuid. +func (m *mySQLClient) DeleteVector(ctx context.Context, uuid string) error { + return m.deleteVector(ctx, uuid) } -// DeleteMetas is the same as DeleteMeta() but it deletes multiple records. -func (m *mySQLClient) DeleteMetas(ctx context.Context, uuids ...string) error { - return m.deleteMeta(ctx, uuids) +// DeleteVectors is the same as DeleteVector() but it deletes multiple records. +func (m *mySQLClient) DeleteVectors(ctx context.Context, uuids ...string) error { + return m.deleteVector(ctx, uuids) } // SetIPs insert the vector's uuid and the podIPs into database. 
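Taken together, the hunks above replace the old meta-oriented CRUD with a plain vector API: rows now live in the backup_vector table, and the exported Getter/Setter interfaces expose GetVector, SetVector, SetVectors, DeleteVector and DeleteVectors alongside the unchanged IP helpers. A minimal caller-side sketch of the renamed interface follows; the ownVector type, the import path, and the way the client is obtained are assumptions for illustration only:

package example

import (
	"context"

	// Import path inferred from the repository layout shown in this diff.
	"github.com/vdaas/vald/internal/db/rdb/mysql"
)

// ownVector is a hypothetical caller-side implementation of mysql.Vector.
type ownVector struct {
	uuid string
	vec  []byte
	ips  []string
}

func (o *ownVector) GetUUID() string   { return o.uuid }
func (o *ownVector) GetVector() []byte { return o.vec }
func (o *ownVector) GetIPs() []string  { return o.ips }

// backupOne exercises the renamed methods against any connected client that
// satisfies the package's Getter and Setter interfaces.
func backupOne(ctx context.Context, db interface {
	mysql.Getter
	mysql.Setter
}) error {
	v := &ownVector{
		uuid: "vald-vector-01",
		vec:  []byte("0.1,0.2"),
		ips:  []string{"192.168.1.12"},
	}
	// SetVector upserts the backup_vector row and rewrites its pod_ip rows
	// inside a single transaction.
	if err := db.SetVector(ctx, v); err != nil {
		return err
	}
	// GetVector loads the vector together with the pod IPs joined via the
	// backup_vector row id.
	got, err := db.GetVector(ctx, v.GetUUID())
	if err != nil {
		return err
	}
	_ = got.GetIPs()
	// DeleteVector removes the backup_vector row and its pod_ip rows.
	return db.DeleteVector(ctx, v.GetUUID())
}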
@@ -398,7 +395,7 @@ func (m *mySQLClient) SetIPs(ctx context.Context, uuid string, ips ...string) er defer tx.RollbackUnlessCommitted() var id int64 - _, err = tx.Select(idColumnName).From(metaVectorTableName).Where(m.dbr.Eq(uuidColumnName, uuid)).Limit(1).LoadContext(ctx, &id) + _, err = tx.Select(idColumnName).From(vectorTableName).Where(m.dbr.Eq(uuidColumnName, uuid)).Limit(1).LoadContext(ctx, &id) if err != nil { return err } diff --git a/internal/db/rdb/mysql/mysql_test.go b/internal/db/rdb/mysql/mysql_test.go index 233fff342b..81331a0c44 100644 --- a/internal/db/rdb/mysql/mysql_test.go +++ b/internal/db/rdb/mysql/mysql_test.go @@ -131,7 +131,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -414,7 +413,6 @@ func Test_mySQLClient_Open(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -570,7 +568,7 @@ func Test_mySQLClient_Close(t *testing.T) { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if reflect.DeepEqual(m.connected.Load().(bool), false) { + if m.connected.Load().(bool) { return errors.Errorf("Close failed") } return nil @@ -631,12 +629,11 @@ func Test_mySQLClient_Close(t *testing.T) { if err := test.checkFunc(test.want, err, m); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_mySQLClient_GetMeta(t *testing.T) { +func Test_mySQLClient_GetVector(t *testing.T) { type args struct { ctx context.Context uuid string @@ -664,7 +661,7 @@ func Test_mySQLClient_GetMeta(t *testing.T) { dbr dbr.DBR } type want struct { - want MetaVector + want Vector err error } type test struct { @@ -672,11 +669,11 @@ func Test_mySQLClient_GetMeta(t *testing.T) { args args fields fields want want - checkFunc func(want, MetaVector, error) error + checkFunc func(want, Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got MetaVector, err error) error { + defaultCheckFunc := func(w want, got Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -748,7 +745,7 @@ func Test_mySQLClient_GetMeta(t *testing.T) { func() test { uuid := "vdaas-01" return test{ - name: "return (nil, error) when meta is not found", + name: "return (nil, error) when vector is not found", args: args{ ctx: context.Background(), uuid: "vdaas-01", @@ -767,8 +764,8 @@ func Test_mySQLClient_GetMeta(t *testing.T) { return s } s.LoadContextFunc = func(ctx context.Context, value interface{}) (int, error) { - var mv *meta - if reflect.TypeOf(value) == reflect.TypeOf(&mv) { + var d *data + if reflect.TypeOf(value) == reflect.TypeOf(&d) { return 1, nil } return 0, errors.New("not found") @@ -793,7 +790,7 @@ func Test_mySQLClient_GetMeta(t *testing.T) { }(), func() test { uuid := "vdaas-01" - m := &meta{ + m := &data{ ID: 1, UUID: uuid, Vector: []byte("0.1,0.2"), @@ -818,7 +815,7 @@ func Test_mySQLClient_GetMeta(t *testing.T) { return s } s.LoadContextFunc = func(ctx context.Context, value interface{}) (int, error) { - var mv *meta + var mv *data var pp []podIP if reflect.TypeOf(value) == reflect.TypeOf(&mv) { mv = m @@ -849,7 +846,7 @@ func Test_mySQLClient_GetMeta(t *testing.T) { }(), func() test { uuid := "vdaas-01" - m := &meta{ + m := &data{ ID: 1, UUID: uuid, Vector: []byte("0.1,0.2"), @@ -861,7 +858,7 @@ func Test_mySQLClient_GetMeta(t *testing.T) { }, } return test{ - name: "return 
(metaVector, nil) when select success", + name: "return (vector, nil) when select success", args: args{ ctx: context.Background(), uuid: uuid, @@ -880,7 +877,7 @@ func Test_mySQLClient_GetMeta(t *testing.T) { return s } s.LoadContextFunc = func(ctx context.Context, value interface{}) (int, error) { - var mv *meta + var mv *data var pp []podIP if reflect.TypeOf(value) == reflect.TypeOf(&mv) { mv = m @@ -907,8 +904,8 @@ func Test_mySQLClient_GetMeta(t *testing.T) { }, }, want: want{ - want: &metaVector{ - meta: *m, + want: &vector{ + data: *m, podIPs: p, }, }, @@ -934,11 +931,10 @@ func Test_mySQLClient_GetMeta(t *testing.T) { dbr: test.fields.dbr, } - got, err := m.GetMeta(test.args.ctx, test.args.uuid) + got, err := m.GetVector(test.args.ctx, test.args.uuid) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1051,7 +1047,7 @@ func Test_mySQLClient_GetIPs(t *testing.T) { func() test { uuid := "vdaas-01" return test{ - name: "return (nil, error) when meta is not found", + name: "return (nil, error) when data is not found", args: args{ ctx: context.Background(), uuid: uuid, @@ -1247,14 +1243,13 @@ func Test_mySQLClient_GetIPs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_validateMeta(t *testing.T) { +func Test_validateVector(t *testing.T) { type args struct { - meta MetaVector + data Vector } type want struct { err error @@ -1275,22 +1270,22 @@ func Test_validateMeta(t *testing.T) { } tests := []test{ func() test { - m := new(metaVector) - m.meta.Vector = []byte("0.1,0.2,0.9") + m := new(vector) + m.data.Vector = []byte("0.1,0.2,0.9") return test{ - name: "return nil when the len(MetaVector) > 0", + name: "return nil when the len(Vector) > 0", args: args{ - meta: m, + data: m, }, want: want{}, } }(), func() test { - m := new(metaVector) + m := new(vector) return test{ - name: "return error when the len(MetaVector) is 0", + name: "return error when the len(Vector) is 0", args: args{ - meta: m, + data: m, }, want: want{ err: errors.ErrRequiredMemberNotFilled("vector"), @@ -1312,19 +1307,18 @@ func Test_validateMeta(t *testing.T) { test.checkFunc = defaultCheckFunc } - err := validateMeta(test.args.meta) + err := validateVector(test.args.data) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_mySQLClient_SetMeta(t *testing.T) { +func Test_mySQLClient_SetVector(t *testing.T) { type args struct { ctx context.Context - mv MetaVector + mv Vector } type fields struct { session dbr.Session @@ -1351,7 +1345,7 @@ func Test_mySQLClient_SetMeta(t *testing.T) { } tests := []test{ func() test { - m := new(metaVector) + m := new(vector) return test{ name: "return error when mysql connection is closed", args: args{ @@ -1370,7 +1364,7 @@ func Test_mySQLClient_SetMeta(t *testing.T) { } }(), func() test { - m := new(metaVector) + m := new(vector) err := errors.New("session.Begin error") return test{ name: "return error when session.Begin fails", @@ -1395,9 +1389,9 @@ func Test_mySQLClient_SetMeta(t *testing.T) { } }(), func() test { - m := new(metaVector) + m := new(vector) return test{ - name: "return error when meta vector is invalid", + name: "return error when data vector is invalid", args: args{ ctx: context.Background(), mv: m, @@ -1424,8 +1418,8 @@ func Test_mySQLClient_SetMeta(t *testing.T) { } }(), func() test { - m := new(metaVector) - m.meta.Vector = []byte("0.1,0.2,0.9") + m := new(vector) + m.data.Vector = 
[]byte("0.1,0.2,0.9") err := errors.New("insertbysql ExecContext error") return test{ name: "return error when insertbysql ExecContext returns error", @@ -1462,8 +1456,8 @@ func Test_mySQLClient_SetMeta(t *testing.T) { } }(), func() test { - m := new(metaVector) - m.meta.Vector = []byte("0.1,0.2,0.9") + m := new(vector) + m.data.Vector = []byte("0.1,0.2,0.9") err := errors.New("loadcontext error") return test{ name: "return error when select loadcontext returns error", @@ -1521,8 +1515,8 @@ func Test_mySQLClient_SetMeta(t *testing.T) { } }(), func() test { - m := new(metaVector) - m.meta.Vector = []byte("0.1,0.2,0.9") + m := new(vector) + m.data.Vector = []byte("0.1,0.2,0.9") return test{ name: "return error when elem not found by uuid", args: args{ @@ -1585,8 +1579,8 @@ func Test_mySQLClient_SetMeta(t *testing.T) { } }(), func() test { - m := new(metaVector) - m.meta.Vector = []byte("0.1,0.2,0.9") + m := new(vector) + m.data.Vector = []byte("0.1,0.2,0.9") m.podIPs = []podIP{ { ID: 1, @@ -1667,8 +1661,8 @@ func Test_mySQLClient_SetMeta(t *testing.T) { } }(), func() test { - m := new(metaVector) - m.meta.Vector = []byte("0.1,0.2,0.9") + m := new(vector) + m.data.Vector = []byte("0.1,0.2,0.9") m.podIPs = []podIP{ { ID: 1, @@ -1762,8 +1756,8 @@ func Test_mySQLClient_SetMeta(t *testing.T) { } }(), func() test { - m := new(metaVector) - m.meta.Vector = []byte("0.1,0.2,0.9") + m := new(vector) + m.data.Vector = []byte("0.1,0.2,0.9") m.podIPs = []podIP{ { ID: 1, @@ -1860,8 +1854,8 @@ func Test_mySQLClient_SetMeta(t *testing.T) { } }(), func() test { - m := new(metaVector) - m.meta.Vector = []byte("0.1,0.2,0.9") + m := new(vector) + m.data.Vector = []byte("0.1,0.2,0.9") m.podIPs = []podIP{ { ID: 1, @@ -1869,7 +1863,7 @@ func Test_mySQLClient_SetMeta(t *testing.T) { }, } return test{ - name: "return nil when setMeta ends with success", + name: "return nil when setVector ends with success", args: args{ ctx: context.Background(), mv: m, @@ -1974,19 +1968,18 @@ func Test_mySQLClient_SetMeta(t *testing.T) { dbr: test.fields.dbr, } - err := m.SetMeta(test.args.ctx, test.args.mv) + err := m.SetVector(test.args.ctx, test.args.mv) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_mySQLClient_SetMetas(t *testing.T) { +func Test_mySQLClient_SetVectors(t *testing.T) { type args struct { ctx context.Context - metas []MetaVector + datas []Vector } type fields struct { session dbr.Session @@ -2013,12 +2006,12 @@ func Test_mySQLClient_SetMetas(t *testing.T) { } tests := []test{ func() test { - var m []MetaVector + var m []Vector return test{ name: "return error when mysql connection is closed", args: args{ ctx: context.Background(), - metas: m, + datas: m, }, fields: fields{ connected: func() (v atomic.Value) { @@ -2032,13 +2025,13 @@ func Test_mySQLClient_SetMetas(t *testing.T) { } }(), func() test { - var m []MetaVector + var m []Vector err := errors.New("session.Begin error") return test{ name: "return error when session.Begin fails", args: args{ ctx: context.Background(), - metas: m, + datas: m, }, fields: fields{ session: &dbr.MockSession{ @@ -2057,13 +2050,13 @@ func Test_mySQLClient_SetMetas(t *testing.T) { } }(), func() test { - var m []MetaVector - m = append(m, new(metaVector)) + var m []Vector + m = append(m, new(vector)) return test{ - name: "return error when meta vector is invalid", + name: "return error when data vector is invalid", args: args{ ctx: context.Background(), - metas: m, + datas: m, }, fields: fields{ session: &dbr.MockSession{ @@ 
-2087,16 +2080,16 @@ func Test_mySQLClient_SetMetas(t *testing.T) { } }(), func() test { - meta := new(metaVector) - meta.meta.Vector = []byte("0.1,0.2,0.9") - var m []MetaVector - m = append(m, meta) + data := new(vector) + data.data.Vector = []byte("0.1,0.2,0.9") + var m []Vector + m = append(m, data) err := errors.New("insertbysql ExecContext error") return test{ name: "return error when insertbysql ExecContext returns error", args: args{ ctx: context.Background(), - metas: m, + datas: m, }, fields: fields{ session: &dbr.MockSession{ @@ -2127,16 +2120,16 @@ func Test_mySQLClient_SetMetas(t *testing.T) { } }(), func() test { - meta := new(metaVector) - meta.meta.Vector = []byte("0.1,0.2,0.9") - var m []MetaVector - m = append(m, meta) + data := new(vector) + data.data.Vector = []byte("0.1,0.2,0.9") + var m []Vector + m = append(m, data) err := errors.New("loadcontext error") return test{ name: "return error when select loadcontext returns error", args: args{ ctx: context.Background(), - metas: m, + datas: m, }, fields: fields{ session: &dbr.MockSession{ @@ -2188,15 +2181,15 @@ func Test_mySQLClient_SetMetas(t *testing.T) { } }(), func() test { - meta := new(metaVector) - meta.meta.Vector = []byte("0.1,0.2,0.9") - var m []MetaVector - m = append(m, meta) + data := new(vector) + data.data.Vector = []byte("0.1,0.2,0.9") + var m []Vector + m = append(m, data) return test{ name: "return error when elem not found by uuid", args: args{ ctx: context.Background(), - metas: m, + datas: m, }, fields: fields{ session: &dbr.MockSession{ @@ -2254,23 +2247,23 @@ func Test_mySQLClient_SetMetas(t *testing.T) { } }(), func() test { - meta := new(metaVector) - meta.meta.Vector = []byte("0.1,0.2,0.9") - meta.podIPs = []podIP{ + data := new(vector) + data.data.Vector = []byte("0.1,0.2,0.9") + data.podIPs = []podIP{ { ID: 1, IP: "192.168.1.12", }, } - var m []MetaVector - m = append(m, meta) + var m []Vector + m = append(m, data) err := errors.New("delete ExecContext error") return test{ name: "return error when delete ExecContext returns error", args: args{ ctx: context.Background(), - metas: m, + datas: m, }, fields: fields{ session: &dbr.MockSession{ @@ -2339,22 +2332,22 @@ func Test_mySQLClient_SetMetas(t *testing.T) { } }(), func() test { - meta := new(metaVector) - meta.meta.Vector = []byte("0.1,0.2,0.9") - meta.podIPs = []podIP{ + data := new(vector) + data.data.Vector = []byte("0.1,0.2,0.9") + data.podIPs = []podIP{ { ID: 1, IP: "192.168.1.12", }, } - var m []MetaVector - m = append(m, meta) + var m []Vector + m = append(m, data) err := errors.New("insert ExecContext error") return test{ name: "return error when insert ExecContext returns error", args: args{ ctx: context.Background(), - metas: m, + datas: m, }, fields: fields{ session: &dbr.MockSession{ @@ -2436,22 +2429,22 @@ func Test_mySQLClient_SetMetas(t *testing.T) { } }(), func() test { - meta := new(metaVector) - meta.meta.Vector = []byte("0.1,0.2,0.9") - meta.podIPs = []podIP{ + data := new(vector) + data.data.Vector = []byte("0.1,0.2,0.9") + data.podIPs = []podIP{ { ID: 1, IP: "192.168.1.12", }, } - var m []MetaVector - m = append(m, meta) + var m []Vector + m = append(m, data) err := errors.New("tx.Commit error") return test{ name: "return error when tx.Commit returns error", args: args{ ctx: context.Background(), - metas: m, + datas: m, }, fields: fields{ session: &dbr.MockSession{ @@ -2536,21 +2529,21 @@ func Test_mySQLClient_SetMetas(t *testing.T) { } }(), func() test { - meta := new(metaVector) - meta.meta.Vector = []byte("0.1,0.2,0.9") 
- meta.podIPs = []podIP{ + data := new(vector) + data.data.Vector = []byte("0.1,0.2,0.9") + data.podIPs = []podIP{ { ID: 1, IP: "192.168.1.12", }, } - var m []MetaVector - m = append(m, meta) + var m []Vector + m = append(m, data) return test{ - name: "return nil when setMeta ends with success", + name: "return nil when setVector ends with success", args: args{ ctx: context.Background(), - metas: m, + datas: m, }, fields: fields{ session: &dbr.MockSession{ @@ -2652,16 +2645,15 @@ func Test_mySQLClient_SetMetas(t *testing.T) { dbr: test.fields.dbr, } - err := m.SetMetas(test.args.ctx, test.args.metas...) + err := m.SetVectors(test.args.ctx, test.args.datas...) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_mySQLClient_deleteMeta(t *testing.T) { +func Test_mySQLClient_deleteVector(t *testing.T) { type args struct { ctx context.Context val interface{} @@ -2758,9 +2750,9 @@ func Test_mySQLClient_deleteMeta(t *testing.T) { } }(), func() test { - err := errors.New("metaVectorTableName error") + err := errors.New("vectorTableName error") return test{ - name: "return error when DeleteFromFunc(metaVectorTableName) returns error", + name: "return error when DeleteFromFunc(vectorTableName) returns error", args: args{ ctx: context.Background(), val: "vald-01", @@ -2776,7 +2768,7 @@ func Test_mySQLClient_deleteMeta(t *testing.T) { DeleteFromFunc: func(table string) dbr.DeleteStmt { s := new(dbr.MockDelete) s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { - if table == "meta_vector" { + if table == "backup_vector" { return nil, err } return nil, nil @@ -2912,16 +2904,15 @@ func Test_mySQLClient_deleteMeta(t *testing.T) { dbr: test.fields.dbr, } - err := m.deleteMeta(test.args.ctx, test.args.val) + err := m.deleteVector(test.args.ctx, test.args.val) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_mySQLClient_DeleteMeta(t *testing.T) { +func Test_mySQLClient_DeleteVector(t *testing.T) { type args struct { ctx context.Context uuid string @@ -2952,7 +2943,7 @@ func Test_mySQLClient_DeleteMeta(t *testing.T) { tests := []test{ func() test { return test{ - name: "return nil when deleteMeta success with empty-uuid", + name: "return nil when deleteVector success with empty-uuid", args: args{ ctx: context.Background(), uuid: "", @@ -2993,7 +2984,7 @@ func Test_mySQLClient_DeleteMeta(t *testing.T) { }(), func() test { return test{ - name: "return nil when deleteMeta success with uuid", + name: "return nil when deleteVector success with uuid", args: args{ ctx: context.Background(), uuid: "vald-01", @@ -3052,16 +3043,15 @@ func Test_mySQLClient_DeleteMeta(t *testing.T) { dbr: test.fields.dbr, } - err := m.DeleteMeta(test.args.ctx, test.args.uuid) + err := m.DeleteVector(test.args.ctx, test.args.uuid) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_mySQLClient_DeleteMetas(t *testing.T) { +func Test_mySQLClient_DeleteVectors(t *testing.T) { type args struct { ctx context.Context uuids []string @@ -3092,7 +3082,7 @@ func Test_mySQLClient_DeleteMetas(t *testing.T) { tests := []test{ func() test { return test{ - name: "return nil when deleteMetas success with empty uuids", + name: "return nil when deleteVectors success with empty uuids", args: args{ ctx: context.Background(), uuids: []string{}, @@ -3133,7 +3123,7 @@ func Test_mySQLClient_DeleteMetas(t *testing.T) { }(), func() test { return test{ - name: "return nil when 
deleteMetas success with uuids", + name: "return nil when deleteVectors success with uuids", args: args{ ctx: context.Background(), uuids: []string{ @@ -3195,11 +3185,10 @@ func Test_mySQLClient_DeleteMetas(t *testing.T) { dbr: test.fields.dbr, } - err := m.DeleteMetas(test.args.ctx, test.args.uuids...) + err := m.DeleteVectors(test.args.ctx, test.args.uuids...) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -3570,7 +3559,6 @@ func Test_mySQLClient_SetIPs(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -3768,7 +3756,6 @@ func Test_mySQLClient_RemoveIPs(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/db/rdb/mysql/option.go b/internal/db/rdb/mysql/option.go index dbe3921283..4891502f4b 100644 --- a/internal/db/rdb/mysql/option.go +++ b/internal/db/rdb/mysql/option.go @@ -29,17 +29,15 @@ import ( // Option represents the functional option for mySQLClient. type Option func(*mySQLClient) error -var ( - defaultOpts = []Option{ - WithCharset("utf8mb4"), - WithTimezone("Local"), - WithInitialPingDuration("30ms"), - WithInitialPingTimeLimit("5m"), - // WithConnectionLifeTimeLimit("2m"), - // WithMaxOpenConns(40), - // WithMaxIdleConns(50), - } -) +var defaultOpts = []Option{ + WithCharset("utf8mb4"), + WithTimezone("Local"), + WithInitialPingDuration("30ms"), + WithInitialPingTimeLimit("5m"), + // WithConnectionLifeTimeLimit("2m"), + // WithMaxOpenConns(40), + // WithMaxIdleConns(50), +} // WithTimezone returns the option to set the timezone. func WithTimezone(tz string) Option { diff --git a/internal/db/rdb/mysql/option_test.go b/internal/db/rdb/mysql/option_test.go index c1a3d0cc4a..04d62e950d 100644 --- a/internal/db/rdb/mysql/option_test.go +++ b/internal/db/rdb/mysql/option_test.go @@ -29,12 +29,10 @@ import ( "go.uber.org/goleak" ) -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. 
+var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), +} func TestWithTimezone(t *testing.T) { type T = mySQLClient diff --git a/internal/db/rdb/mysql/set.go b/internal/db/rdb/mysql/set.go index 22a56dff98..0e88df8996 100644 --- a/internal/db/rdb/mysql/set.go +++ b/internal/db/rdb/mysql/set.go @@ -19,10 +19,10 @@ package mysql import "context" type Setter interface { - SetMeta(ctx context.Context, meta MetaVector) error - SetMetas(ctx context.Context, metas ...MetaVector) error - DeleteMeta(ctx context.Context, uuid string) error - DeleteMetas(ctx context.Context, uuids ...string) error + SetVector(ctx context.Context, vec Vector) error + SetVectors(ctx context.Context, vecs ...Vector) error + DeleteVector(ctx context.Context, uuid string) error + DeleteVectors(ctx context.Context, uuids ...string) error SetIPs(ctx context.Context, uuid string, ips ...string) error RemoveIPs(ctx context.Context, ips ...string) error } diff --git a/internal/db/storage/blob/s3/option.go b/internal/db/storage/blob/s3/option.go index 7d47ec5bf6..986d193e15 100644 --- a/internal/db/storage/blob/s3/option.go +++ b/internal/db/storage/blob/s3/option.go @@ -27,11 +27,9 @@ import ( // Option represents the functional option for client. type Option func(c *client) error -var ( - defaultOpts = []Option{ - WithErrGroup(errgroup.Get()), - } -) +var defaultOpts = []Option{ + WithErrGroup(errgroup.Get()), +} // WithErrGroup returns the option to set the eg. func WithErrGroup(eg errgroup.Group) Option { diff --git a/internal/db/storage/blob/s3/option_test.go b/internal/db/storage/blob/s3/option_test.go index d9cbb876d5..0a45588c89 100644 --- a/internal/db/storage/blob/s3/option_test.go +++ b/internal/db/storage/blob/s3/option_test.go @@ -29,12 +29,10 @@ import ( "go.uber.org/goleak" ) -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. +var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), +} func TestWithErrGroup(t *testing.T) { type T = client diff --git a/internal/db/storage/blob/s3/reader/io/io_test.go b/internal/db/storage/blob/s3/reader/io/io_test.go new file mode 100644 index 0000000000..ffcde9fc51 --- /dev/null +++ b/internal/db/storage/blob/s3/reader/io/io_test.go @@ -0,0 +1,253 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +package io + +import ( + "context" + "io" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type want struct { + want IO + } + type test struct { + name string + want want + checkFunc func(want, IO) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got IO) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := New() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_ctxio_NewReaderWithContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + r io.Reader + } + type want struct { + want io.Reader + err error + } + type test struct { + name string + args args + c *ctxio + want want + checkFunc func(want, io.Reader, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got io.Reader, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &ctxio{} + + got, err := c.NewReaderWithContext(test.args.ctx, test.args.r) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_ctxio_NewReadCloserWithContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + r io.ReadCloser + } + type want struct { + want io.ReadCloser + err error + } + type test struct { + name string + args args + c *ctxio + want want + checkFunc func(want, io.ReadCloser, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got io.ReadCloser, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: 
"test_case_1", + args: args { + ctx: nil, + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + c := &ctxio{} + + got, err := c.NewReadCloserWithContext(test.args.ctx, test.args.r) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/db/storage/blob/s3/reader/option.go b/internal/db/storage/blob/s3/reader/option.go index e50ec68099..15951d6e6d 100644 --- a/internal/db/storage/blob/s3/reader/option.go +++ b/internal/db/storage/blob/s3/reader/option.go @@ -26,16 +26,14 @@ import ( // Option represents the functional option for reader. type Option func(r *reader) -var ( - defaultOpts = []Option{ - WithErrGroup(errgroup.Get()), - WithMaxChunkSize(512 * 1024 * 1024), - WithBackoff(false), - func(r *reader) { - r.ctxio = io.New() - }, - } -) +var defaultOpts = []Option{ + WithErrGroup(errgroup.Get()), + WithMaxChunkSize(512 * 1024 * 1024), + WithBackoff(false), + func(r *reader) { + r.ctxio = io.New() + }, +} // WithErrGroup returns the option to set the eg. func WithErrGroup(eg errgroup.Group) Option { diff --git a/internal/db/storage/blob/s3/reader/option_test.go b/internal/db/storage/blob/s3/reader/option_test.go index d924ff6b35..e180ad1065 100644 --- a/internal/db/storage/blob/s3/reader/option_test.go +++ b/internal/db/storage/blob/s3/reader/option_test.go @@ -29,12 +29,10 @@ import ( "go.uber.org/goleak" ) -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. +var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), +} func TestWithErrGroup(t *testing.T) { type T = reader @@ -438,7 +436,6 @@ func TestWithBackoff(t *testing.T) { if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/db/storage/blob/s3/reader/reader.go b/internal/db/storage/blob/s3/reader/reader.go index eadd8686b2..b4ed41c32b 100644 --- a/internal/db/storage/blob/s3/reader/reader.go +++ b/internal/db/storage/blob/s3/reader/reader.go @@ -49,6 +49,7 @@ type reader struct { backoffEnabled bool backoffOpts []backoff.Option + bo backoff.Backoff maxChunkSize int64 } @@ -87,6 +88,10 @@ func (r *reader) Open(ctx context.Context) (err error) { var offset int64 + if r.backoffEnabled { + r.bo = backoff.New(r.backoffOpts...) 
+ } + for { select { case <-ctx.Done(): @@ -128,14 +133,13 @@ func (r *reader) Open(ctx context.Context) (err error) { } func (r *reader) getObjectWithBackoff(ctx context.Context, offset, length int64) (io.Reader, error) { - getFunc := func() (interface{}, error) { + if !r.backoffEnabled || r.bo == nil { return r.getObject(ctx, offset, length) } - - b := backoff.New(r.backoffOpts...) - defer b.Close() - - res, err := b.Do(ctx, getFunc) + res, err := r.bo.Do(ctx, func(ctx context.Context) (interface{}, bool, error) { + res, err := r.getObject(ctx, offset, length) + return res, err != nil, err + }) if err != nil { return nil, err } @@ -157,7 +161,6 @@ func (r *reader) getObject(ctx context.Context, offset, length int64) (io.Reader ), }, ) - if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { @@ -199,6 +202,9 @@ func (r *reader) getObject(ctx context.Context, offset, length int64) (io.Reader // Close closes the reader. func (r *reader) Close() error { + if r.bo != nil { + defer r.bo.Close() + } if r.pr != nil { return r.pr.Close() } diff --git a/internal/db/storage/blob/s3/reader/reader_mock_test.go b/internal/db/storage/blob/s3/reader/reader_mock_test.go new file mode 100644 index 0000000000..a272ec4a85 --- /dev/null +++ b/internal/db/storage/blob/s3/reader/reader_mock_test.go @@ -0,0 +1,501 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package reader provides the reader functions for handling with s3. +// This package is wrapping package of "https://github.com/aws/aws-sdk-go". 
+package reader + +import ( + "context" + "io" + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/vdaas/vald/internal/db/storage/blob/s3/sdk/s3" + "github.com/vdaas/vald/internal/db/storage/blob/s3/sdk/s3/s3iface" + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestMockS3API_GetObjectWithContext(t *testing.T) { + t.Parallel() + type args struct { + ctx aws.Context + in *s3.GetObjectInput + opts []request.Option + } + type fields struct { + S3API s3iface.S3API + GetObjectWithContextFunc func(aws.Context, *s3.GetObjectInput, ...request.Option) (*s3.GetObjectOutput, error) + } + type want struct { + want *s3.GetObjectOutput + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, *s3.GetObjectOutput, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got *s3.GetObjectOutput, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + in: nil, + opts: nil, + }, + fields: fields { + S3API: nil, + GetObjectWithContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + in: nil, + opts: nil, + }, + fields: fields { + S3API: nil, + GetObjectWithContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockS3API{ + S3API: test.fields.S3API, + GetObjectWithContextFunc: test.fields.GetObjectWithContextFunc, + } + + got, err := m.GetObjectWithContext(test.args.ctx, test.args.in, test.args.opts...) 
+ if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockIO_NewReaderWithContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + r io.Reader + } + type fields struct { + NewReaderWithContextFunc func(ctx context.Context, r io.Reader) (io.Reader, error) + NewReadCloserWithContextFunc func(ctx context.Context, r io.ReadCloser) (io.ReadCloser, error) + } + type want struct { + want io.Reader + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, io.Reader, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got io.Reader, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + r: nil, + }, + fields: fields { + NewReaderWithContextFunc: nil, + NewReadCloserWithContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + r: nil, + }, + fields: fields { + NewReaderWithContextFunc: nil, + NewReadCloserWithContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockIO{ + NewReaderWithContextFunc: test.fields.NewReaderWithContextFunc, + NewReadCloserWithContextFunc: test.fields.NewReadCloserWithContextFunc, + } + + got, err := m.NewReaderWithContext(test.args.ctx, test.args.r) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockIO_NewReadCloserWithContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + r io.ReadCloser + } + type fields struct { + NewReaderWithContextFunc func(ctx context.Context, r io.Reader) (io.Reader, error) + NewReadCloserWithContextFunc func(ctx context.Context, r io.ReadCloser) (io.ReadCloser, error) + } + type want struct { + want io.ReadCloser + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, io.ReadCloser, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got io.ReadCloser, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + r: nil, + }, + fields: fields { + NewReaderWithContextFunc: nil, + NewReadCloserWithContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + r: nil, + }, + fields: fields { + 
NewReaderWithContextFunc: nil, + NewReadCloserWithContextFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockIO{ + NewReaderWithContextFunc: test.fields.NewReaderWithContextFunc, + NewReadCloserWithContextFunc: test.fields.NewReadCloserWithContextFunc, + } + + got, err := m.NewReadCloserWithContext(test.args.ctx, test.args.r) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockReadCloser_Read(t *testing.T) { + t.Parallel() + type args struct { + p []byte + } + type fields struct { + ReadFunc func(p []byte) (n int, err error) + CloseFunc func() error + } + type want struct { + wantN int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotN int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotN, w.wantN) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotN, w.wantN) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + p: nil, + }, + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + p: nil, + }, + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockReadCloser{ + ReadFunc: test.fields.ReadFunc, + CloseFunc: test.fields.CloseFunc, + } + + gotN, err := m.Read(test.args.p) + if err := test.checkFunc(test.want, gotN, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestMockReadCloser_Close(t *testing.T) { + t.Parallel() + type fields struct { + ReadFunc func(p []byte) (n int, err error) + CloseFunc func() error + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + ReadFunc: nil, + CloseFunc: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + 
t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &MockReadCloser{ + ReadFunc: test.fields.ReadFunc, + CloseFunc: test.fields.CloseFunc, + } + + err := m.Close() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/db/storage/blob/s3/reader/reader_test.go b/internal/db/storage/blob/s3/reader/reader_test.go index bd7485c827..da0301e27f 100644 --- a/internal/db/storage/blob/s3/reader/reader_test.go +++ b/internal/db/storage/blob/s3/reader/reader_test.go @@ -120,7 +120,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -569,7 +568,6 @@ func Test_reader_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -682,7 +680,6 @@ func Test_reader_Read(t *testing.T) { if err := test.checkFunc(test.want, gotN, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/db/storage/blob/s3/s3_test.go b/internal/db/storage/blob/s3/s3_test.go index 398f9d7856..37d0be55eb 100644 --- a/internal/db/storage/blob/s3/s3_test.go +++ b/internal/db/storage/blob/s3/s3_test.go @@ -100,7 +100,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -200,7 +199,6 @@ func Test_client_Open(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -290,7 +288,6 @@ func Test_client_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -397,7 +394,6 @@ func Test_client_Reader(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -504,7 +500,6 @@ func Test_client_Writer(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/db/storage/blob/s3/sdk/s3/s3manager/s3manager_test.go b/internal/db/storage/blob/s3/sdk/s3/s3manager/s3manager_test.go new file mode 100644 index 0000000000..324e734e23 --- /dev/null +++ b/internal/db/storage/blob/s3/sdk/s3/s3manager/s3manager_test.go @@ -0,0 +1,164 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
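Editorial aside, not part of the patch: the MockIO / MockReadCloser tests above only verify that the stub fields are forwarded by the mock's methods. As a rough sketch of how such a mock is normally consumed (assuming this snippet lives in the same package as MockReadCloser and that Read/Close simply delegate to their *Func fields, as the tests imply; the payload and assertions are illustrative only):

func TestConsumeWithMockReadCloser(t *testing.T) {
	payload := []byte("vald")
	m := &MockReadCloser{
		// ReadFunc hands back the fixed payload once; later calls return 0 bytes.
		ReadFunc: func(p []byte) (int, error) {
			n := copy(p, payload)
			payload = payload[n:]
			return n, nil
		},
		CloseFunc: func() error { return nil },
	}
	buf := make([]byte, 8)
	if n, _ := m.Read(buf); string(buf[:n]) != "vald" {
		t.Errorf("unexpected read result: %q", buf[:n])
	}
	if err := m.Close(); err != nil {
		t.Errorf("unexpected close error: %v", err)
	}
}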
+// +package s3manager + +import ( + "reflect" + "testing" + + "github.com/vdaas/vald/internal/db/storage/blob/s3/sdk/s3/s3iface" + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type want struct { + want S3Manager + } + type test struct { + name string + want want + checkFunc func(want, S3Manager) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got S3Manager) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := New() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_s3mngr_NewUploaderWithClient(t *testing.T) { + t.Parallel() + type args struct { + svc s3iface.S3API + options []func(*Uploader) + } + type want struct { + want UploadClient + } + type test struct { + name string + args args + s *s3mngr + want want + checkFunc func(want, UploadClient) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got UploadClient) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + svc: nil, + options: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + svc: nil, + options: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &s3mngr{} + + got := s.NewUploaderWithClient(test.args.svc, test.args.options...) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/db/storage/blob/s3/session/option.go b/internal/db/storage/blob/s3/session/option.go index d58a31aa5a..6230f5e0ff 100644 --- a/internal/db/storage/blob/s3/session/option.go +++ b/internal/db/storage/blob/s3/session/option.go @@ -25,21 +25,19 @@ import ( // Option represents the functional option for session. 
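Editorial aside, not part of the patch: the s3manager_test.go stubs above leave every case as a TODO. Assuming New() returns a bare &s3mngr{} (which is all the surrounding test code implies; adjust the expected value if the real constructor sets fields), one plausible entry for the TestNew table would be:

{
	name:      "returns an empty s3mngr instance",
	want:      want{want: &s3mngr{}},
	checkFunc: defaultCheckFunc,
},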
type Option func(s *sess) error -var ( - defaultOpts = []Option{ - WithMaxRetries(-1), - WithForcePathStyle(false), - WithUseAccelerate(false), - WithUseARNRegion(false), - WithUseDualStack(false), - WithEnableSSL(true), - WithEnableParamValidation(true), - WithEnable100Continue(true), - WithEnableContentMD5Validation(true), - WithEnableEndpointDiscovery(false), - WithEnableEndpointHostPrefix(true), - } -) +var defaultOpts = []Option{ + WithMaxRetries(-1), + WithForcePathStyle(false), + WithUseAccelerate(false), + WithUseARNRegion(false), + WithUseDualStack(false), + WithEnableSSL(true), + WithEnableParamValidation(true), + WithEnable100Continue(true), + WithEnableContentMD5Validation(true), + WithEnableEndpointDiscovery(false), + WithEnableEndpointHostPrefix(true), +} // WithEndpoint returns the option to set the endpoint. func WithEndpoint(ep string) Option { diff --git a/internal/db/storage/blob/s3/session/session.go b/internal/db/storage/blob/s3/session/session.go index dafbe9f562..95bf42913f 100644 --- a/internal/db/storage/blob/s3/session/session.go +++ b/internal/db/storage/blob/s3/session/session.go @@ -49,12 +49,12 @@ type sess struct { client *http.Client } -// Session represents the interface to get AWS S3 session +// Session represents the interface to get AWS S3 session. type Session interface { Session() (*session.Session, error) } -// New returns the session implementation +// New returns the session implementation. func New(opts ...Option) Session { s := new(sess) for _, opt := range append(defaultOpts, opts...) { @@ -66,7 +66,7 @@ func New(opts ...Option) Session { return s } -// Session returns the AWS S3 session or any error occurred +// Session returns the AWS S3 session or any error occurred. func (s *sess) Session() (*session.Session, error) { cfg := aws.NewConfig().WithRegion(s.region) diff --git a/internal/db/storage/blob/s3/session/session_test.go b/internal/db/storage/blob/s3/session/session_test.go index f18c65649d..a974439aea 100644 --- a/internal/db/storage/blob/s3/session/session_test.go +++ b/internal/db/storage/blob/s3/session/session_test.go @@ -181,7 +181,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -481,7 +480,7 @@ func Test_sess_Session(t *testing.T) { Config: &aws.Config{ Region: atop(""), Credentials: nil, - //DisableSSL: btop(false), + // DisableSSL: btop(false), HTTPClient: &http.Client{}, LogLevel: aws.LogLevel(aws.LogLevelType(uint(0))), MaxRetries: itop(0), @@ -524,7 +523,7 @@ func Test_sess_Session(t *testing.T) { DisableEndpointHostPrefix: btop(true), STSRegionalEndpoint: endpoints.LegacySTSEndpoint, S3UsEast1RegionalEndpoint: endpoints.LegacyS3UsEast1Endpoint, - //DisableParamValidation: btop(true), + // DisableParamValidation: btop(true), }, }, }, @@ -544,7 +543,7 @@ func Test_sess_Session(t *testing.T) { LogLevel: aws.LogLevel(aws.LogLevelType(uint(0))), MaxRetries: itop(0), S3ForcePathStyle: btop(false), - //S3Disable100Continue: btop(true), + // S3Disable100Continue: btop(true), S3UseAccelerate: btop(false), S3DisableContentMD5Validation: btop(true), S3UseARNRegion: btop(false), @@ -608,7 +607,7 @@ func Test_sess_Session(t *testing.T) { S3UseARNRegion: btop(false), UseDualStack: btop(false), EnableEndpointDiscovery: btop(false), - //DisableEndpointHostPrefix: btop(true), + // DisableEndpointHostPrefix: btop(true), STSRegionalEndpoint: endpoints.LegacySTSEndpoint, S3UsEast1RegionalEndpoint: endpoints.LegacyS3UsEast1Endpoint, DisableParamValidation: 
btop(true), @@ -686,7 +685,6 @@ func Test_sess_Session(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/db/storage/blob/s3/writer/option.go b/internal/db/storage/blob/s3/writer/option.go index 88d0bfd9e4..0d67c5b330 100644 --- a/internal/db/storage/blob/s3/writer/option.go +++ b/internal/db/storage/blob/s3/writer/option.go @@ -26,13 +26,11 @@ import ( // Option represents the functional option for writer. type Option func(w *writer) error -var ( - defaultOpts = []Option{ - WithErrGroup(errgroup.Get()), - WithContentType("application/octet-stream"), - WithMaxPartSize(64 * 1024 * 1024), - } -) +var defaultOpts = []Option{ + WithErrGroup(errgroup.Get()), + WithContentType("application/octet-stream"), + WithMaxPartSize(64 * 1024 * 1024), +} // WithErrGroup returns the option to set eg for writer. func WithErrGroup(eg errgroup.Group) Option { diff --git a/internal/db/storage/blob/s3/writer/option_test.go b/internal/db/storage/blob/s3/writer/option_test.go index 541cd639cf..edfc20613c 100644 --- a/internal/db/storage/blob/s3/writer/option_test.go +++ b/internal/db/storage/blob/s3/writer/option_test.go @@ -27,11 +27,9 @@ import ( "go.uber.org/goleak" ) -var ( - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - } -) +var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), +} func TestWithErrGroup(t *testing.T) { type T = writer diff --git a/internal/db/storage/blob/s3/writer/writer_test.go b/internal/db/storage/blob/s3/writer/writer_test.go index 913398af97..97032f4e40 100644 --- a/internal/db/storage/blob/s3/writer/writer_test.go +++ b/internal/db/storage/blob/s3/writer/writer_test.go @@ -219,7 +219,6 @@ func Test_writer_Open(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -318,7 +317,6 @@ func Test_writer_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -434,7 +432,6 @@ func Test_writer_Write(t *testing.T) { if err := test.checkFunc(test.want, gotN, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/encoding/json/json_test.go b/internal/encoding/json/json_test.go index 8ec8345d66..cb9070d1d3 100644 --- a/internal/encoding/json/json_test.go +++ b/internal/encoding/json/json_test.go @@ -300,7 +300,6 @@ func TestUnmarshal(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -375,7 +374,6 @@ func TestMarshal(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/errgroup/group_test.go b/internal/errgroup/group_test.go index 0e950f16a8..f892c12a8d 100644 --- a/internal/errgroup/group_test.go +++ b/internal/errgroup/group_test.go @@ -27,7 +27,6 @@ import ( "time" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) diff --git a/internal/errors/backup.go b/internal/errors/backup.go index cca5391e8d..ba72744812 100644 --- a/internal/errors/backup.go +++ b/internal/errors/backup.go @@ -17,6 +17,4 @@ // Package errors provides error types and function package errors -var ( - ErrInvalidBackupConfig = New("invalid backup config") -) +var ErrInvalidBackupConfig = New("invalid backup config") diff --git a/internal/errors/benchmark.go 
b/internal/errors/benchmark.go index afba087e2e..36438f224a 100644 --- a/internal/errors/benchmark.go +++ b/internal/errors/benchmark.go @@ -17,6 +17,4 @@ // Package errors provides benchmark error package errors -var ( - ErrInvalidCoreMode = New("invalid core mode") -) +var ErrInvalidCoreMode = New("invalid core mode") diff --git a/internal/errors/blob.go b/internal/errors/blob.go index 853f0b34e6..f190f72561 100644 --- a/internal/errors/blob.go +++ b/internal/errors/blob.go @@ -18,7 +18,7 @@ package errors var ( - // BlobStorage + // BlobStorage. NewErrBlobNoSuchBucket = func(err error, name string) error { return &ErrBlobNoSuchBucket{ err: Wrap(err, Errorf("bucket %s not found", name).Error()), diff --git a/internal/errors/blob_test.go b/internal/errors/blob_test.go index df8bd60554..6c1dc1f198 100644 --- a/internal/errors/blob_test.go +++ b/internal/errors/blob_test.go @@ -93,7 +93,6 @@ func TestErrBlobNoSuchBucket_Error(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -164,7 +163,6 @@ func TestIsErrBlobNoSuchBucket(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -238,7 +236,6 @@ func TestErrBlobNoSuchKey_Error(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -309,7 +306,156 @@ func TestIsErrBlobNoSuchKey(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } + }) + } +} + +func TestErrBlobNoSuchBucket_Unwrap(t *testing.T) { + t.Parallel() + type fields struct { + err error + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !Is(err, w.err) { + return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + err: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + err: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + e := &ErrBlobNoSuchBucket{ + err: test.fields.err, + } + + err := e.Unwrap() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrBlobNoSuchKey_Unwrap(t *testing.T) { + t.Parallel() + type fields struct { + err error + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !Is(err, w.err) { + return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + err: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields 
{ + err: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + e := &ErrBlobNoSuchKey{ + err: test.fields.err, + } + + err := e.Unwrap() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } }) } } diff --git a/internal/errors/cache.go b/internal/errors/cache.go index 5694f5209f..a8b27a22ca 100644 --- a/internal/errors/cache.go +++ b/internal/errors/cache.go @@ -17,6 +17,4 @@ // Package errors provides error types and function package errors -var ( - ErrInvalidCacherType = New("invalid cacher type") -) +var ErrInvalidCacherType = New("invalid cacher type") diff --git a/internal/errors/cassandra.go b/internal/errors/cassandra.go index 8e9a3f23bf..7b48012384 100644 --- a/internal/errors/cassandra.go +++ b/internal/errors/cassandra.go @@ -19,7 +19,7 @@ package errors var ( - // Cassandra + // Cassandra. ErrCassandraInvalidConsistencyType = func(consistency string) error { return Errorf("consistetncy type %q is not defined", consistency) } diff --git a/internal/errors/cassandra_test.go b/internal/errors/cassandra_test.go index 6e888177bc..d03f987571 100644 --- a/internal/errors/cassandra_test.go +++ b/internal/errors/cassandra_test.go @@ -20,6 +20,8 @@ package errors import ( "reflect" "testing" + + "go.uber.org/goleak" ) func TestErrCassandraNotFoundIdentity_Error(t *testing.T) { @@ -90,7 +92,6 @@ func TestErrCassandraNotFoundIdentity_Error(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -160,7 +161,6 @@ func TestIsErrCassandraNotFound(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -233,7 +233,6 @@ func TestErrCassandraUnavailableIdentity_Error(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -303,7 +302,156 @@ func TestIsErrCassandraUnavailable(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } + }) + } +} +func TestErrCassandraNotFoundIdentity_Unwrap(t *testing.T) { + t.Parallel() + type fields struct { + err error + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !Is(err, w.err) { + return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + err: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + err: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + e := &ErrCassandraNotFoundIdentity{ + err: test.fields.err, + } + + err := e.Unwrap() 
+ if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrCassandraUnavailableIdentity_Unwrap(t *testing.T) { + t.Parallel() + type fields struct { + err error + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !Is(err, w.err) { + return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + err: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + err: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + e := &ErrCassandraUnavailableIdentity{ + err: test.fields.err, + } + + err := e.Unwrap() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } }) } } diff --git a/internal/errors/client.go b/internal/errors/client.go index a85fda3753..9be6123e1b 100644 --- a/internal/errors/client.go +++ b/internal/errors/client.go @@ -17,7 +17,5 @@ // Package errors provides error types and function package errors -var ( - // ErrUnsupportedClientMethod is unsupported method error for gRPC/REST client - ErrUnsupportedClientMethod = New("unsupported method") -) +// ErrUnsupportedClientMethod is unsupported method error for gRPC/REST client. +var ErrUnsupportedClientMethod = New("unsupported method") diff --git a/internal/errors/compressor.go b/internal/errors/compressor.go index 5d58578502..8efa67642f 100644 --- a/internal/errors/compressor.go +++ b/internal/errors/compressor.go @@ -18,12 +18,12 @@ package errors var ( - // internal compressor + // internal compressor. ErrInvalidCompressionLevel = func(level int) error { return Errorf("invalid compression level: %d", level) } - // Compressor + // Compressor. ErrCompressorNameNotFound = func(name string) error { return Errorf("compressor %s not found", name) } diff --git a/internal/errors/errors.go b/internal/errors/errors.go index f0c469f959..144c7568d5 100644 --- a/internal/errors/errors.go +++ b/internal/errors/errors.go @@ -22,6 +22,7 @@ import ( "fmt" "reflect" "runtime" + "strings" ) var ( @@ -79,7 +80,7 @@ var ( Wrapf = func(err error, format string, args ...interface{}) error { if err != nil { - if format != "" && len(args) > 0 { + if format != "" && len(args) != 0 { return Wrap(err, fmt.Sprintf(format, args...)) } return err @@ -97,10 +98,20 @@ var ( Unwrap = errors.Unwrap Errorf = func(format string, args ...interface{}) error { - if format != "" && args != nil && len(args) > 0 { + const delim = " " + if format == "" && len(args) == 0 { + return nil + } + if len(args) != 0 { + if format == "" { + for range args { + format += "%v" + delim + } + format = strings.TrimSuffix(format, delim) + } return fmt.Errorf(format, args...) 
} - return nil + return New(format) } Is = func(err, target error) bool { @@ -110,7 +121,8 @@ var ( isComparable := reflect.TypeOf(target).Comparable() for { - if isComparable && (err == target || err.Error() == target.Error()) { + if isComparable && (err == target || + err.Error() == target.Error()) { return true } if x, ok := err.(interface { diff --git a/internal/errors/gongt.go b/internal/errors/gongt.go index f5d0d1bd9e..3d8b2e738e 100644 --- a/internal/errors/gongt.go +++ b/internal/errors/gongt.go @@ -17,6 +17,4 @@ // Package errors provides benchmark error package errors -var ( - ErrGoNGTNotSupportedMethod = New("not supported method") -) +var ErrGoNGTNotSupportedMethod = New("not supported method") diff --git a/internal/errors/grpc.go b/internal/errors/grpc.go index ab7b3dfe73..e077d5f720 100644 --- a/internal/errors/grpc.go +++ b/internal/errors/grpc.go @@ -19,7 +19,7 @@ package errors var ( - // gRPC + // gRPC. ErrgRPCClientConnectionClose = func(name string, err error) error { return Wrapf(err, "%s's gRPC connection close error", name) diff --git a/internal/errors/http.go b/internal/errors/http.go index a68560d69c..7df8fbecd0 100644 --- a/internal/errors/http.go +++ b/internal/errors/http.go @@ -20,7 +20,7 @@ package errors import "time" var ( - // HTTP + // HTTP. ErrInvalidAPIConfig = New("invalid api config") diff --git a/internal/errors/io.go b/internal/errors/io.go index 5cc21cf9ac..c00bdcefbc 100644 --- a/internal/errors/io.go +++ b/internal/errors/io.go @@ -18,7 +18,7 @@ package errors var ( - // io + // io. NewErrContextNotProvided = func() error { return New("context not provided") } diff --git a/internal/errors/k8s.go b/internal/errors/k8s.go index e4cc951505..d81723ec8e 100644 --- a/internal/errors/k8s.go +++ b/internal/errors/k8s.go @@ -17,6 +17,4 @@ // Package errors provides error types and function package errors -var ( - ErrInvalidReconcilerConfig = New("invalid reconciler config") -) +var ErrInvalidReconcilerConfig = New("invalid reconciler config") diff --git a/internal/errors/meta.go b/internal/errors/meta.go index ace1704723..92eaa55f00 100644 --- a/internal/errors/meta.go +++ b/internal/errors/meta.go @@ -17,6 +17,4 @@ // Package errors provides error types and function package errors -var ( - ErrInvalidMetaDataConfig = New("invalid metadata config") -) +var ErrInvalidMetaDataConfig = New("invalid metadata config") diff --git a/internal/errors/mysql.go b/internal/errors/mysql.go index 20ac4d2d7a..c305e33944 100644 --- a/internal/errors/mysql.go +++ b/internal/errors/mysql.go @@ -18,7 +18,7 @@ package errors var ( - // MySQL + // MySQL. 
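Editorial note, not part of the patch: the internal/errors/errors.go hunk above changes Errorf so that a bare format string is no longer silently dropped and so that args passed without a format are joined with "%v" verbs separated by spaces. Read directly from the diff, the new behaviour looks like this (the sample values are illustrative):

Errorf("")                  // nil: nothing to format, unchanged
Errorf("sample message")    // now New("sample message"); previously this returned nil
Errorf("", "vald", 1.1)     // format synthesized as "%v %v" -> error text "vald 1.1"
Errorf("failed: %d", 42)    // "failed: 42", unchanged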
ErrMySQLConnectionPingFailed = New("error MySQL connection ping failed") NewErrMySQLNotFoundIdentity = func() error { diff --git a/internal/errors/mysql_test.go b/internal/errors/mysql_test.go index b20f4a8a02..9eead829dd 100644 --- a/internal/errors/mysql_test.go +++ b/internal/errors/mysql_test.go @@ -21,11 +21,11 @@ import ( "reflect" "testing" - "github.com/pkg/errors" "go.uber.org/goleak" ) func TestErrMySQLNotFoundIdentity_Error(t *testing.T) { + t.Parallel() type fields struct { err error } @@ -74,8 +74,10 @@ func TestErrMySQLNotFoundIdentity_Error(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -94,12 +96,12 @@ func TestErrMySQLNotFoundIdentity_Error(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestErrMySQLNotFoundIdentity_Unwrap(t *testing.T) { + t.Parallel() type fields struct { err error } @@ -115,8 +117,8 @@ func TestErrMySQLNotFoundIdentity_Unwrap(t *testing.T) { afterFunc func() } defaultCheckFunc := func(w want, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + if !Is(err, w.err) { + return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } return nil } @@ -148,8 +150,10 @@ func TestErrMySQLNotFoundIdentity_Unwrap(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -168,12 +172,12 @@ func TestErrMySQLNotFoundIdentity_Unwrap(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestIsErrMySQLNotFound(t *testing.T) { + t.Parallel() type args struct { err error } @@ -222,8 +226,10 @@ func TestIsErrMySQLNotFound(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -239,12 +245,12 @@ func TestIsErrMySQLNotFound(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestErrMySQLInvalidArgumentIdentity_Error(t *testing.T) { + t.Parallel() type fields struct { err error } @@ -293,8 +299,10 @@ func TestErrMySQLInvalidArgumentIdentity_Error(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -313,12 +321,12 @@ func TestErrMySQLInvalidArgumentIdentity_Error(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestErrMySQLInvalidArgumentIdentity_Unwrap(t *testing.T) { + t.Parallel() type fields struct { err error } @@ -334,8 +342,8 @@ func TestErrMySQLInvalidArgumentIdentity_Unwrap(t *testing.T) { afterFunc func() } defaultCheckFunc := func(w want, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + if !Is(err, w.err) { + return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } return nil } @@ -367,8 +375,10 @@ func 
TestErrMySQLInvalidArgumentIdentity_Unwrap(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -387,12 +397,12 @@ func TestErrMySQLInvalidArgumentIdentity_Unwrap(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestIsErrMySQLInvalidArgument(t *testing.T) { + t.Parallel() type args struct { err error } @@ -441,8 +451,10 @@ func TestIsErrMySQLInvalidArgument(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -458,7 +470,6 @@ func TestIsErrMySQLInvalidArgument(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/errors/net.go b/internal/errors/net.go index 433b10b2c4..9246e8052f 100644 --- a/internal/errors/net.go +++ b/internal/errors/net.go @@ -20,17 +20,17 @@ package errors import "time" var ( - // tcp + // tcp. - // ErrFailedInitDialer defines the init dialer error + // ErrFailedInitDialer defines the init dialer error. ErrFailedInitDialer = New("failed to init dialer") - // ErrInvalidDNSConfig defines the invalid DNS config error + // ErrInvalidDNSConfig defines the invalid DNS config error. ErrInvalidDNSConfig = func(dnsRefreshDur, dnsCacheExp time.Duration) error { return Errorf("dnsRefreshDuration > dnsCacheExp, %s, %s", dnsRefreshDur, dnsCacheExp) } - // net + // net. - // ErrNoPortAvailiable defines no port available error + // ErrNoPortAvailiable defines no port available error. ErrNoPortAvailable = New("no port available") ) diff --git a/internal/errors/ngt.go b/internal/errors/ngt.go index ca3002c85f..4ac5c8a0e6 100644 --- a/internal/errors/ngt.go +++ b/internal/errors/ngt.go @@ -18,7 +18,7 @@ package errors var ( - //NGT + // NGT. ErrCreateProperty = func(err error) error { return Wrap(err, "failed to create property") @@ -39,6 +39,10 @@ var ( return Errorf("supported dimension limit exceed:\trequired = %d,\tlimit = %d", current, limit) } + ErrIncompatibleDimensionSize = func(req, dim int) error { + return Errorf("incompatible dimension size detected\trequested: %d,\tconfigured: %d", req, dim) + } + ErrUnsupportedObjectType = New("unsupported ObjectType") ErrUnsupportedDistanceType = New("unsupported DistanceType") @@ -69,7 +73,7 @@ var ( ErrUncommittedIndexNotFound = New("uncommitted indexes are not found") - // ErrCAPINotImplemented raises using not implemented function in C API + // ErrCAPINotImplemented raises using not implemented function in C API. 
ErrCAPINotImplemented = New("not implemented in C API") ErrUUIDAlreadyExists = func(uuid string, oid uint) error { diff --git a/internal/errors/observability.go b/internal/errors/observability.go index b361688566..9ba73834ad 100644 --- a/internal/errors/observability.go +++ b/internal/errors/observability.go @@ -17,8 +17,6 @@ // Package errors provides error types and function package errors -var ( - ErrCollectorNotFound = func() error { - return New("observability.collector not found") - } -) +var ErrCollectorNotFound = func() error { + return New("observability.collector not found") +} diff --git a/internal/errors/option.go b/internal/errors/option.go index cab7349b30..c1960f7335 100644 --- a/internal/errors/option.go +++ b/internal/errors/option.go @@ -15,7 +15,7 @@ // package errors -// ErrInvalidOption represent the invalid option error +// ErrInvalidOption represent the invalid option error. type ErrInvalidOption struct { err error origin error @@ -58,7 +58,7 @@ func (e *ErrInvalidOption) Unwrap() error { ErrCriticalOption */ -// ErrCriticalOption represent the critical option error +// ErrCriticalOption represent the critical option error. type ErrCriticalOption struct { err error origin error diff --git a/internal/errors/option_test.go b/internal/errors/option_test.go new file mode 100644 index 0000000000..131ffb761f --- /dev/null +++ b/internal/errors/option_test.go @@ -0,0 +1,742 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +package errors + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestNewErrInvalidOption(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + name string + val interface{} + errs []error + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !Is(err, w.err) { + return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + name: "", + val: nil, + errs: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + name: "", + val: nil, + errs: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := NewErrInvalidOption(test.args.name, test.args.val, test.args.errs...) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := NewErrInvalidOption(test.args.name, test.args.val, test.args.errs...) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestErrInvalidOption_Error(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type fields struct { + err error + origin error + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + fields fields + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func() + afterFunc func() + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !Is(err, w.err) { + return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + err: nil, + origin: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + err: nil, + origin: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := e.Error() + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := e.Error() + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestErrInvalidOption_Unwrap(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type fields struct { + err error + origin error + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + fields fields + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func() + afterFunc func() + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !Is(err, w.err) { + return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + err: nil, + origin: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + err: nil, + origin: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := e.Unwrap() + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := e.Unwrap() + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestNewErrCriticalOption(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + name string + val interface{} + errs []error + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !Is(err, w.err) { + return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + name: "", + val: nil, + errs: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + name: "", + val: nil, + errs: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := NewErrCriticalOption(test.args.name, test.args.val, test.args.errs...) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := NewErrCriticalOption(test.args.name, test.args.val, test.args.errs...) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestErrCriticalOption_Error(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type fields struct { + err error + origin error + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + fields fields + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func() + afterFunc func() + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !Is(err, w.err) { + return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + err: nil, + origin: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + err: nil, + origin: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := e.Error() + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := e.Error() + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestErrCriticalOption_Unwrap(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type fields struct { + err error + origin error + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + fields fields + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func() + afterFunc func() + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !Is(err, w.err) { + return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + err: nil, + origin: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + err: nil, + origin: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := e.Unwrap() + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := e.Unwrap() + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/internal/errors/redis.go b/internal/errors/redis.go index ad2caf7e2e..d27b40d0ea 100644 --- a/internal/errors/redis.go +++ b/internal/errors/redis.go @@ -19,7 +19,7 @@ package errors var ( - // Redis + // Redis. 
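Editorial aside, not part of the patch: the new option_test.go above was generated from the functional-option test template, so its commented-out bodies still treat the result as an Option (`got(obj)`), which does not fit constructors that return plain errors. When the TODO cases are filled in, a direct check is probably closer to the intent; in the sketch below only the constructor names and argument shapes come from the stubs, while the error return type and the sample values are assumptions:

if err := NewErrInvalidOption("timeout", 10); err == nil {
	t.Error("expected a non-nil error for an invalid option value")
}
if err := NewErrCriticalOption("timeout", 10, New("must be positive")); err == nil {
	t.Error("expected a non-nil error for a critical option failure")
}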
ErrRedisInvalidKVVKPrefix = func(kv, vk string) error { return Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", kv, vk) } diff --git a/internal/errors/redis_test.go b/internal/errors/redis_test.go index 91a14511e1..c2db70419c 100644 --- a/internal/errors/redis_test.go +++ b/internal/errors/redis_test.go @@ -20,6 +20,8 @@ package errors import ( "reflect" "testing" + + "go.uber.org/goleak" ) func TestErrRedisNotFoundIdentity_Error(t *testing.T) { @@ -90,7 +92,6 @@ func TestErrRedisNotFoundIdentity_Error(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -160,7 +161,81 @@ func TestIsErrRedisNotFound(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } + }) + } +} + +func TestErrRedisNotFoundIdentity_Unwrap(t *testing.T) { + t.Parallel() + type fields struct { + err error + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !Is(err, w.err) { + return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + err: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + err: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + e := &ErrRedisNotFoundIdentity{ + err: test.fields.err, + } + err := e.Unwrap() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } }) } } diff --git a/internal/errors/runner.go b/internal/errors/runner.go index 06a252f028..e49dfa4a45 100644 --- a/internal/errors/runner.go +++ b/internal/errors/runner.go @@ -27,22 +27,22 @@ var ( } ErrStartFunc = func(name string, err error) error { - return Wrapf(err, "error occured in runner.Start at %s", name) + return Wrapf(err, "error occurred in runner.Start at %s", name) } ErrPreStopFunc = func(name string, err error) error { - return Wrapf(err, "error occured in runner.PreStop at %s", name) + return Wrapf(err, "error occurred in runner.PreStop at %s", name) } ErrStopFunc = func(name string, err error) error { - return Wrapf(err, "error occured in runner.Stop at %s", name) + return Wrapf(err, "error occurred in runner.Stop at %s", name) } ErrPostStopFunc = func(name string, err error) error { - return Wrapf(err, "error occured in runner.PostStop at %s", name) + return Wrapf(err, "error occurred in runner.PostStop at %s", name) } ErrRunnerWait = func(name string, err error) error { - return Wrapf(err, "error occured in runner.Wait at %s", name) + return Wrapf(err, "error occurred in runner.Wait at %s", name) } ) diff --git a/internal/errors/runtime.go b/internal/errors/runtime.go index de6eafe397..60046d337e 100644 --- a/internal/errors/runtime.go +++ b/internal/errors/runtime.go @@ -20,7 +20,7 @@ package errors import "runtime" var ( - // Runtime + // Runtime. 
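Editorial aside, not part of the patch: the *_Unwrap tests added in this diff (ErrBlobNoSuchBucket, ErrCassandra*Identity, ErrRedisNotFoundIdentity, ErrMySQL*Identity) all pin down the same contract: the identity wrapper exposes its wrapped cause via Unwrap, so Is can match against the underlying error. A minimal sketch of that contract, assuming Unwrap returns the wrapped err field as the fields/want pairing in those tests suggests:

cause := New("redis: connection refused") // illustrative sentinel
e := &ErrRedisNotFoundIdentity{err: cause}
if !Is(e.Unwrap(), cause) {
	t.Errorf("Unwrap did not expose the wrapped cause: %v", e.Unwrap())
}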
ErrPanicRecovered = func(err error, rec interface{}) error { return Wrap(err, Errorf("panic recovered: %v", rec).Error()) @@ -31,6 +31,6 @@ var ( } ErrRuntimeError = func(err error, r runtime.Error) error { - return Wrap(err, Errorf("system paniced caused by runtime error: %v", r).Error()) + return Wrap(err, Errorf("system panicked caused by runtime error: %v", r).Error()) } ) diff --git a/internal/errors/tls.go b/internal/errors/tls.go index 72bc5b0f69..6185ded870 100644 --- a/internal/errors/tls.go +++ b/internal/errors/tls.go @@ -18,12 +18,12 @@ package errors var ( - //TLS + // TLS. - // ErrTLSDisabled is error variable, it's replesents config error that tls is disabled by config + // ErrTLSDisabled is error variable, it's replesents config error that tls is disabled by config. ErrTLSDisabled = New("tls feature is disabled") - // ErrTLSCertOrKeyNotFound is error variable, it's replesents tls cert or key not found error + // ErrTLSCertOrKeyNotFound is error variable, it's replesents tls cert or key not found error. ErrTLSCertOrKeyNotFound = New("cert or key file path not found") ErrCertificationFailed = New("certification failed") diff --git a/internal/errors/unit.go b/internal/errors/unit.go index b621480e15..5255a92d9a 100644 --- a/internal/errors/unit.go +++ b/internal/errors/unit.go @@ -17,8 +17,6 @@ // Package errors provides error types and function package errors -var ( - ErrParseUnitFailed = func(s string) error { - return Errorf("failed to parse: '%s'", s) - } -) +var ErrParseUnitFailed = func(s string) error { + return Errorf("failed to parse: '%s'", s) +} diff --git a/internal/file/watch/option.go b/internal/file/watch/option.go index 5f9ff9a588..7ba5ca9a4f 100644 --- a/internal/file/watch/option.go +++ b/internal/file/watch/option.go @@ -24,9 +24,7 @@ import ( type Option func(w *watch) error -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithErrGroup(eg errgroup.Group) Option { return func(w *watch) error { @@ -98,6 +96,7 @@ func WithOnDelete(f func(ctx context.Context, name string) error) Option { return nil } } + func WithOnWrite(f func(ctx context.Context, name string) error) Option { return func(w *watch) error { if f != nil { diff --git a/internal/file/watch/option_test.go b/internal/file/watch/option_test.go index 262b186a87..3fb6e4885e 100644 --- a/internal/file/watch/option_test.go +++ b/internal/file/watch/option_test.go @@ -141,8 +141,8 @@ func TestWithDirs(t *testing.T) { want: want{ obj: &T{ dirs: map[string]struct{}{ - "vdaas": struct{}{}, - "vald": struct{}{}, + "vdaas": {}, + "vald": {}, }, }, }, @@ -157,15 +157,15 @@ func TestWithDirs(t *testing.T) { }, field: field{ dirs: map[string]struct{}{ - "team": struct{}{}, + "team": {}, }, }, want: want{ obj: &T{ dirs: map[string]struct{}{ - "team": struct{}{}, - "vdaas": struct{}{}, - "vald": struct{}{}, + "team": {}, + "vdaas": {}, + "vald": {}, }, }, }, diff --git a/internal/file/watch/watch_test.go b/internal/file/watch/watch_test.go index 699de106fa..2a3ec0404e 100644 --- a/internal/file/watch/watch_test.go +++ b/internal/file/watch/watch_test.go @@ -32,14 +32,12 @@ import ( "go.uber.org/goleak" ) -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. 
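Editorial aside, not part of the patch: the goleakIgnoreOptions blocks being reshaped in this diff (writer/option_test.go above and watch_test.go here) follow one pattern: list the goroutines that are known to be started by external packages and therefore must not count as leaks, then hand that list to goleak.VerifyNone. A self-contained sketch of the pattern (package, file, and test names are hypothetical):

package example_test

import (
	"testing"

	"go.uber.org/goleak"
)

// fastime keeps a background timer goroutine running; it is not a leak of the code under test.
var goleakIgnoreOptions = []goleak.Option{
	goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"),
}

func TestSomething(t *testing.T) {
	defer goleak.VerifyNone(t, goleakIgnoreOptions...)
	// ... exercise the code under test ...
}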
- goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - goleak.IgnoreTopFunction("syscall.Syscall6"), - goleak.IgnoreTopFunction("syscall.syscall6"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. +var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), + goleak.IgnoreTopFunction("syscall.Syscall6"), + goleak.IgnoreTopFunction("syscall.syscall6"), +} func TestMain(m *testing.M) { log.Init() @@ -156,7 +154,7 @@ func Test_watch_init(t *testing.T) { name: "returns no such file or directory error when file not exists", fields: fields{ dirs: map[string]struct{}{ - "vald.go": struct{}{}, + "vald.go": {}, }, }, want: want{ @@ -168,7 +166,7 @@ func Test_watch_init(t *testing.T) { name: "returns no such file or directory error when directory not exists", fields: fields{ dirs: map[string]struct{}{ - "test": struct{}{}, + "test": {}, }, }, want: want{ @@ -180,8 +178,8 @@ func Test_watch_init(t *testing.T) { name: "returns no such file or directory error when some file not exists", fields: fields{ dirs: map[string]struct{}{ - "watch.go": struct{}{}, - "vald.go": struct{}{}, + "watch.go": {}, + "vald.go": {}, }, }, want: want{ @@ -193,9 +191,9 @@ func Test_watch_init(t *testing.T) { name: "returns nil when watcher already created and initialize success", fields: fields{ dirs: map[string]struct{}{ - "../watch": struct{}{}, - "watch.go": struct{}{}, - "watch_test.go": struct{}{}, + "../watch": {}, + "watch.go": {}, + "watch_test.go": {}, }, w: func() *fsnotify.Watcher { w, _ := fsnotify.NewWatcher() @@ -223,9 +221,9 @@ func Test_watch_init(t *testing.T) { name: "returns nil when initialize success", fields: fields{ dirs: map[string]struct{}{ - "../watch": struct{}{}, - "watch.go": struct{}{}, - "watch_test.go": struct{}{}, + "../watch": {}, + "watch.go": {}, + "watch_test.go": {}, }, }, checkFunc: func(w want, got *watch, err error) error { @@ -334,7 +332,7 @@ func Test_watch_Start(t *testing.T) { w: w, eg: errgroup.Get(), dirs: map[string]struct{}{ - "vald": struct{}{}, + "vald": {}, }, } }, @@ -727,7 +725,6 @@ func Test_watch_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -788,16 +785,16 @@ func Test_watch_Add(t *testing.T) { }, fields: fields{ dirs: map[string]struct{}{ - "watch_test.go": struct{}{}, + "watch_test.go": {}, }, }, want: want{ err: nil, want: &watch{ dirs: map[string]struct{}{ - "watch_test.go": struct{}{}, - "./watch.go": struct{}{}, - "./option.go": struct{}{}, + "watch_test.go": {}, + "./watch.go": {}, + "./option.go": {}, }, }, }, @@ -817,7 +814,7 @@ func Test_watch_Add(t *testing.T) { err: nil, want: &watch{ dirs: map[string]struct{}{ - "../watch": struct{}{}, + "../watch": {}, }, }, }, @@ -838,7 +835,7 @@ func Test_watch_Add(t *testing.T) { err: syscall.Errno(0x2), want: &watch{ dirs: map[string]struct{}{ - "watch.go": struct{}{}, + "watch.go": {}, }, }, }, @@ -945,15 +942,15 @@ func Test_watch_Remove(t *testing.T) { }, fields: fields{ dirs: map[string]struct{}{ - "watch.go": struct{}{}, - "watch_test.go": struct{}{}, - "option.go": struct{}{}, + "watch.go": {}, + "watch_test.go": {}, + "option.go": {}, }, }, want: want{ want: &watch{ dirs: map[string]struct{}{ - "option.go": struct{}{}, + "option.go": {}, }, }, err: nil, @@ -969,7 +966,7 @@ func Test_watch_Remove(t *testing.T) { }, 
fields: fields{ dirs: map[string]struct{}{ - "../watch": struct{}{}, + "../watch": {}, }, }, want: want{ @@ -991,14 +988,14 @@ func Test_watch_Remove(t *testing.T) { }, fields: fields{ dirs: map[string]struct{}{ - "watch.go": struct{}{}, - "watch_test.go": struct{}{}, + "watch.go": {}, + "watch_test.go": {}, }, }, want: want{ want: &watch{ dirs: map[string]struct{}{ - "watch_test.go": struct{}{}, + "watch_test.go": {}, }, }, err: fmt.Errorf("can't remove non-existent"), @@ -1098,9 +1095,9 @@ func Test_watch_Stop(t *testing.T) { }, fields: fields{ dirs: map[string]struct{}{ - "../watch": struct{}{}, - "watch.go": struct{}{}, - "watch_test.go": struct{}{}, + "../watch": {}, + "watch.go": {}, + "watch_test.go": {}, }, }, beforeFunc: func(t *testing.T, fields *fields, args args) { @@ -1129,7 +1126,7 @@ func Test_watch_Stop(t *testing.T) { }, fields: fields{ dirs: map[string]struct{}{ - "watch.go": struct{}{}, + "watch.go": {}, }, }, want: want{ diff --git a/internal/info/info_test.go b/internal/info/info_test.go index 070a53d54d..19bca65d0b 100644 --- a/internal/info/info_test.go +++ b/internal/info/info_test.go @@ -83,7 +83,6 @@ func TestString(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -143,7 +142,6 @@ func TestGet(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -260,7 +258,6 @@ func TestDetail_String(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -377,7 +374,6 @@ func TestDetail_Get(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -423,7 +419,6 @@ func TestDetail_prepare(t *testing.T) { CGOEnabled = "true" NGTVersion = "v1.11.6" BuildCPUInfoFlags = "\t\tavx512f avx512dq\t" - } tests := []test{ { diff --git a/internal/io/io_test.go b/internal/io/io_test.go new file mode 100644 index 0000000000..dda5abed52 --- /dev/null +++ b/internal/io/io_test.go @@ -0,0 +1,691 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// Package io provides io functions +package io + +import ( + "bytes" + "context" + "io" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNewReaderWithContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + r io.Reader + } + type want struct { + want io.Reader + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, io.Reader, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got io.Reader, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got error = %v, want %v", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got = %v, want %v", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got, err := NewReaderWithContext(test.args.ctx, test.args.r) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestNewReadCloserWithContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + r io.ReadCloser + } + type want struct { + want io.ReadCloser + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, io.ReadCloser, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got io.ReadCloser, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got error = %v, want %v", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got = %v, want %v", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got, err := NewReadCloserWithContext(test.args.ctx, test.args.r) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_ctxReader_Read(t *testing.T) { + t.Parallel() + type args struct { + p []byte + } + type fields struct { + ctx context.Context + r io.Reader + } + type want struct { + wantN int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc 
:= func(w want, gotN int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got error = %v, want %v", err, w.err) + } + if !reflect.DeepEqual(gotN, w.wantN) { + return errors.Errorf("got = %v, want %v", gotN, w.wantN) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + p: nil, + }, + fields: fields { + ctx: nil, + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + p: nil, + }, + fields: fields { + ctx: nil, + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &ctxReader{ + ctx: test.fields.ctx, + r: test.fields.r, + } + + gotN, err := r.Read(test.args.p) + if err := test.checkFunc(test.want, gotN, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_ctxReader_Close(t *testing.T) { + t.Parallel() + type fields struct { + ctx context.Context + r io.Reader + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got error = %v, want %v", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + ctx: nil, + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + ctx: nil, + r: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &ctxReader{ + ctx: test.fields.ctx, + r: test.fields.r, + } + + err := r.Close() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestNewWriterWithContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type want struct { + want io.Writer + wantW string + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, io.Writer, string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got io.Writer, gotW string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got error = %v, want %v", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got = %v, want %v", got, w.want) + } + if !reflect.DeepEqual(gotW, w.wantW) { + return errors.Errorf("got = %v, want %v", gotW, w.wantW) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: 
args { + ctx: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + w := &bytes.Buffer{} + + got, err := NewWriterWithContext(test.args.ctx, w) + if err := test.checkFunc(test.want, got, w.String(), err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestNewWriteCloserWithContext(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + w io.WriteCloser + } + type want struct { + want io.WriteCloser + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, io.WriteCloser, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got io.WriteCloser, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got error = %v, want %v", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got = %v, want %v", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + w: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + w: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got, err := NewWriteCloserWithContext(test.args.ctx, test.args.w) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_ctxWriter_Write(t *testing.T) { + t.Parallel() + type args struct { + p []byte + } + type fields struct { + ctx context.Context + w io.Writer + } + type want struct { + wantN int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotN int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got error = %v, want %v", err, w.err) + } + if !reflect.DeepEqual(gotN, w.wantN) { + return errors.Errorf("got = %v, want %v", gotN, w.wantN) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + p: nil, + }, + fields: fields { + ctx: nil, + w: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + p: nil, + }, + fields: fields { + ctx: nil, + w: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + w := &ctxWriter{ + ctx: 
test.fields.ctx, + w: test.fields.w, + } + + gotN, err := w.Write(test.args.p) + if err := test.checkFunc(test.want, gotN, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_ctxWriter_Close(t *testing.T) { + t.Parallel() + type fields struct { + ctx context.Context + w io.Writer + } + type want struct { + err error + } + type test struct { + name string + fields fields + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got error = %v, want %v", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + ctx: nil, + w: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + ctx: nil, + w: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + w := &ctxWriter{ + ctx: test.fields.ctx, + w: test.fields.w, + } + + err := w.Close() + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/io/ioutil/ioutil.go b/internal/io/ioutil/ioutil.go new file mode 100644 index 0000000000..3fb77a483c --- /dev/null +++ b/internal/io/ioutil/ioutil.go @@ -0,0 +1,52 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package ioutil provides utility function for I/O +package ioutil + +import ( + "bytes" + "os" + + "github.com/vdaas/vald/internal/safety" +) + +func ReadFile(path string) ([]byte, error) { + f, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm) + if err != nil { + return nil, err + } + defer f.Close() + + var n int64 = bytes.MinRead + if fi, err := f.Stat(); err == nil { + if size := fi.Size() + bytes.MinRead; size > n { + n = size + } + } + + buf := bytes.NewBuffer(make([]byte, 0, n)) + + err = safety.RecoverFunc(func() (err error) { + _, err = buf.ReadFrom(f) + return err + })() + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/hack/swagger/main_test.go b/internal/io/ioutil/ioutil_test.go similarity index 76% rename from hack/swagger/main_test.go rename to internal/io/ioutil/ioutil_test.go index a41a655117..104e164a04 100644 --- a/hack/swagger/main_test.go +++ b/internal/io/ioutil/ioutil_test.go @@ -14,35 +14,41 @@ // limitations under the License. 
// -package swagger +// Package ioutil provides utility function for I/O +package ioutil import ( + "reflect" "testing" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) -func TestParse(t *testing.T) { +func TestReadFile(t *testing.T) { + t.Parallel() type args struct { path string } type want struct { - err error + want []byte + err error } type test struct { name string args args want want - checkFunc func(want, error) error + checkFunc func(want, []byte, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, got []byte, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } return nil } tests := []test{ @@ -73,9 +79,11 @@ func TestParse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -86,11 +94,10 @@ func TestParse(t *testing.T) { test.checkFunc = defaultCheckFunc } - err := Parse(test.args.path) - if err := test.checkFunc(test.want, err); err != nil { + got, err := ReadFile(test.args.path) + if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/k8s/metrics/node/node.go b/internal/k8s/metrics/node/node.go index 5502c5ce13..18f3817a6b 100644 --- a/internal/k8s/metrics/node/node.go +++ b/internal/k8s/metrics/node/node.go @@ -22,7 +22,6 @@ import ( "time" "github.com/vdaas/vald/internal/k8s" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" metrics "k8s.io/metrics/pkg/apis/metrics/v1beta1" diff --git a/internal/k8s/metrics/node/node_test.go b/internal/k8s/metrics/node/node_test.go index 6e5d092bc8..4529b65e8b 100644 --- a/internal/k8s/metrics/node/node_test.go +++ b/internal/k8s/metrics/node/node_test.go @@ -23,13 +23,12 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - - "go.uber.org/goleak" ) func TestNew(t *testing.T) { @@ -98,7 +97,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -202,7 +200,6 @@ func Test_reconciler_Reconcile(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -292,7 +289,6 @@ func Test_reconciler_GetName(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -395,7 +391,6 @@ func Test_reconciler_NewReconciler(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -485,7 +480,6 @@ func Test_reconciler_For(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -575,7 +569,6 @@ func Test_reconciler_Owns(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -669,7 +662,6 @@ func Test_reconciler_Watches(t *testing.T) { if err := 
test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/k8s/metrics/node/option.go b/internal/k8s/metrics/node/option.go index 3db4578f2a..f55f0996e8 100644 --- a/internal/k8s/metrics/node/option.go +++ b/internal/k8s/metrics/node/option.go @@ -21,9 +21,7 @@ import "sigs.k8s.io/controller-runtime/pkg/manager" type Option func(*reconciler) error -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithControllerName(name string) Option { return func(r *reconciler) error { diff --git a/internal/k8s/metrics/node/option_test.go b/internal/k8s/metrics/node/option_test.go index 96b5f32383..20f165d70b 100644 --- a/internal/k8s/metrics/node/option_test.go +++ b/internal/k8s/metrics/node/option_test.go @@ -20,9 +20,8 @@ package node import ( "testing" - "sigs.k8s.io/controller-runtime/pkg/manager" - "go.uber.org/goleak" + "sigs.k8s.io/controller-runtime/pkg/manager" ) func TestWithControllerName(t *testing.T) { diff --git a/internal/k8s/metrics/pod/option.go b/internal/k8s/metrics/pod/option.go index 8b88ddbddb..748e13b506 100644 --- a/internal/k8s/metrics/pod/option.go +++ b/internal/k8s/metrics/pod/option.go @@ -21,9 +21,7 @@ import "sigs.k8s.io/controller-runtime/pkg/manager" type Option func(*reconciler) error -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithControllerName(name string) Option { return func(r *reconciler) error { diff --git a/internal/k8s/metrics/pod/option_test.go b/internal/k8s/metrics/pod/option_test.go index 48657ef141..b8f460e80a 100644 --- a/internal/k8s/metrics/pod/option_test.go +++ b/internal/k8s/metrics/pod/option_test.go @@ -20,9 +20,8 @@ package pod import ( "testing" - "sigs.k8s.io/controller-runtime/pkg/manager" - "go.uber.org/goleak" + "sigs.k8s.io/controller-runtime/pkg/manager" ) func TestWithControllerName(t *testing.T) { diff --git a/internal/k8s/metrics/pod/pod.go b/internal/k8s/metrics/pod/pod.go index 79937b315d..d16da69db3 100644 --- a/internal/k8s/metrics/pod/pod.go +++ b/internal/k8s/metrics/pod/pod.go @@ -22,7 +22,6 @@ import ( "time" "github.com/vdaas/vald/internal/k8s" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" metrics "k8s.io/metrics/pkg/apis/metrics/v1beta1" diff --git a/internal/k8s/metrics/pod/pod_test.go b/internal/k8s/metrics/pod/pod_test.go index e535d9e5b5..96954a2bfb 100644 --- a/internal/k8s/metrics/pod/pod_test.go +++ b/internal/k8s/metrics/pod/pod_test.go @@ -23,13 +23,12 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - - "go.uber.org/goleak" ) func TestNew(t *testing.T) { @@ -98,7 +97,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -202,7 +200,6 @@ func Test_reconciler_Reconcile(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -292,7 +289,6 @@ func Test_reconciler_GetName(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -395,7 +391,6 @@ func Test_reconciler_NewReconciler(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -485,7 +480,6 @@ func Test_reconciler_For(t *testing.T) { if 
err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -575,7 +569,6 @@ func Test_reconciler_Owns(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -669,7 +662,6 @@ func Test_reconciler_Watches(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/k8s/node/node.go b/internal/k8s/node/node.go index f50022e570..12c9852dfa 100644 --- a/internal/k8s/node/node.go +++ b/internal/k8s/node/node.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" diff --git a/internal/k8s/node/node_test.go b/internal/k8s/node/node_test.go index c8b3c727b8..595ef461ff 100644 --- a/internal/k8s/node/node_test.go +++ b/internal/k8s/node/node_test.go @@ -23,14 +23,13 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" "k8s.io/apimachinery/pkg/runtime" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - - "go.uber.org/goleak" ) func TestNew(t *testing.T) { @@ -99,7 +98,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -207,7 +205,6 @@ func Test_reconciler_Reconcile(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -301,7 +298,6 @@ func Test_reconciler_GetName(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -408,7 +404,6 @@ func Test_reconciler_NewReconciler(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -502,7 +497,6 @@ func Test_reconciler_For(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -596,7 +590,6 @@ func Test_reconciler_Owns(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -694,7 +687,6 @@ func Test_reconciler_Watches(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/k8s/node/option.go b/internal/k8s/node/option.go index 8f44f18f0d..817273835f 100644 --- a/internal/k8s/node/option.go +++ b/internal/k8s/node/option.go @@ -21,9 +21,7 @@ import "sigs.k8s.io/controller-runtime/pkg/manager" type Option func(*reconciler) error -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithControllerName(name string) Option { return func(r *reconciler) error { diff --git a/internal/k8s/node/option_test.go b/internal/k8s/node/option_test.go index 840ce1dc55..e5ec0fa772 100644 --- a/internal/k8s/node/option_test.go +++ b/internal/k8s/node/option_test.go @@ -20,9 +20,8 @@ package node import ( "testing" - "sigs.k8s.io/controller-runtime/pkg/manager" - "go.uber.org/goleak" + "sigs.k8s.io/controller-runtime/pkg/manager" ) func TestWithControllerName(t *testing.T) { diff --git a/internal/k8s/option.go b/internal/k8s/option.go index b47bcc4161..d7a73aa22a 100644 --- a/internal/k8s/option.go 
+++ b/internal/k8s/option.go @@ -24,11 +24,9 @@ import ( type Option func(*controller) error -var ( - defaultOpts = []Option{ - WithErrGroup(errgroup.Get()), - } -) +var defaultOpts = []Option{ + WithErrGroup(errgroup.Get()), +} func WithErrGroup(eg errgroup.Group) Option { return func(c *controller) error { diff --git a/internal/k8s/option_test.go b/internal/k8s/option_test.go index cde60becb8..79b0934423 100644 --- a/internal/k8s/option_test.go +++ b/internal/k8s/option_test.go @@ -21,9 +21,8 @@ import ( "testing" "github.com/vdaas/vald/internal/errgroup" - "sigs.k8s.io/controller-runtime/pkg/manager" - "go.uber.org/goleak" + "sigs.k8s.io/controller-runtime/pkg/manager" ) func TestWithErrGroup(t *testing.T) { diff --git a/internal/k8s/pod/option.go b/internal/k8s/pod/option.go index 227861936a..8afd766fe0 100644 --- a/internal/k8s/pod/option.go +++ b/internal/k8s/pod/option.go @@ -21,9 +21,7 @@ import "sigs.k8s.io/controller-runtime/pkg/manager" type Option func(*reconciler) error -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithControllerName(name string) Option { return func(r *reconciler) error { diff --git a/internal/k8s/pod/option_test.go b/internal/k8s/pod/option_test.go index b15b6e92f8..f714af78c3 100644 --- a/internal/k8s/pod/option_test.go +++ b/internal/k8s/pod/option_test.go @@ -20,9 +20,8 @@ package pod import ( "testing" - "sigs.k8s.io/controller-runtime/pkg/manager" - "go.uber.org/goleak" + "sigs.k8s.io/controller-runtime/pkg/manager" ) func TestWithControllerName(t *testing.T) { diff --git a/internal/k8s/pod/pod.go b/internal/k8s/pod/pod.go index 8fd962181d..4b33457011 100644 --- a/internal/k8s/pod/pod.go +++ b/internal/k8s/pod/pod.go @@ -24,7 +24,6 @@ import ( "github.com/vdaas/vald/internal/k8s" "github.com/vdaas/vald/internal/log" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" diff --git a/internal/k8s/pod/pod_test.go b/internal/k8s/pod/pod_test.go index 798e8e38c8..54df424e74 100644 --- a/internal/k8s/pod/pod_test.go +++ b/internal/k8s/pod/pod_test.go @@ -23,13 +23,12 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - - "go.uber.org/goleak" ) func TestNew(t *testing.T) { @@ -98,7 +97,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -206,7 +204,6 @@ func Test_reconciler_Reconcile(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -300,7 +297,6 @@ func Test_reconciler_GetName(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -407,7 +403,6 @@ func Test_reconciler_NewReconciler(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -501,7 +496,6 @@ func Test_reconciler_For(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -595,7 +589,6 @@ func Test_reconciler_Owns(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -693,7 +686,6 @@ func Test_reconciler_Watches(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { 
tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/k8s/reconciler_test.go b/internal/k8s/reconciler_test.go index 4ef4df3c2f..5a8d8399a9 100644 --- a/internal/k8s/reconciler_test.go +++ b/internal/k8s/reconciler_test.go @@ -24,9 +24,8 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" - "sigs.k8s.io/controller-runtime/pkg/manager" - "go.uber.org/goleak" + "sigs.k8s.io/controller-runtime/pkg/manager" ) func TestNew(t *testing.T) { @@ -99,7 +98,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotCl, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -207,7 +205,6 @@ func Test_controller_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/log/format/format_test.go b/internal/log/format/format_test.go index 844aa2ea33..9fc6d58639 100644 --- a/internal/log/format/format_test.go +++ b/internal/log/format/format_test.go @@ -92,7 +92,6 @@ func TestFormat_String(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/log/glg/glg_test.go b/internal/log/glg/glg_test.go index 91220fdc71..3d354a5be9 100644 --- a/internal/log/glg/glg_test.go +++ b/internal/log/glg/glg_test.go @@ -27,7 +27,6 @@ import ( "github.com/vdaas/vald/internal/log/level" "github.com/vdaas/vald/internal/log/mock" "github.com/vdaas/vald/internal/log/retry" - "go.uber.org/goleak" ) @@ -1004,7 +1003,6 @@ func Test_logger_setLevelMode(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1100,7 +1098,6 @@ func Test_logger_setLogFormat(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/log/glg/option.go b/internal/log/glg/option.go index 30efd2d3e4..6fb5e3f1bd 100644 --- a/internal/log/glg/option.go +++ b/internal/log/glg/option.go @@ -25,13 +25,11 @@ import ( type Option func(l *logger) -var ( - defaultOpts = []Option{ - WithGlg(glg.Get()), - WithLevel(level.DEBUG.String()), - WithRetry(retry.New()), - } -) +var defaultOpts = []Option{ + WithGlg(glg.Get()), + WithLevel(level.DEBUG.String()), + WithRetry(retry.New()), +} func WithGlg(g *glg.Glg) Option { return func(l *logger) { diff --git a/internal/log/level/level.go b/internal/log/level/level.go index 4110ea1c69..8df1b4b536 100644 --- a/internal/log/level/level.go +++ b/internal/log/level/level.go @@ -23,19 +23,19 @@ type Level uint8 const ( Unknown Level = iota - // DEBUG is debug log level + // DEBUG is debug log level. DEBUG - // INFO is info log level + // INFO is info log level. INFO - // WARN is warning log level + // WARN is warning log level. WARN - // ERRO is error log level + // ERRO is error log level. ERROR - // FATAL is fatal log level + // FATAL is fatal log level. FATAL ) diff --git a/internal/log/log_test.go b/internal/log/log_test.go index 23ee3b3993..478d6da16d 100644 --- a/internal/log/log_test.go +++ b/internal/log/log_test.go @@ -29,12 +29,10 @@ import ( "go.uber.org/goleak" ) -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. 
+var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), +} func TestInit(t *testing.T) { type args struct { diff --git a/internal/log/logger/type_test.go b/internal/log/logger/type_test.go index 1f35cb4a83..b6c0319143 100644 --- a/internal/log/logger/type_test.go +++ b/internal/log/logger/type_test.go @@ -109,7 +109,6 @@ func TestType_String(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -264,7 +263,6 @@ func TestAtot(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/log/mock/logger.go b/internal/log/mock/logger.go index 1b2cdb1e69..c14f385f13 100644 --- a/internal/log/mock/logger.go +++ b/internal/log/mock/logger.go @@ -15,7 +15,7 @@ // package mock -// Logger represents struct of each log level function +// Logger represents struct of each log level function. type Logger struct { DebugFunc func(vals ...interface{}) DebugfFunc func(format string, vals ...interface{}) @@ -29,52 +29,52 @@ type Logger struct { FatalfFunc func(format string, vals ...interface{}) } -// Debug calls DebugFunc of Logger +// Debug calls DebugFunc of Logger. func (l *Logger) Debug(vals ...interface{}) { l.DebugFunc(vals...) } -// Debugf calls DebugfFunc of Logger +// Debugf calls DebugfFunc of Logger. func (l *Logger) Debugf(format string, vals ...interface{}) { l.DebugfFunc(format, vals...) } -// Info calls InfoFunc of Logger +// Info calls InfoFunc of Logger. func (l *Logger) Info(vals ...interface{}) { l.InfoFunc(vals...) } -// Infof calls InfofFunc of Logger +// Infof calls InfofFunc of Logger. func (l *Logger) Infof(format string, vals ...interface{}) { l.InfofFunc(format, vals...) } -// Warn calls WarnFunc of Logger +// Warn calls WarnFunc of Logger. func (l *Logger) Warn(vals ...interface{}) { l.WarnFunc(vals...) } -// Warnf calls WarnfFunc of Logger +// Warnf calls WarnfFunc of Logger. func (l *Logger) Warnf(format string, vals ...interface{}) { l.WarnfFunc(format, vals...) } -// Error calls ErrorFunc of Logger +// Error calls ErrorFunc of Logger. func (l *Logger) Error(vals ...interface{}) { l.ErrorFunc(vals...) } -// Errorf calls ErrorfFunc of Logger +// Errorf calls ErrorfFunc of Logger. func (l *Logger) Errorf(format string, vals ...interface{}) { l.ErrorfFunc(format, vals...) } -// Fatal calls FatalFunc of Logger +// Fatal calls FatalFunc of Logger. func (l *Logger) Fatal(vals ...interface{}) { l.FatalFunc(vals...) } -// Fatalf calls FatalfFunc of Logger +// Fatalf calls FatalfFunc of Logger. func (l *Logger) Fatalf(format string, vals ...interface{}) { l.FatalfFunc(format, vals...) } diff --git a/internal/log/mock/retry.go b/internal/log/mock/retry.go index 1db1c70620..e211e1dab6 100644 --- a/internal/log/mock/retry.go +++ b/internal/log/mock/retry.go @@ -15,7 +15,7 @@ // package mock -// Retry represents struct of mock retry structure +// Retry represents struct of mock retry structure. type Retry struct { OutFunc func( fn func(vals ...interface{}) error, @@ -29,7 +29,7 @@ type Retry struct { ) } -// Out calls OutFunc +// Out calls OutFunc. func (r *Retry) Out( fn func(vals ...interface{}) error, vals ...interface{}, @@ -37,7 +37,7 @@ func (r *Retry) Out( r.OutFunc(fn, vals...) } -// Outf calls OutfFunc +// Outf calls OutfFunc. 
func (r *Retry) Outf( fn func(format string, vals ...interface{}) error, format string, vals ...interface{}, diff --git a/internal/log/option.go b/internal/log/option.go index b1889325e9..91712bbb5f 100644 --- a/internal/log/option.go +++ b/internal/log/option.go @@ -25,21 +25,19 @@ import ( type Option func(*option) -var ( - defaultOptions = []Option{ - WithLevel(level.DEBUG.String()), - WithLogger( - glg.New( - glg.WithRetry( - retry.New( - retry.WithError(Error), - retry.WithWarn(Warn), - ), +var defaultOptions = []Option{ + WithLevel(level.DEBUG.String()), + WithLogger( + glg.New( + glg.WithRetry( + retry.New( + retry.WithError(Error), + retry.WithWarn(Warn), ), ), ), - } -) + ), +} type option struct { logType logger.Type diff --git a/internal/log/retry/option_test.go b/internal/log/retry/option_test.go index 31746531d4..ba96cf87b9 100644 --- a/internal/log/retry/option_test.go +++ b/internal/log/retry/option_test.go @@ -20,7 +20,6 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) diff --git a/internal/log/retry/retry_test.go b/internal/log/retry/retry_test.go index 05f761c0db..584186bfd4 100644 --- a/internal/log/retry/retry_test.go +++ b/internal/log/retry/retry_test.go @@ -20,7 +20,6 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) diff --git a/internal/net/grpc/client.go b/internal/net/grpc/client.go index 542fccf51b..e21a2b5355 100644 --- a/internal/net/grpc/client.go +++ b/internal/net/grpc/client.go @@ -28,15 +28,18 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc/pool" + "github.com/vdaas/vald/internal/observability/trace" "github.com/vdaas/vald/internal/safety" "google.golang.org/grpc" ) -type Server = grpc.Server -type ServerOption = grpc.ServerOption -type CallOption = grpc.CallOption -type DialOption = pool.DialOption -type ClientConn = pool.ClientConn +type ( + Server = grpc.Server + ServerOption = grpc.ServerOption + CallOption = grpc.CallOption + DialOption = pool.DialOption + ClientConn = pool.ClientConn +) type Client interface { StartConnectionMonitor(ctx context.Context) (<-chan error, error) @@ -197,20 +200,33 @@ func (g *gRPCClient) StartConnectionMonitor(ctx context.Context) (<-chan error, func (g *gRPCClient) Range(ctx context.Context, f func(ctx context.Context, addr string, conn *ClientConn, copts ...CallOption) error) (rerr error) { + ctx, span := trace.StartSpan(ctx, "vald/internal/grpc/Client.Range") + defer func() { + if span != nil { + span.End() + } + }() g.conns.Range(func(addr string, p pool.Conn) bool { + ctx, span := trace.StartSpan(ctx, "vald/internal/grpc/Client.Range/"+addr) + defer func() { + if span != nil { + span.End() + } + }() select { case <-ctx.Done(): return false default: var err error if g.bo != nil { - _, err = g.bo.Do(ctx, func() (r interface{}, err error) { - return nil, p.Do(func(conn *ClientConn) (err error) { + _, err = g.bo.Do(ctx, func(ctx context.Context) (r interface{}, ret bool, err error) { + err = p.Do(func(conn *ClientConn) (err error) { if conn == nil { return errors.ErrGRPCClientConnNotFound(addr) } return f(ctx, addr, conn, g.copts...) 
}) + return nil, err != nil, err }) } else { err = p.Do(func(conn *ClientConn) (err error) { @@ -235,29 +251,42 @@ func (g *gRPCClient) Range(ctx context.Context, func (g *gRPCClient) RangeConcurrent(ctx context.Context, concurrency int, f func(ctx context.Context, addr string, conn *ClientConn, copts ...CallOption) error) error { + ctx, span := trace.StartSpan(ctx, "vald/internal/grpc/Client.RangeConcurrent") + defer func() { + if span != nil { + span.End() + } + }() eg, egctx := errgroup.New(ctx) eg.Limitation(concurrency) g.conns.Range(func(addr string, p pool.Conn) bool { eg.Go(safety.RecoverFunc(func() (err error) { + ctx, span := trace.StartSpan(ctx, "vald/internal/grpc/Client.RangeConcurrent/"+addr) + defer func() { + if span != nil { + span.End() + } + }() select { case <-egctx.Done(): return nil default: if g.bo != nil { - _, err = g.bo.Do(egctx, func() (r interface{}, err error) { - return nil, p.Do(func(conn *ClientConn) (err error) { + _, err = g.bo.Do(ctx, func(ctx context.Context) (r interface{}, ret bool, err error) { + err = p.Do(func(conn *ClientConn) (err error) { if conn == nil { return errors.ErrGRPCClientConnNotFound(addr) } - return f(egctx, addr, conn, g.copts...) + return f(ctx, addr, conn, g.copts...) }) + return nil, err != nil, err }) } else { err = p.Do(func(conn *ClientConn) (err error) { if conn == nil { return errors.ErrGRPCClientConnNotFound(addr) } - return f(egctx, addr, conn, g.copts...) + return f(ctx, addr, conn, g.copts...) }) } if err != nil { @@ -277,6 +306,12 @@ func (g *gRPCClient) RangeConcurrent(ctx context.Context, func (g *gRPCClient) OrderedRange(ctx context.Context, orders []string, f func(ctx context.Context, addr string, conn *ClientConn, copts ...CallOption) error) (rerr error) { + ctx, span := trace.StartSpan(ctx, "vald/internal/grpc/Client.OrderedRange") + defer func() { + if span != nil { + span.End() + } + }() if orders == nil { log.Warn("no order found for OrderedRange") return g.Range(ctx, f) @@ -289,14 +324,21 @@ func (g *gRPCClient) OrderedRange(ctx context.Context, default: p, ok := g.conns.Load(addr) if ok { + ctx, span := trace.StartSpan(ctx, "vald/internal/grpc/Client.OrderedRange/"+addr) + defer func() { + if span != nil { + span.End() + } + }() if g.bo != nil { - _, err = g.bo.Do(ctx, func() (r interface{}, err error) { - return nil, p.Do(func(conn *ClientConn) (err error) { + _, err = g.bo.Do(ctx, func(ctx context.Context) (r interface{}, ret bool, err error) { + err = p.Do(func(conn *ClientConn) (err error) { if conn == nil { return errors.ErrGRPCClientConnNotFound(addr) } return f(ctx, addr, conn, g.copts...) 
}) + return nil, err != nil, err }) } else { err = p.Do(func(conn *ClientConn) (err error) { @@ -324,6 +366,12 @@ func (g *gRPCClient) OrderedRangeConcurrent(ctx context.Context, orders []string, concurrency int, f func(ctx context.Context, addr string, conn *ClientConn, copts ...CallOption) error) (err error) { + ctx, span := trace.StartSpan(ctx, "vald/internal/grpc/Client.OrderedRangeConcurrent") + defer func() { + if span != nil { + span.End() + } + }() if orders == nil { log.Warn("no order found for OrderedRangeConcurrent") return g.RangeConcurrent(ctx, concurrency, f) @@ -335,25 +383,32 @@ func (g *gRPCClient) OrderedRangeConcurrent(ctx context.Context, p, ok := g.conns.Load(addr) if ok { eg.Go(safety.RecoverFunc(func() (err error) { + ctx, span := trace.StartSpan(ctx, "vald/internal/grpc/Client.OrderedRangeConcurrent/"+addr) + defer func() { + if span != nil { + span.End() + } + }() select { case <-egctx.Done(): return nil default: if g.bo != nil { - _, err = g.bo.Do(egctx, func() (r interface{}, err error) { - return nil, p.Do(func(conn *ClientConn) (err error) { + _, err = g.bo.Do(ctx, func(ctx context.Context) (r interface{}, ret bool, err error) { + err = p.Do(func(conn *ClientConn) (err error) { if conn == nil { return errors.ErrGRPCClientConnNotFound(addr) } - return f(egctx, addr, conn, g.copts...) + return f(ctx, addr, conn, g.copts...) }) + return nil, err != nil, err }) } else { err = p.Do(func(conn *ClientConn) (err error) { if conn == nil { return errors.ErrGRPCClientConnNotFound(addr) } - return f(egctx, addr, conn, g.copts...) + return f(ctx, addr, conn, g.copts...) }) } if err != nil { @@ -375,12 +430,18 @@ func (g *gRPCClient) OrderedRangeConcurrent(ctx context.Context, func (g *gRPCClient) Do(ctx context.Context, addr string, f func(ctx context.Context, conn *ClientConn, copts ...CallOption) (interface{}, error)) (data interface{}, err error) { + ctx, span := trace.StartSpan(ctx, "vald/internal/grpc/Client.Do/"+addr) + defer func() { + if span != nil { + span.End() + } + }() p, ok := g.conns.Load(addr) if !ok { return nil, errors.ErrGRPCClientConnNotFound(addr) } if g.bo != nil { - data, err = g.bo.Do(ctx, func() (r interface{}, err error) { + data, err = g.bo.Do(ctx, func(ctx context.Context) (r interface{}, ret bool, err error) { err = p.Do(func(conn *ClientConn) (err error) { if conn == nil { return errors.ErrGRPCClientConnNotFound(addr) @@ -389,9 +450,9 @@ func (g *gRPCClient) Do(ctx context.Context, addr string, return err }) if err != nil { - return nil, err + return nil, err != nil, err } - return r, err + return r, false, nil }) } else { err = p.Do(func(conn *ClientConn) (err error) { diff --git a/internal/net/grpc/client_test.go b/internal/net/grpc/client_test.go index 189d61f049..1e818cbb53 100644 --- a/internal/net/grpc/client_test.go +++ b/internal/net/grpc/client_test.go @@ -26,11 +26,11 @@ import ( "github.com/vdaas/vald/internal/backoff" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -79,9 +79,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -96,12 +98,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotC); err != nil { tt.Errorf("error = %v", err) } - }) } 
} func Test_gRPCClient_StartConnectionMonitor(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -113,6 +115,7 @@ func Test_gRPCClient_StartConnectionMonitor(t *testing.T) { hcDur time.Duration prDur time.Duration enablePoolRebalance bool + resolveDNS bool dopts []DialOption copts []CallOption roccd string @@ -157,6 +160,7 @@ func Test_gRPCClient_StartConnectionMonitor(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -184,6 +188,7 @@ func Test_gRPCClient_StartConnectionMonitor(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -197,9 +202,11 @@ func Test_gRPCClient_StartConnectionMonitor(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -217,6 +224,7 @@ func Test_gRPCClient_StartConnectionMonitor(t *testing.T) { hcDur: test.fields.hcDur, prDur: test.fields.prDur, enablePoolRebalance: test.fields.enablePoolRebalance, + resolveDNS: test.fields.resolveDNS, dopts: test.fields.dopts, copts: test.fields.copts, roccd: test.fields.roccd, @@ -228,12 +236,12 @@ func Test_gRPCClient_StartConnectionMonitor(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gRPCClient_Range(t *testing.T) { + t.Parallel() type args struct { ctx context.Context f func(ctx context.Context, addr string, conn *ClientConn, copts ...CallOption) error @@ -246,6 +254,7 @@ func Test_gRPCClient_Range(t *testing.T) { hcDur time.Duration prDur time.Duration enablePoolRebalance bool + resolveDNS bool dopts []DialOption copts []CallOption roccd string @@ -287,6 +296,7 @@ func Test_gRPCClient_Range(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -315,6 +325,7 @@ func Test_gRPCClient_Range(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -328,9 +339,11 @@ func Test_gRPCClient_Range(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -348,6 +361,7 @@ func Test_gRPCClient_Range(t *testing.T) { hcDur: test.fields.hcDur, prDur: test.fields.prDur, enablePoolRebalance: test.fields.enablePoolRebalance, + resolveDNS: test.fields.resolveDNS, dopts: test.fields.dopts, copts: test.fields.copts, roccd: test.fields.roccd, @@ -359,12 +373,12 @@ func Test_gRPCClient_Range(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gRPCClient_RangeConcurrent(t *testing.T) { + t.Parallel() type args struct { ctx context.Context concurrency int @@ -378,6 +392,7 @@ func Test_gRPCClient_RangeConcurrent(t *testing.T) { hcDur time.Duration prDur time.Duration enablePoolRebalance bool + resolveDNS bool dopts []DialOption copts []CallOption roccd string @@ -420,6 +435,7 @@ func Test_gRPCClient_RangeConcurrent(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -449,6 +465,7 @@ func 
Test_gRPCClient_RangeConcurrent(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -462,9 +479,11 @@ func Test_gRPCClient_RangeConcurrent(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -482,6 +501,7 @@ func Test_gRPCClient_RangeConcurrent(t *testing.T) { hcDur: test.fields.hcDur, prDur: test.fields.prDur, enablePoolRebalance: test.fields.enablePoolRebalance, + resolveDNS: test.fields.resolveDNS, dopts: test.fields.dopts, copts: test.fields.copts, roccd: test.fields.roccd, @@ -493,12 +513,12 @@ func Test_gRPCClient_RangeConcurrent(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gRPCClient_OrderedRange(t *testing.T) { + t.Parallel() type args struct { ctx context.Context orders []string @@ -512,6 +532,7 @@ func Test_gRPCClient_OrderedRange(t *testing.T) { hcDur time.Duration prDur time.Duration enablePoolRebalance bool + resolveDNS bool dopts []DialOption copts []CallOption roccd string @@ -554,6 +575,7 @@ func Test_gRPCClient_OrderedRange(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -583,6 +605,7 @@ func Test_gRPCClient_OrderedRange(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -596,9 +619,11 @@ func Test_gRPCClient_OrderedRange(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -616,6 +641,7 @@ func Test_gRPCClient_OrderedRange(t *testing.T) { hcDur: test.fields.hcDur, prDur: test.fields.prDur, enablePoolRebalance: test.fields.enablePoolRebalance, + resolveDNS: test.fields.resolveDNS, dopts: test.fields.dopts, copts: test.fields.copts, roccd: test.fields.roccd, @@ -627,12 +653,12 @@ func Test_gRPCClient_OrderedRange(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gRPCClient_OrderedRangeConcurrent(t *testing.T) { + t.Parallel() type args struct { ctx context.Context orders []string @@ -647,6 +673,7 @@ func Test_gRPCClient_OrderedRangeConcurrent(t *testing.T) { hcDur time.Duration prDur time.Duration enablePoolRebalance bool + resolveDNS bool dopts []DialOption copts []CallOption roccd string @@ -690,6 +717,7 @@ func Test_gRPCClient_OrderedRangeConcurrent(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -720,6 +748,7 @@ func Test_gRPCClient_OrderedRangeConcurrent(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -733,9 +762,11 @@ func Test_gRPCClient_OrderedRangeConcurrent(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -753,6 +784,7 @@ func Test_gRPCClient_OrderedRangeConcurrent(t *testing.T) { hcDur: 
test.fields.hcDur, prDur: test.fields.prDur, enablePoolRebalance: test.fields.enablePoolRebalance, + resolveDNS: test.fields.resolveDNS, dopts: test.fields.dopts, copts: test.fields.copts, roccd: test.fields.roccd, @@ -764,12 +796,12 @@ func Test_gRPCClient_OrderedRangeConcurrent(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gRPCClient_Do(t *testing.T) { + t.Parallel() type args struct { ctx context.Context addr string @@ -783,6 +815,7 @@ func Test_gRPCClient_Do(t *testing.T) { hcDur time.Duration prDur time.Duration enablePoolRebalance bool + resolveDNS bool dopts []DialOption copts []CallOption roccd string @@ -829,6 +862,7 @@ func Test_gRPCClient_Do(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -858,6 +892,7 @@ func Test_gRPCClient_Do(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -871,9 +906,11 @@ func Test_gRPCClient_Do(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -891,6 +928,7 @@ func Test_gRPCClient_Do(t *testing.T) { hcDur: test.fields.hcDur, prDur: test.fields.prDur, enablePoolRebalance: test.fields.enablePoolRebalance, + resolveDNS: test.fields.resolveDNS, dopts: test.fields.dopts, copts: test.fields.copts, roccd: test.fields.roccd, @@ -902,12 +940,12 @@ func Test_gRPCClient_Do(t *testing.T) { if err := test.checkFunc(test.want, gotData, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gRPCClient_GetDialOption(t *testing.T) { + t.Parallel() type fields struct { addrs []string poolSize uint64 @@ -916,6 +954,7 @@ func Test_gRPCClient_GetDialOption(t *testing.T) { hcDur time.Duration prDur time.Duration enablePoolRebalance bool + resolveDNS bool dopts []DialOption copts []CallOption roccd string @@ -952,6 +991,7 @@ func Test_gRPCClient_GetDialOption(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -976,6 +1016,7 @@ func Test_gRPCClient_GetDialOption(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -989,9 +1030,11 @@ func Test_gRPCClient_GetDialOption(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1009,6 +1052,7 @@ func Test_gRPCClient_GetDialOption(t *testing.T) { hcDur: test.fields.hcDur, prDur: test.fields.prDur, enablePoolRebalance: test.fields.enablePoolRebalance, + resolveDNS: test.fields.resolveDNS, dopts: test.fields.dopts, copts: test.fields.copts, roccd: test.fields.roccd, @@ -1020,12 +1064,12 @@ func Test_gRPCClient_GetDialOption(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gRPCClient_GetCallOption(t *testing.T) { + t.Parallel() type fields struct { addrs []string poolSize uint64 @@ -1034,6 +1078,7 @@ func Test_gRPCClient_GetCallOption(t *testing.T) { hcDur time.Duration prDur time.Duration enablePoolRebalance bool + resolveDNS bool dopts []DialOption copts 
[]CallOption roccd string @@ -1070,6 +1115,7 @@ func Test_gRPCClient_GetCallOption(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -1094,6 +1140,7 @@ func Test_gRPCClient_GetCallOption(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -1107,9 +1154,11 @@ func Test_gRPCClient_GetCallOption(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1127,6 +1176,7 @@ func Test_gRPCClient_GetCallOption(t *testing.T) { hcDur: test.fields.hcDur, prDur: test.fields.prDur, enablePoolRebalance: test.fields.enablePoolRebalance, + resolveDNS: test.fields.resolveDNS, dopts: test.fields.dopts, copts: test.fields.copts, roccd: test.fields.roccd, @@ -1138,12 +1188,12 @@ func Test_gRPCClient_GetCallOption(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gRPCClient_Connect(t *testing.T) { + t.Parallel() type args struct { ctx context.Context addr string @@ -1157,6 +1207,7 @@ func Test_gRPCClient_Connect(t *testing.T) { hcDur time.Duration prDur time.Duration enablePoolRebalance bool + resolveDNS bool dopts []DialOption copts []CallOption roccd string @@ -1199,6 +1250,7 @@ func Test_gRPCClient_Connect(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -1228,6 +1280,7 @@ func Test_gRPCClient_Connect(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -1241,9 +1294,11 @@ func Test_gRPCClient_Connect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1261,6 +1316,7 @@ func Test_gRPCClient_Connect(t *testing.T) { hcDur: test.fields.hcDur, prDur: test.fields.prDur, enablePoolRebalance: test.fields.enablePoolRebalance, + resolveDNS: test.fields.resolveDNS, dopts: test.fields.dopts, copts: test.fields.copts, roccd: test.fields.roccd, @@ -1272,12 +1328,12 @@ func Test_gRPCClient_Connect(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gRPCClient_Disconnect(t *testing.T) { + t.Parallel() type args struct { addr string } @@ -1289,6 +1345,7 @@ func Test_gRPCClient_Disconnect(t *testing.T) { hcDur time.Duration prDur time.Duration enablePoolRebalance bool + resolveDNS bool dopts []DialOption copts []CallOption roccd string @@ -1329,6 +1386,7 @@ func Test_gRPCClient_Disconnect(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -1356,6 +1414,7 @@ func Test_gRPCClient_Disconnect(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -1369,9 +1428,11 @@ func Test_gRPCClient_Disconnect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
test.beforeFunc(test.args) } @@ -1389,6 +1450,7 @@ func Test_gRPCClient_Disconnect(t *testing.T) { hcDur: test.fields.hcDur, prDur: test.fields.prDur, enablePoolRebalance: test.fields.enablePoolRebalance, + resolveDNS: test.fields.resolveDNS, dopts: test.fields.dopts, copts: test.fields.copts, roccd: test.fields.roccd, @@ -1400,12 +1462,12 @@ func Test_gRPCClient_Disconnect(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gRPCClient_Close(t *testing.T) { + t.Parallel() type fields struct { addrs []string poolSize uint64 @@ -1414,6 +1476,7 @@ func Test_gRPCClient_Close(t *testing.T) { hcDur time.Duration prDur time.Duration enablePoolRebalance bool + resolveDNS bool dopts []DialOption copts []CallOption roccd string @@ -1450,6 +1513,7 @@ func Test_gRPCClient_Close(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -1474,6 +1538,7 @@ func Test_gRPCClient_Close(t *testing.T) { hcDur: nil, prDur: nil, enablePoolRebalance: false, + resolveDNS: false, dopts: nil, copts: nil, roccd: "", @@ -1487,9 +1552,11 @@ func Test_gRPCClient_Close(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1507,6 +1574,7 @@ func Test_gRPCClient_Close(t *testing.T) { hcDur: test.fields.hcDur, prDur: test.fields.prDur, enablePoolRebalance: test.fields.enablePoolRebalance, + resolveDNS: test.fields.resolveDNS, dopts: test.fields.dopts, copts: test.fields.copts, roccd: test.fields.roccd, @@ -1518,7 +1586,6 @@ func Test_gRPCClient_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/grpc/grpcconns_test.go b/internal/net/grpc/grpcconns_test.go index ce2f712fe8..cf517a3e42 100644 --- a/internal/net/grpc/grpcconns_test.go +++ b/internal/net/grpc/grpcconns_test.go @@ -26,9 +26,11 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc/pool" + "go.uber.org/goleak" ) func Test_newEntryGrpcConns(t *testing.T) { + t.Parallel() type args struct { i pool.Conn } @@ -77,8 +79,11 @@ func Test_newEntryGrpcConns(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -93,12 +98,12 @@ func Test_newEntryGrpcConns(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_grpcConns_Load(t *testing.T) { + t.Parallel() type args struct { key string } @@ -170,8 +175,11 @@ func Test_grpcConns_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -192,12 +200,12 @@ func Test_grpcConns_Load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryGrpcConns_load(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -250,8 +258,11 @@ func Test_entryGrpcConns_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + 
test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -269,12 +280,12 @@ func Test_entryGrpcConns_load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_grpcConns_Store(t *testing.T) { + t.Parallel() type args struct { key string value pool.Conn @@ -341,8 +352,11 @@ func Test_grpcConns_Store(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -368,6 +382,7 @@ func Test_grpcConns_Store(t *testing.T) { } func Test_entryGrpcConns_tryStore(t *testing.T) { + t.Parallel() type args struct { i *pool.Conn } @@ -426,8 +441,11 @@ func Test_entryGrpcConns_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -445,12 +463,12 @@ func Test_entryGrpcConns_tryStore(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryGrpcConns_unexpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -499,8 +517,11 @@ func Test_entryGrpcConns_unexpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -518,12 +539,12 @@ func Test_entryGrpcConns_unexpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotWasExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryGrpcConns_storeLocked(t *testing.T) { + t.Parallel() type args struct { i *pool.Conn } @@ -578,8 +599,11 @@ func Test_entryGrpcConns_storeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -602,6 +626,7 @@ func Test_entryGrpcConns_storeLocked(t *testing.T) { } func Test_grpcConns_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -665,8 +690,11 @@ func Test_grpcConns_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -692,6 +720,7 @@ func Test_grpcConns_Delete(t *testing.T) { } func Test_entryGrpcConns_delete(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -740,8 +769,11 @@ func Test_entryGrpcConns_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -759,12 +791,12 @@ func Test_entryGrpcConns_delete(t *testing.T) { if err := test.checkFunc(test.want, gotHadValue); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_grpcConns_Range(t *testing.T) { + t.Parallel() type args struct { f func(key string, value pool.Conn) bool } @@ -828,8 +860,11 @@ func Test_grpcConns_Range(t 
*testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -855,6 +890,7 @@ func Test_grpcConns_Range(t *testing.T) { } func Test_grpcConns_missLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -908,8 +944,11 @@ func Test_grpcConns_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -935,6 +974,7 @@ func Test_grpcConns_missLocked(t *testing.T) { } func Test_grpcConns_dirtyLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -988,8 +1028,11 @@ func Test_grpcConns_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1015,6 +1058,7 @@ func Test_grpcConns_dirtyLocked(t *testing.T) { } func Test_entryGrpcConns_tryExpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -1063,8 +1107,11 @@ func Test_entryGrpcConns_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1082,7 +1129,6 @@ func Test_entryGrpcConns_tryExpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotIsExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/grpc/interceptor.go b/internal/net/grpc/interceptor.go index ee34e00118..03805302c7 100644 --- a/internal/net/grpc/interceptor.go +++ b/internal/net/grpc/interceptor.go @@ -24,8 +24,10 @@ import ( "google.golang.org/grpc" ) -type UnaryServerInterceptor = grpc.UnaryServerInterceptor -type StreamServerInterceptor = grpc.StreamServerInterceptor +type ( + UnaryServerInterceptor = grpc.UnaryServerInterceptor + StreamServerInterceptor = grpc.StreamServerInterceptor +) var ( UnaryInterceptor = grpc.UnaryInterceptor diff --git a/internal/net/grpc/interceptor_test.go b/internal/net/grpc/interceptor_test.go index 8333667790..30e9ab4514 100644 --- a/internal/net/grpc/interceptor_test.go +++ b/internal/net/grpc/interceptor_test.go @@ -26,6 +26,7 @@ import ( ) func TestRecoverInterceptor(t *testing.T) { + t.Parallel() type want struct { want UnaryServerInterceptor } @@ -64,9 +65,11 @@ func TestRecoverInterceptor(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -81,12 +84,12 @@ func TestRecoverInterceptor(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestRecoverStreamInterceptor(t *testing.T) { + t.Parallel() type want struct { want StreamServerInterceptor } @@ -125,9 +128,11 @@ func TestRecoverStreamInterceptor(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if 
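For context on the grpcconns_test.go hunks above: judging from the method set under test (Load, Store, Delete, Range plus the missLocked/dirtyLocked/expunge helpers), grpcConns appears to be a type-specialized variant of sync.Map keyed by address and holding pool.Conn values. The following illustrates those expected semantics using only the standard library; conn is a placeholder for pool.Conn and this is not the actual implementation.

package main

import (
	"fmt"
	"sync"
)

// conn stands in for pool.Conn (defined in internal/net/grpc/pool).
type conn struct{ addr string }

// connMap sketches the behaviour the grpcConns tests exercise, backed by
// sync.Map instead of the generated read/dirty/expunge machinery.
type connMap struct{ m sync.Map }

func (c *connMap) Store(key string, value conn) { c.m.Store(key, value) }

func (c *connMap) Delete(key string) { c.m.Delete(key) }

func (c *connMap) Load(key string) (conn, bool) {
	v, ok := c.m.Load(key)
	if !ok {
		return conn{}, false
	}
	return v.(conn), true
}

func (c *connMap) Range(f func(key string, value conn) bool) {
	c.m.Range(func(k, v interface{}) bool { return f(k.(string), v.(conn)) })
}

func main() {
	var cm connMap
	cm.Store("10.0.0.1:8081", conn{addr: "10.0.0.1:8081"})
	if v, ok := cm.Load("10.0.0.1:8081"); ok {
		fmt.Println("loaded:", v.addr)
	}
	cm.Range(func(k string, _ conn) bool { fmt.Println("range:", k); return true })
	cm.Delete("10.0.0.1:8081")
}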
test.beforeFunc != nil { test.beforeFunc() } @@ -142,7 +147,6 @@ func TestRecoverStreamInterceptor(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/grpc/metric/client_option.go b/internal/net/grpc/metric/client_option.go index 4ab3f46075..30a4e102ed 100644 --- a/internal/net/grpc/metric/client_option.go +++ b/internal/net/grpc/metric/client_option.go @@ -19,6 +19,4 @@ package metric type ClientOption func(*ClientHandler) -var ( - clientDefaultOpts = []ClientOption{} -) +var clientDefaultOpts = []ClientOption{} diff --git a/internal/net/grpc/metric/client_test.go b/internal/net/grpc/metric/client_test.go index c3094c8b40..933469317a 100644 --- a/internal/net/grpc/metric/client_test.go +++ b/internal/net/grpc/metric/client_test.go @@ -22,9 +22,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNewClientHandler(t *testing.T) { + t.Parallel() type args struct { opts []ClientOption } @@ -73,8 +75,11 @@ func TestNewClientHandler(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -89,7 +94,6 @@ func TestNewClientHandler(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/grpc/metric/server_option.go b/internal/net/grpc/metric/server_option.go index 77a30bcc50..df14a4a0c4 100644 --- a/internal/net/grpc/metric/server_option.go +++ b/internal/net/grpc/metric/server_option.go @@ -19,6 +19,4 @@ package metric type ServerOption func(*ServerHandler) -var ( - serverDefaultOpts = []ServerOption{} -) +var serverDefaultOpts = []ServerOption{} diff --git a/internal/net/grpc/metric/server_test.go b/internal/net/grpc/metric/server_test.go index 6d8381706e..cd4bd26ccf 100644 --- a/internal/net/grpc/metric/server_test.go +++ b/internal/net/grpc/metric/server_test.go @@ -22,9 +22,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNewServerHandler(t *testing.T) { + t.Parallel() type args struct { opts []ServerOption } @@ -73,8 +75,11 @@ func TestNewServerHandler(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -89,7 +94,6 @@ func TestNewServerHandler(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/grpc/option.go b/internal/net/grpc/option.go index a470493331..3657dc3a18 100644 --- a/internal/net/grpc/option.go +++ b/internal/net/grpc/option.go @@ -34,16 +34,14 @@ import ( type Option func(*gRPCClient) -var ( - defaultOpts = []Option{ - WithConnectionPoolSize(3), - WithEnableConnectionPoolRebalance(false), - WithConnectionPoolRebalanceDuration("1h"), - WithErrGroup(errgroup.Get()), - WithHealthCheckDuration("10s"), - WithResolveDNS(true), - } -) +var defaultOpts = []Option{ + WithConnectionPoolSize(3), + WithEnableConnectionPoolRebalance(false), + WithConnectionPoolRebalanceDuration("1h"), + WithErrGroup(errgroup.Get()), + WithHealthCheckDuration("10s"), + WithResolveDNS(true), +} func WithAddrs(addrs ...string) Option { return func(g *gRPCClient) { @@ -161,6 +159,7 @@ func 
WithWaitForReady(flg bool) Option { ) } } + func WithMaxRetryRPCBufferSize(size int) Option { return func(g *gRPCClient) { if size > 1 { @@ -170,6 +169,7 @@ func WithMaxRetryRPCBufferSize(size int) Option { } } } + func WithMaxRecvMsgSize(size int) Option { return func(g *gRPCClient) { if size > 1 { @@ -179,6 +179,7 @@ func WithMaxRecvMsgSize(size int) Option { } } } + func WithMaxSendMsgSize(size int) Option { return func(g *gRPCClient) { if size > 1 { @@ -188,6 +189,7 @@ func WithMaxSendMsgSize(size int) Option { } } } + func WithWriteBufferSize(size int) Option { return func(g *gRPCClient) { if size > 1 { @@ -197,6 +199,7 @@ func WithWriteBufferSize(size int) Option { } } } + func WithReadBufferSize(size int) Option { return func(g *gRPCClient) { if size > 1 { @@ -206,6 +209,7 @@ func WithReadBufferSize(size int) Option { } } } + func WithInitialWindowSize(size int) Option { return func(g *gRPCClient) { if size > 1 { @@ -215,6 +219,7 @@ func WithInitialWindowSize(size int) Option { } } } + func WithInitialConnectionWindowSize(size int) Option { return func(g *gRPCClient) { if size > 1 { @@ -224,6 +229,7 @@ func WithInitialConnectionWindowSize(size int) Option { } } } + func WithMaxMsgSize(size int) Option { return func(g *gRPCClient) { if size > 1 { diff --git a/internal/net/grpc/option_test.go b/internal/net/grpc/option_test.go index 129af59244..d745c9d592 100644 --- a/internal/net/grpc/option_test.go +++ b/internal/net/grpc/option_test.go @@ -24,12 +24,13 @@ import ( "github.com/vdaas/vald/internal/backoff" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/net/tcp" - "google.golang.org/grpc" - "go.uber.org/goleak" + "google.golang.org/grpc" ) func TestWithAddrs(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { addrs []string @@ -67,7 +68,7 @@ func TestWithAddrs(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -103,9 +104,11 @@ func TestWithAddrs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -126,7 +129,7 @@ func TestWithAddrs(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -134,7 +137,7 @@ func TestWithAddrs(t *testing.T) { got := WithAddrs(test.args.addrs...) 
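The defaultOpts hunk above adds WithResolveDNS(true), and the test scaffolds gain a matching resolveDNS field, but WithResolveDNS's body is not part of this diff. Assuming it only records the flag on the client, it would follow the same functional-option shape as its neighbours; the sketch below uses illustrative stand-in types (client, Option) rather than the real gRPCClient.

package main

import "fmt"

// client and Option are illustrative stand-ins for the real gRPCClient and
// Option types in internal/net/grpc.
type client struct{ resolveDNS bool }

type Option func(*client)

// WithResolveDNS is a hypothetical sketch of the option's body: it simply
// stores the flag for later use when building connections.
func WithResolveDNS(flg bool) Option {
	return func(c *client) {
		c.resolveDNS = flg
	}
}

func main() {
	c := new(client)
	for _, opt := range []Option{WithResolveDNS(true)} {
		opt(c)
	}
	fmt.Println(c.resolveDNS) // true
}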
obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -143,6 +146,8 @@ func TestWithAddrs(t *testing.T) { } func TestWithHealthCheckDuration(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -180,7 +185,7 @@ func TestWithHealthCheckDuration(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -216,9 +221,11 @@ func TestWithHealthCheckDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -239,7 +246,7 @@ func TestWithHealthCheckDuration(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -247,7 +254,7 @@ func TestWithHealthCheckDuration(t *testing.T) { got := WithHealthCheckDuration(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -256,6 +263,8 @@ func TestWithHealthCheckDuration(t *testing.T) { } func TestWithConnectionPoolRebalanceDuration(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -293,7 +302,7 @@ func TestWithConnectionPoolRebalanceDuration(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -329,9 +338,11 @@ func TestWithConnectionPoolRebalanceDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -352,7 +363,7 @@ func TestWithConnectionPoolRebalanceDuration(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -360,7 +371,124 @@ func TestWithConnectionPoolRebalanceDuration(t *testing.T) { got := WithConnectionPoolRebalanceDuration(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithResolveDNS(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + flg bool + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + 
want want + // Use the first line if the option returns an error. otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + flg: false, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + flg: false, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithResolveDNS(test.args.flg) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithResolveDNS(test.args.flg) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -369,6 +497,8 @@ func TestWithConnectionPoolRebalanceDuration(t *testing.T) { } func TestWithEnableConnectionPoolRebalance(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { flg bool @@ -406,7 +536,7 @@ func TestWithEnableConnectionPoolRebalance(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -442,9 +572,11 @@ func TestWithEnableConnectionPoolRebalance(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -465,7 +597,7 @@ func TestWithEnableConnectionPoolRebalance(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -473,7 +605,7 @@ func TestWithEnableConnectionPoolRebalance(t *testing.T) { got := WithEnableConnectionPoolRebalance(test.args.flg) obj := new(T) got(obj) - if err := 
test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -482,6 +614,8 @@ func TestWithEnableConnectionPoolRebalance(t *testing.T) { } func TestWithConnectionPoolSize(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { size int @@ -519,7 +653,7 @@ func TestWithConnectionPoolSize(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -555,9 +689,11 @@ func TestWithConnectionPoolSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -578,7 +714,7 @@ func TestWithConnectionPoolSize(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -586,7 +722,7 @@ func TestWithConnectionPoolSize(t *testing.T) { got := WithConnectionPoolSize(test.args.size) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -595,6 +731,8 @@ func TestWithConnectionPoolSize(t *testing.T) { } func TestWithDialOptions(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { opts []grpc.DialOption @@ -632,7 +770,7 @@ func TestWithDialOptions(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -668,9 +806,11 @@ func TestWithDialOptions(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -691,7 +831,7 @@ func TestWithDialOptions(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -699,7 +839,7 @@ func TestWithDialOptions(t *testing.T) { got := WithDialOptions(test.args.opts...) 
obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -708,6 +848,8 @@ func TestWithDialOptions(t *testing.T) { } func TestWithMaxBackoffDelay(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -745,7 +887,7 @@ func TestWithMaxBackoffDelay(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -781,9 +923,11 @@ func TestWithMaxBackoffDelay(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -804,7 +948,7 @@ func TestWithMaxBackoffDelay(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -812,7 +956,7 @@ func TestWithMaxBackoffDelay(t *testing.T) { got := WithMaxBackoffDelay(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -821,6 +965,8 @@ func TestWithMaxBackoffDelay(t *testing.T) { } func TestWithCallOptions(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { opts []grpc.CallOption @@ -858,7 +1004,7 @@ func TestWithCallOptions(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -894,9 +1040,11 @@ func TestWithCallOptions(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -917,7 +1065,7 @@ func TestWithCallOptions(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -925,7 +1073,7 @@ func TestWithCallOptions(t *testing.T) { got := WithCallOptions(test.args.opts...) 
obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -934,6 +1082,8 @@ func TestWithCallOptions(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -971,7 +1121,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1007,9 +1157,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1030,7 +1182,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1038,7 +1190,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1047,6 +1199,8 @@ func TestWithErrGroup(t *testing.T) { } func TestWithBackoff(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { bo backoff.Backoff @@ -1084,7 +1238,7 @@ func TestWithBackoff(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1120,9 +1274,11 @@ func TestWithBackoff(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1143,7 +1299,7 @@ func TestWithBackoff(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1151,7 +1307,7 @@ func TestWithBackoff(t *testing.T) { got := WithBackoff(test.args.bo) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1160,6 +1316,8 @@ func TestWithBackoff(t *testing.T) { } func TestWithWaitForReady(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { flg bool @@ -1197,7 +1355,7 @@ func TestWithWaitForReady(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1233,9 
+1391,11 @@ func TestWithWaitForReady(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1256,7 +1416,7 @@ func TestWithWaitForReady(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1264,7 +1424,7 @@ func TestWithWaitForReady(t *testing.T) { got := WithWaitForReady(test.args.flg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1273,6 +1433,8 @@ func TestWithWaitForReady(t *testing.T) { } func TestWithMaxRetryRPCBufferSize(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { size int @@ -1310,7 +1472,7 @@ func TestWithMaxRetryRPCBufferSize(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1346,9 +1508,11 @@ func TestWithMaxRetryRPCBufferSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1369,7 +1533,7 @@ func TestWithMaxRetryRPCBufferSize(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1377,7 +1541,7 @@ func TestWithMaxRetryRPCBufferSize(t *testing.T) { got := WithMaxRetryRPCBufferSize(test.args.size) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1386,6 +1550,8 @@ func TestWithMaxRetryRPCBufferSize(t *testing.T) { } func TestWithMaxRecvMsgSize(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { size int @@ -1423,7 +1589,7 @@ func TestWithMaxRecvMsgSize(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1459,9 +1625,11 @@ func TestWithMaxRecvMsgSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1482,7 +1650,7 @@ func TestWithMaxRecvMsgSize(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1490,7 +1658,7 
@@ func TestWithMaxRecvMsgSize(t *testing.T) { got := WithMaxRecvMsgSize(test.args.size) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1499,6 +1667,8 @@ func TestWithMaxRecvMsgSize(t *testing.T) { } func TestWithMaxSendMsgSize(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { size int @@ -1536,7 +1706,7 @@ func TestWithMaxSendMsgSize(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1572,9 +1742,11 @@ func TestWithMaxSendMsgSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1595,7 +1767,7 @@ func TestWithMaxSendMsgSize(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1603,7 +1775,7 @@ func TestWithMaxSendMsgSize(t *testing.T) { got := WithMaxSendMsgSize(test.args.size) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1612,6 +1784,8 @@ func TestWithMaxSendMsgSize(t *testing.T) { } func TestWithWriteBufferSize(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { size int @@ -1649,7 +1823,7 @@ func TestWithWriteBufferSize(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1685,9 +1859,11 @@ func TestWithWriteBufferSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1708,7 +1884,7 @@ func TestWithWriteBufferSize(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1716,7 +1892,7 @@ func TestWithWriteBufferSize(t *testing.T) { got := WithWriteBufferSize(test.args.size) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1725,6 +1901,8 @@ func TestWithWriteBufferSize(t *testing.T) { } func TestWithReadBufferSize(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { size int @@ -1762,7 +1940,7 @@ func TestWithReadBufferSize(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) 
{ - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1798,9 +1976,11 @@ func TestWithReadBufferSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1821,7 +2001,7 @@ func TestWithReadBufferSize(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1829,7 +2009,7 @@ func TestWithReadBufferSize(t *testing.T) { got := WithReadBufferSize(test.args.size) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1838,6 +2018,8 @@ func TestWithReadBufferSize(t *testing.T) { } func TestWithInitialWindowSize(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { size int @@ -1875,7 +2057,7 @@ func TestWithInitialWindowSize(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1911,9 +2093,11 @@ func TestWithInitialWindowSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1934,7 +2118,7 @@ func TestWithInitialWindowSize(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1942,7 +2126,7 @@ func TestWithInitialWindowSize(t *testing.T) { got := WithInitialWindowSize(test.args.size) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1951,6 +2135,8 @@ func TestWithInitialWindowSize(t *testing.T) { } func TestWithInitialConnectionWindowSize(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { size int @@ -1988,7 +2174,7 @@ func TestWithInitialConnectionWindowSize(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -2024,9 +2210,11 @@ func TestWithInitialConnectionWindowSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2047,7 +2235,7 @@ func TestWithInitialConnectionWindowSize(t *testing.T) { } */ - // Uncomment this block if the 
option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -2055,7 +2243,7 @@ func TestWithInitialConnectionWindowSize(t *testing.T) { got := WithInitialConnectionWindowSize(test.args.size) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -2064,6 +2252,8 @@ func TestWithInitialConnectionWindowSize(t *testing.T) { } func TestWithMaxMsgSize(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { size int @@ -2101,7 +2291,7 @@ func TestWithMaxMsgSize(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -2137,9 +2327,11 @@ func TestWithMaxMsgSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2160,7 +2352,7 @@ func TestWithMaxMsgSize(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -2168,7 +2360,7 @@ func TestWithMaxMsgSize(t *testing.T) { got := WithMaxMsgSize(test.args.size) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -2177,6 +2369,8 @@ func TestWithMaxMsgSize(t *testing.T) { } func TestWithInsecure(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { flg bool @@ -2214,7 +2408,7 @@ func TestWithInsecure(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -2250,9 +2444,11 @@ func TestWithInsecure(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2273,7 +2469,7 @@ func TestWithInsecure(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -2281,7 +2477,7 @@ func TestWithInsecure(t *testing.T) { got := WithInsecure(test.args.flg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -2290,6 +2486,8 @@ func TestWithInsecure(t *testing.T) { } func TestWithDialTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args 
struct { dur string @@ -2327,7 +2525,7 @@ func TestWithDialTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -2363,9 +2561,11 @@ func TestWithDialTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2386,7 +2586,7 @@ func TestWithDialTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -2394,7 +2594,7 @@ func TestWithDialTimeout(t *testing.T) { got := WithDialTimeout(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -2403,6 +2603,8 @@ func TestWithDialTimeout(t *testing.T) { } func TestWithKeepaliveParams(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { t string @@ -2442,7 +2644,7 @@ func TestWithKeepaliveParams(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -2482,9 +2684,11 @@ func TestWithKeepaliveParams(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2505,7 +2709,7 @@ func TestWithKeepaliveParams(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -2513,7 +2717,7 @@ func TestWithKeepaliveParams(t *testing.T) { got := WithKeepaliveParams(test.args.t, test.args.to, test.args.permitWithoutStream) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -2522,6 +2726,8 @@ func TestWithKeepaliveParams(t *testing.T) { } func TestWithDialer(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { der tcp.Dialer @@ -2559,7 +2765,7 @@ func TestWithDialer(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -2595,9 +2801,11 @@ func TestWithDialer(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
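TestWithKeepaliveParams above exercises an option taking two duration strings and a bool (t, to, permitWithoutStream). Vald's implementation is not shown in this diff; as a generic, self-contained illustration of the underlying gRPC API such an option would presumably wrap, the helper below parses the strings and builds the standard keepalive dial option. buildKeepaliveDialOption is a hypothetical name, not Vald's code.

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// buildKeepaliveDialOption parses the duration strings and returns the
// standard gRPC keepalive dial option built from them.
func buildKeepaliveDialOption(t, to string, permitWithoutStream bool) (grpc.DialOption, error) {
	kt, err := time.ParseDuration(t)
	if err != nil {
		return nil, err
	}
	kto, err := time.ParseDuration(to)
	if err != nil {
		return nil, err
	}
	return grpc.WithKeepaliveParams(keepalive.ClientParameters{
		Time:                kt,
		Timeout:             kto,
		PermitWithoutStream: permitWithoutStream,
	}), nil
}

func main() {
	if _, err := buildKeepaliveDialOption("30s", "10s", true); err != nil {
		panic(err)
	}
}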
test.beforeFunc(test.args) } @@ -2618,7 +2826,7 @@ func TestWithDialer(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -2626,7 +2834,7 @@ func TestWithDialer(t *testing.T) { got := WithDialer(test.args.der) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -2635,6 +2843,8 @@ func TestWithDialer(t *testing.T) { } func TestWithTLSConfig(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { cfg *tls.Config @@ -2672,7 +2882,7 @@ func TestWithTLSConfig(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -2708,9 +2918,11 @@ func TestWithTLSConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2731,7 +2943,7 @@ func TestWithTLSConfig(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -2739,7 +2951,7 @@ func TestWithTLSConfig(t *testing.T) { got := WithTLSConfig(test.args.cfg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -2748,6 +2960,8 @@ func TestWithTLSConfig(t *testing.T) { } func TestWithOldConnCloseDuration(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -2785,7 +2999,7 @@ func TestWithOldConnCloseDuration(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -2821,9 +3035,11 @@ func TestWithOldConnCloseDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2844,7 +3060,7 @@ func TestWithOldConnCloseDuration(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -2852,7 +3068,7 @@ func TestWithOldConnCloseDuration(t *testing.T) { got := WithOldConnCloseDuration(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/internal/net/grpc/pool/option.go b/internal/net/grpc/pool/option.go 
index 7220bae574..242735deb8 100644 --- a/internal/net/grpc/pool/option.go +++ b/internal/net/grpc/pool/option.go @@ -24,16 +24,14 @@ import ( type Option func(*pool) -var ( - defaultOpts = []Option{ - WithSize(3), - WithStartPort(80), - WithEndPort(65535), - WithDialTimeout("1s"), - WithOldConnCloseDuration("1s"), - WithResolveDNS(true), - } -) +var defaultOpts = []Option{ + WithSize(3), + WithStartPort(80), + WithEndPort(65535), + WithDialTimeout("1s"), + WithOldConnCloseDuration("1s"), + WithResolveDNS(true), +} func WithAddr(addr string) Option { return func(p *pool) { @@ -106,7 +104,7 @@ func WithSize(size uint64) Option { func WithDialOptions(opts ...DialOption) Option { return func(p *pool) { - if opts != nil && len(opts) > 0 { + if len(opts) > 0 { if len(p.dopts) > 0 { p.dopts = append(p.dopts, opts...) } else { diff --git a/internal/net/grpc/pool/option_test.go b/internal/net/grpc/pool/option_test.go index cfda3c1a3d..a3bfc19fe7 100644 --- a/internal/net/grpc/pool/option_test.go +++ b/internal/net/grpc/pool/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/internal/backoff" - "go.uber.org/goleak" ) func TestWithAddr(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { addr string @@ -63,7 +64,7 @@ func TestWithAddr(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithAddr(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithAddr(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithAddr(t *testing.T) { got := WithAddr(test.args.addr) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -139,6 +142,8 @@ func TestWithAddr(t *testing.T) { } func TestWithHost(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { host string @@ -176,7 +181,7 @@ func TestWithHost(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -212,9 +217,11 @@ func TestWithHost(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -235,7 +242,7 @@ func TestWithHost(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { 
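
The `WithDialOptions` hunk above replaces `opts != nil && len(opts) > 0` with `len(opts) > 0`. That is safe because `len` of a nil slice is defined to be zero, so the single check already excludes both nil and empty variadic arguments. A small standalone demonstration:

```go
package main

import "fmt"

// appendOpts mirrors the simplified guard: len alone filters out nil and empty input.
func appendOpts(dst []string, opts ...string) []string {
	if len(opts) > 0 {
		if len(dst) > 0 {
			return append(dst, opts...)
		}
		return opts
	}
	return dst
}

func main() {
	fmt.Println(appendOpts(nil))                    // []
	fmt.Println(appendOpts(nil, "a", "b"))          // [a b]
	fmt.Println(appendOpts([]string{"x"}, "y"))     // [x y]
	var none []string
	fmt.Println(appendOpts([]string{"x"}, none...)) // [x]
}
```
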
test.checkFunc = defaultCheckFunc @@ -243,7 +250,7 @@ func TestWithHost(t *testing.T) { got := WithHost(test.args.host) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -252,6 +259,8 @@ func TestWithHost(t *testing.T) { } func TestWithPort(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { port int @@ -289,7 +298,7 @@ func TestWithPort(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -325,9 +334,11 @@ func TestWithPort(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -348,7 +359,7 @@ func TestWithPort(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -356,7 +367,7 @@ func TestWithPort(t *testing.T) { got := WithPort(test.args.port) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -365,6 +376,8 @@ func TestWithPort(t *testing.T) { } func TestWithStartPort(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { port int @@ -402,7 +415,7 @@ func TestWithStartPort(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -438,9 +451,11 @@ func TestWithStartPort(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -461,7 +476,7 @@ func TestWithStartPort(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -469,7 +484,7 @@ func TestWithStartPort(t *testing.T) { got := WithStartPort(test.args.port) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -478,6 +493,8 @@ func TestWithStartPort(t *testing.T) { } func TestWithEndPort(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { port int @@ -515,7 +532,7 @@ func TestWithEndPort(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: 
\"%#v\"", obj, w.obj) } return nil } @@ -551,9 +568,11 @@ func TestWithEndPort(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -574,7 +593,7 @@ func TestWithEndPort(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -582,7 +601,124 @@ func TestWithEndPort(t *testing.T) { got := WithEndPort(test.args.port) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithResolveDNS(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + flg bool + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + flg: false, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + flg: false, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithResolveDNS(test.args.flg) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithResolveDNS(test.args.flg) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -591,6 +727,8 @@ func TestWithEndPort(t *testing.T) { } func TestWithBackoff(t *testing.T) { + t.Parallel() + // Change interface type to the type of 
object you are testing type T = interface{} type args struct { bo backoff.Backoff @@ -628,7 +766,7 @@ func TestWithBackoff(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -664,9 +802,11 @@ func TestWithBackoff(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -687,7 +827,7 @@ func TestWithBackoff(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -695,7 +835,7 @@ func TestWithBackoff(t *testing.T) { got := WithBackoff(test.args.bo) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -704,6 +844,8 @@ func TestWithBackoff(t *testing.T) { } func TestWithSize(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { size uint64 @@ -741,7 +883,7 @@ func TestWithSize(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -777,9 +919,11 @@ func TestWithSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -800,7 +944,7 @@ func TestWithSize(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -808,7 +952,7 @@ func TestWithSize(t *testing.T) { got := WithSize(test.args.size) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -817,6 +961,8 @@ func TestWithSize(t *testing.T) { } func TestWithDialOptions(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { opts []DialOption @@ -854,7 +1000,7 @@ func TestWithDialOptions(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -890,9 +1036,11 @@ func TestWithDialOptions(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -913,7 +1061,7 @@ func TestWithDialOptions(t *testing.T) { } */ - // 
Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -921,7 +1069,7 @@ func TestWithDialOptions(t *testing.T) { got := WithDialOptions(test.args.opts...) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -930,6 +1078,8 @@ func TestWithDialOptions(t *testing.T) { } func TestWithDialTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -967,7 +1117,7 @@ func TestWithDialTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1003,9 +1153,11 @@ func TestWithDialTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1026,7 +1178,7 @@ func TestWithDialTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1034,7 +1186,7 @@ func TestWithDialTimeout(t *testing.T) { got := WithDialTimeout(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -1043,6 +1195,8 @@ func TestWithDialTimeout(t *testing.T) { } func TestWithOldConnCloseDuration(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -1080,7 +1234,7 @@ func TestWithOldConnCloseDuration(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -1116,9 +1270,11 @@ func TestWithOldConnCloseDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1139,7 +1295,7 @@ func TestWithOldConnCloseDuration(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -1147,7 +1303,7 @@ func TestWithOldConnCloseDuration(t *testing.T) { got := WithOldConnCloseDuration(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/internal/net/grpc/pool/pool.go b/internal/net/grpc/pool/pool.go index ca149ce4f9..c7b1aae5ce 100644 --- a/internal/net/grpc/pool/pool.go 
+++ b/internal/net/grpc/pool/pool.go @@ -20,7 +20,6 @@ package pool import ( "context" "fmt" - "math" "sort" "strings" "sync/atomic" @@ -34,8 +33,10 @@ import ( "google.golang.org/grpc/connectivity" ) -type ClientConn = grpc.ClientConn -type DialOption = grpc.DialOption +type ( + ClientConn = grpc.ClientConn + DialOption = grpc.DialOption +) type Conn interface { Connect(context.Context) (Conn, error) @@ -146,7 +147,7 @@ func (p *pool) Connect(ctx context.Context) (c Conn, err error) { pc, ok = p.load(i) ) if ok && pc != nil && pc.addr == addr && isHealthy(pc.conn) { - // TODO maybe we should check neigbour pool slice if new addrs come. + // TODO maybe we should check neighbour pool slice if new addrs come. continue } log.Debugf("establishing balanced connection to %s", addr) @@ -252,7 +253,7 @@ func (p *pool) dial(ctx context.Context, addr string) (conn *ClientConn, err err if p.bo != nil { var res interface{} retry := 0 - res, err = p.bo.Do(ctx, func() (interface{}, error) { + res, err = p.bo.Do(ctx, func(ctx context.Context) (r interface{}, ret bool, err error) { log.Debugf("dialing to %s with backoff, retry: %d", addr, retry) ctx, cancel := context.WithTimeout(ctx, p.dialTimeout) defer cancel() @@ -263,7 +264,7 @@ func (p *pool) dial(ctx context.Context, addr string) (conn *ClientConn, err err log.Debugf("failed to dial to %s: %s", addr, err) } retry++ - return nil, err + return nil, err != nil, err } if !isHealthy(conn) { if conn != nil { @@ -271,9 +272,9 @@ func (p *pool) dial(ctx context.Context, addr string) (conn *ClientConn, err err log.Debugf("connection for %s is unhealthy: %s", addr, err) } retry++ - return nil, errors.ErrGRPCClientConnNotFound(addr) + return nil, true, errors.ErrGRPCClientConnNotFound(addr) } - return conn, nil + return conn, false, nil }) var ok bool conn, ok = res.(*ClientConn) @@ -345,9 +346,6 @@ func (p *pool) get(retry uint64) (*ClientConn, bool) { } return nil, false } - if atomic.LoadUint64(&p.current) >= math.MaxUint64-2 { - atomic.StoreUint64(&p.current, 0) - } if res := p.pool[atomic.AddUint64(&p.current, 1)%p.Len()].Load(); res != nil { if pc, ok := res.(*poolConn); ok && pc != nil && isHealthy(pc.conn) { @@ -377,6 +375,7 @@ func (p *pool) lookupIPAddr(ctx context.Context) (ips []string, err error) { if len(addrs) == 0 { return nil, errors.ErrGRPCLookupIPAddrNotFound(p.host) } + ips = make([]string, 0, len(addrs)) const network = "tcp" @@ -411,6 +410,7 @@ func (p *pool) lookupIPAddr(ctx context.Context) (ips []string, err error) { if len(ips) == 0 { return nil, errors.ErrGRPCLookupIPAddrNotFound(p.host) } + sort.Strings(ips) return ips, nil diff --git a/internal/net/grpc/pool/pool_bench_test.go b/internal/net/grpc/pool/pool_bench_test.go deleted file mode 100644 index 625698030a..0000000000 --- a/internal/net/grpc/pool/pool_bench_test.go +++ /dev/null @@ -1,206 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
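
The `(*pool).dial` hunk above adapts to a new `backoff.Do` callback signature: the function now receives the context and returns `(result, retryable, error)`, with the middle boolean telling the backoff loop whether another attempt is worth making (`err != nil` after a failed dial, `true` for an unhealthy connection, `false` on success). The stand-in below only illustrates that contract; it is not Vald's `internal/backoff` implementation:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type doFunc func(ctx context.Context) (interface{}, bool, error)

// do retries fn until it succeeds, reports a non-retryable result, or the
// attempt budget or context is exhausted.
func do(ctx context.Context, fn doFunc, attempts int, wait time.Duration) (interface{}, error) {
	var (
		res       interface{}
		retryable bool
		err       error
	)
	for i := 0; i < attempts; i++ {
		res, retryable, err = fn(ctx)
		if err == nil || !retryable {
			return res, err
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(wait):
		}
	}
	return res, err
}

func main() {
	errUnhealthy := errors.New("connection unhealthy")
	calls := 0
	res, err := do(context.Background(), func(ctx context.Context) (interface{}, bool, error) {
		calls++
		if calls < 3 {
			return nil, true, errUnhealthy // retryable: ask the loop to try again
		}
		return "healthy-conn", false, nil // success: stop retrying
	}, 5, 10*time.Millisecond)
	fmt.Println(res, err, calls) // healthy-conn <nil> 3
}
```
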
-// - -// Package pool provides grpc client connection pool -package pool - -import ( - "context" - "sync" - "testing" - - "github.com/vdaas/vald/apis/grpc/discoverer" - "github.com/vdaas/vald/apis/grpc/payload" - "github.com/vdaas/vald/internal/log" - "github.com/vdaas/vald/internal/log/level" - "github.com/vdaas/vald/internal/net" - "google.golang.org/grpc" -) - -const ( - DefaultServerAddr = "localhost:5001" - DefaultPoolSize = 10 -) - -type server struct { - discoverer.DiscovererServer -} - -func init() { - testing.Init() - log.Init(log.WithLevel(level.ERROR.String())) -} - -func (s *server) Pods(context.Context, *payload.Discoverer_Request) (*payload.Info_Pods, error) { - return &payload.Info_Pods{ - Pods: []*payload.Info_Pod{ - { - Name: "vald is high scalable distributed high-speed approximate nearest neighbor search engine", - }, - }, - }, nil -} - -func (s *server) Nodes(context.Context, *payload.Discoverer_Request) (*payload.Info_Nodes, error) { - return new(payload.Info_Nodes), nil -} - -func ListenAndServe(b *testing.B, addr string) func() { - lis, err := net.Listen("tcp", addr) - if err != nil { - b.Error(err) - } - - s := grpc.NewServer() - discoverer.RegisterDiscovererServer(s, new(server)) - - wg := new(sync.WaitGroup) - wg.Add(1) - - go func() { - wg.Done() - if err := s.Serve(lis); err != nil { - b.Error(err) - } - }() - - wg.Wait() - return func() { - s.Stop() - } -} - -func do(b *testing.B, conn *ClientConn) { - b.Helper() - _, err := discoverer.NewDiscovererClient(conn).Nodes(context.Background(), new(payload.Discoverer_Request)) - if err != nil { - b.Error(err) - } -} - -func Benchmark_ConnPool(b *testing.B) { - defer ListenAndServe(b, DefaultServerAddr)() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - pool, err := New(ctx, - WithAddr(DefaultServerAddr), - WithSize(DefaultPoolSize), - WithDialOptions(grpc.WithInsecure()), - ) - if err != nil { - b.Error(err) - } - pool, err = pool.Connect(ctx) - if err != nil { - b.Error(err) - } - - b.StopTimer() - b.ResetTimer() - b.ReportAllocs() - b.StartTimer() - for i := 0; i < b.N; i++ { - conn, ok := pool.Get() - if ok { - do(b, conn) - } - } - b.StopTimer() -} - -func Benchmark_StaticDial(b *testing.B) { - defer ListenAndServe(b, DefaultServerAddr)() - - conn, err := grpc.DialContext(context.Background(), DefaultServerAddr, grpc.WithInsecure()) - if err != nil { - b.Error(err) - } - - conns := new(sync.Map) - conns.Store(DefaultServerAddr, conn) - - b.StopTimer() - b.ResetTimer() - b.ReportAllocs() - b.StartTimer() - for i := 0; i < b.N; i++ { - val, ok := conns.Load(DefaultServerAddr) - if ok { - do(b, val.(*ClientConn)) - } - } - b.StopTimer() -} - -func BenchmarkParallel_ConnPool(b *testing.B) { - defer ListenAndServe(b, DefaultServerAddr)() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - pool, err := New(ctx, - WithAddr(DefaultServerAddr), - WithSize(DefaultPoolSize), - WithDialOptions(grpc.WithInsecure()), - ) - if err != nil { - b.Error(err) - } - pool, err = pool.Connect(ctx) - if err != nil { - b.Error(err) - } - - b.StopTimer() - b.ResetTimer() - b.ReportAllocs() - b.StartTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - conn, ok := pool.Get() - if ok { - do(b, conn) - } - } - }) - b.StopTimer() -} - -func BenchmarkParallel_StaticDial(b *testing.B) { - defer ListenAndServe(b, DefaultServerAddr)() - - conn, err := grpc.DialContext(context.Background(), DefaultServerAddr, grpc.WithInsecure()) - if err != nil { - b.Error(err) - } - - conns 
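
The `(*pool).get` hunk above also drops the explicit reset of `p.current` near `math.MaxUint64`. Unsigned addition in Go wraps around modulo 2^64, so `atomic.AddUint64(&p.current, 1) % p.Len()` stays within range without any guard; the single repeated index at the wrap point is harmless for round-robin selection. A quick standalone check:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	current := ^uint64(0) - 1 // one below the maximum, so the second increment wraps
	poolLen := uint64(3)
	for i := 0; i < 4; i++ {
		idx := atomic.AddUint64(&current, 1) % poolLen
		fmt.Println(idx) // always in [0, poolLen) even across the overflow
	}
}
```
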
:= new(sync.Map) - conns.Store(DefaultServerAddr, conn) - - b.StopTimer() - b.ResetTimer() - b.ReportAllocs() - b.StartTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - val, ok := conns.Load(DefaultServerAddr) - if ok { - do(b, val.(*ClientConn)) - } - } - }) - b.StopTimer() -} diff --git a/internal/net/grpc/pool/pool_test.go b/internal/net/grpc/pool/pool_test.go index 8f845f7747..1c5c84e3bc 100644 --- a/internal/net/grpc/pool/pool_test.go +++ b/internal/net/grpc/pool/pool_test.go @@ -26,11 +26,11 @@ import ( "github.com/vdaas/vald/internal/backoff" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { ctx context.Context opts []Option @@ -86,9 +86,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -103,12 +105,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotC, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_Connect(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -127,6 +129,7 @@ func Test_pool_Connect(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -174,6 +177,7 @@ func Test_pool_Connect(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -204,6 +208,7 @@ func Test_pool_Connect(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -213,9 +218,11 @@ func Test_pool_Connect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -240,6 +247,7 @@ func Test_pool_Connect(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -247,12 +255,12 @@ func Test_pool_Connect(t *testing.T) { if err := test.checkFunc(test.want, gotC, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_load(t *testing.T) { + t.Parallel() type args struct { idx int } @@ -271,6 +279,7 @@ func Test_pool_load(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -318,6 +327,7 @@ func Test_pool_load(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -348,6 +358,7 @@ func Test_pool_load(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -357,9 +368,11 @@ func Test_pool_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -384,6 +397,7 @@ func Test_pool_load(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -391,12 +405,12 @@ func 
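
Back in `pool.go`, `lookupIPAddr` now preallocates `ips = make([]string, 0, len(addrs))` before appending resolved addresses, which avoids repeated slice growth when the host resolves to many IPs. For illustration:

```go
package main

import "fmt"

func main() {
	addrs := []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"}
	ips := make([]string, 0, len(addrs)) // capacity known up front, length zero
	for _, a := range addrs {
		ips = append(ips, a) // never reallocates
	}
	fmt.Println(len(ips), cap(ips)) // 3 3
}
```
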
Test_pool_load(t *testing.T) { if err := test.checkFunc(test.want, gotPc, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_connect(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -415,6 +429,7 @@ func Test_pool_connect(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -462,6 +477,7 @@ func Test_pool_connect(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -492,6 +508,7 @@ func Test_pool_connect(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -501,9 +518,11 @@ func Test_pool_connect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -528,6 +547,7 @@ func Test_pool_connect(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -535,12 +555,12 @@ func Test_pool_connect(t *testing.T) { if err := test.checkFunc(test.want, gotC, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_Disconnect(t *testing.T) { + t.Parallel() type fields struct { pool []atomic.Value startPort uint16 @@ -556,6 +576,7 @@ func Test_pool_Disconnect(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -595,6 +616,7 @@ func Test_pool_Disconnect(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -622,6 +644,7 @@ func Test_pool_Disconnect(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -631,9 +654,11 @@ func Test_pool_Disconnect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -658,6 +683,7 @@ func Test_pool_Disconnect(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -665,12 +691,12 @@ func Test_pool_Disconnect(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_dial(t *testing.T) { + t.Parallel() type args struct { ctx context.Context addr string @@ -690,6 +716,7 @@ func Test_pool_dial(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -738,6 +765,7 @@ func Test_pool_dial(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -769,6 +797,7 @@ func Test_pool_dial(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -778,9 +807,11 @@ func Test_pool_dial(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
test.beforeFunc(test.args) } @@ -805,6 +836,7 @@ func Test_pool_dial(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -812,12 +844,12 @@ func Test_pool_dial(t *testing.T) { if err := test.checkFunc(test.want, gotConn, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_IsHealthy(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -836,6 +868,7 @@ func Test_pool_IsHealthy(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -879,6 +912,7 @@ func Test_pool_IsHealthy(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -909,6 +943,7 @@ func Test_pool_IsHealthy(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -918,9 +953,11 @@ func Test_pool_IsHealthy(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -945,6 +982,7 @@ func Test_pool_IsHealthy(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -952,12 +990,12 @@ func Test_pool_IsHealthy(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_Do(t *testing.T) { + t.Parallel() type args struct { f func(conn *ClientConn) error } @@ -976,6 +1014,7 @@ func Test_pool_Do(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -1019,6 +1058,7 @@ func Test_pool_Do(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -1049,6 +1089,7 @@ func Test_pool_Do(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -1058,9 +1099,11 @@ func Test_pool_Do(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1085,6 +1128,7 @@ func Test_pool_Do(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -1092,12 +1136,12 @@ func Test_pool_Do(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_Get(t *testing.T) { + t.Parallel() type fields struct { pool []atomic.Value startPort uint16 @@ -1113,6 +1157,7 @@ func Test_pool_Get(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -1156,6 +1201,7 @@ func Test_pool_Get(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -1183,6 +1229,7 @@ func Test_pool_Get(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -1192,9 +1239,11 @@ func 
Test_pool_Get(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1219,6 +1268,7 @@ func Test_pool_Get(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -1226,12 +1276,12 @@ func Test_pool_Get(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_get(t *testing.T) { + t.Parallel() type args struct { retry uint64 } @@ -1250,6 +1300,7 @@ func Test_pool_get(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -1297,6 +1348,7 @@ func Test_pool_get(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -1327,6 +1379,7 @@ func Test_pool_get(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -1336,9 +1389,11 @@ func Test_pool_get(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1363,6 +1418,7 @@ func Test_pool_get(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -1370,12 +1426,12 @@ func Test_pool_get(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_Len(t *testing.T) { + t.Parallel() type fields struct { pool []atomic.Value startPort uint16 @@ -1391,6 +1447,7 @@ func Test_pool_Len(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -1430,6 +1487,7 @@ func Test_pool_Len(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -1457,6 +1515,7 @@ func Test_pool_Len(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -1466,9 +1525,11 @@ func Test_pool_Len(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1493,6 +1554,7 @@ func Test_pool_Len(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -1500,12 +1562,12 @@ func Test_pool_Len(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_Size(t *testing.T) { + t.Parallel() type fields struct { pool []atomic.Value startPort uint16 @@ -1521,6 +1583,7 @@ func Test_pool_Size(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -1560,6 +1623,7 @@ func Test_pool_Size(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, 
reconnectHash: "", }, want: want{}, @@ -1587,6 +1651,7 @@ func Test_pool_Size(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -1596,9 +1661,11 @@ func Test_pool_Size(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1623,6 +1690,7 @@ func Test_pool_Size(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -1630,12 +1698,12 @@ func Test_pool_Size(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_lookupIPAddr(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -1654,6 +1722,7 @@ func Test_pool_lookupIPAddr(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -1701,6 +1770,7 @@ func Test_pool_lookupIPAddr(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -1731,6 +1801,7 @@ func Test_pool_lookupIPAddr(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -1740,9 +1811,11 @@ func Test_pool_lookupIPAddr(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1767,6 +1840,7 @@ func Test_pool_lookupIPAddr(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -1774,12 +1848,12 @@ func Test_pool_lookupIPAddr(t *testing.T) { if err := test.checkFunc(test.want, gotIps, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_Reconnect(t *testing.T) { + t.Parallel() type args struct { ctx context.Context force bool @@ -1799,6 +1873,7 @@ func Test_pool_Reconnect(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -1847,6 +1922,7 @@ func Test_pool_Reconnect(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -1878,6 +1954,7 @@ func Test_pool_Reconnect(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -1887,9 +1964,11 @@ func Test_pool_Reconnect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1914,6 +1993,7 @@ func Test_pool_Reconnect(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -1921,12 +2001,12 @@ func Test_pool_Reconnect(t *testing.T) { if err := test.checkFunc(test.want, gotC, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_pool_scanGRPCPort(t *testing.T) { + t.Parallel() type args 
struct { ctx context.Context } @@ -1945,6 +2025,7 @@ func Test_pool_scanGRPCPort(t *testing.T) { roccd time.Duration closing atomic.Value isIP bool + resolveDNS bool reconnectHash string } type want struct { @@ -1988,6 +2069,7 @@ func Test_pool_scanGRPCPort(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -2018,6 +2100,7 @@ func Test_pool_scanGRPCPort(t *testing.T) { roccd: nil, closing: nil, isIP: false, + resolveDNS: false, reconnectHash: "", }, want: want{}, @@ -2027,9 +2110,11 @@ func Test_pool_scanGRPCPort(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2054,6 +2139,7 @@ func Test_pool_scanGRPCPort(t *testing.T) { roccd: test.fields.roccd, closing: test.fields.closing, isIP: test.fields.isIP, + resolveDNS: test.fields.resolveDNS, reconnectHash: test.fields.reconnectHash, } @@ -2061,12 +2147,12 @@ func Test_pool_scanGRPCPort(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_isGRPCPort(t *testing.T) { + t.Parallel() type args struct { ctx context.Context host string @@ -2121,9 +2207,11 @@ func Test_isGRPCPort(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2138,12 +2226,12 @@ func Test_isGRPCPort(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_isHealthy(t *testing.T) { + t.Parallel() type args struct { conn *ClientConn } @@ -2192,9 +2280,11 @@ func Test_isHealthy(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2209,7 +2299,6 @@ func Test_isHealthy(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/grpc/proto/proto_test.go b/internal/net/grpc/proto/proto_test.go index e45519babd..bb6c7e2b4d 100644 --- a/internal/net/grpc/proto/proto_test.go +++ b/internal/net/grpc/proto/proto_test.go @@ -22,9 +22,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestClone(t *testing.T) { + t.Parallel() type args struct { m Message } @@ -73,8 +75,11 @@ func TestClone(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -89,7 +94,6 @@ func TestClone(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/grpc/status/status.go b/internal/net/grpc/status/status.go index 7653e8347b..5b4457bf75 100644 --- a/internal/net/grpc/status/status.go +++ b/internal/net/grpc/status/status.go @@ -21,7 +21,7 @@ import ( "fmt" "os" - "github.com/vdaas/vald/apis/grpc/errors" + "github.com/vdaas/vald/apis/grpc/v1/errors" "github.com/vdaas/vald/internal/info" 
"github.com/vdaas/vald/internal/log" "google.golang.org/grpc/codes" diff --git a/internal/net/grpc/status/status_test.go b/internal/net/grpc/status/status_test.go index 7af60400d0..5460180385 100644 --- a/internal/net/grpc/status/status_test.go +++ b/internal/net/grpc/status/status_test.go @@ -21,15 +21,15 @@ import ( "reflect" "testing" - gerrors "github.com/vdaas/vald/apis/grpc/errors" + gerrors "github.com/vdaas/vald/apis/grpc/v1/errors" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - - "go.uber.org/goleak" ) func Test_newStatus(t *testing.T) { + t.Parallel() type args struct { code codes.Code msg string @@ -87,9 +87,11 @@ func Test_newStatus(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -104,12 +106,12 @@ func Test_newStatus(t *testing.T) { if err := test.checkFunc(test.want, gotSt); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithCanceled(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -164,9 +166,11 @@ func TestWrapWithCanceled(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -181,12 +185,12 @@ func TestWrapWithCanceled(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithUnknown(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -241,9 +245,11 @@ func TestWrapWithUnknown(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -258,12 +264,12 @@ func TestWrapWithUnknown(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithInvalidArgument(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -318,9 +324,11 @@ func TestWrapWithInvalidArgument(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -335,12 +343,12 @@ func TestWrapWithInvalidArgument(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithDeadlineExceeded(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -395,9 +403,11 @@ func TestWrapWithDeadlineExceeded(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -412,12 +422,12 @@ func TestWrapWithDeadlineExceeded(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithNotFound(t *testing.T) { + t.Parallel() type args struct { msg string 
err error @@ -472,9 +482,11 @@ func TestWrapWithNotFound(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -489,12 +501,12 @@ func TestWrapWithNotFound(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithAlreadyExists(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -549,9 +561,11 @@ func TestWrapWithAlreadyExists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -566,12 +580,12 @@ func TestWrapWithAlreadyExists(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithPermissionDenied(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -626,9 +640,11 @@ func TestWrapWithPermissionDenied(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -643,12 +659,12 @@ func TestWrapWithPermissionDenied(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithResourceExhausted(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -703,9 +719,11 @@ func TestWrapWithResourceExhausted(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -720,12 +738,12 @@ func TestWrapWithResourceExhausted(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithFailedPrecondition(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -780,9 +798,11 @@ func TestWrapWithFailedPrecondition(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -797,12 +817,12 @@ func TestWrapWithFailedPrecondition(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithAborted(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -857,9 +877,11 @@ func TestWrapWithAborted(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -874,12 +896,12 @@ func TestWrapWithAborted(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithOutOfRange(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -934,9 +956,11 @@ func 
TestWrapWithOutOfRange(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -951,12 +975,12 @@ func TestWrapWithOutOfRange(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithUnimplemented(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -1011,9 +1035,11 @@ func TestWrapWithUnimplemented(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1028,12 +1054,12 @@ func TestWrapWithUnimplemented(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithInternal(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -1088,9 +1114,11 @@ func TestWrapWithInternal(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1105,12 +1133,12 @@ func TestWrapWithInternal(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithUnavailable(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -1165,9 +1193,11 @@ func TestWrapWithUnavailable(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1182,12 +1212,12 @@ func TestWrapWithUnavailable(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithDataLoss(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -1242,9 +1272,11 @@ func TestWrapWithDataLoss(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1259,12 +1291,12 @@ func TestWrapWithDataLoss(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestWrapWithUnauthenticated(t *testing.T) { + t.Parallel() type args struct { msg string err error @@ -1319,9 +1351,11 @@ func TestWrapWithUnauthenticated(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1336,12 +1370,12 @@ func TestWrapWithUnauthenticated(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestFromError(t *testing.T) { + t.Parallel() type args struct { err error } @@ -1390,9 +1424,11 @@ func TestFromError(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range 
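
The `WrapWith*` helpers under test here are built on the standard grpc-go status primitives (the `codes` and `status` imports above). A self-contained illustration of those primitives, not of Vald's wrappers themselves:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.New(codes.NotFound, "vector not found").Err()

	st, ok := status.FromError(err)
	fmt.Println(ok, st.Code(), st.Message()) // true NotFound vector not found

	// An error that never came from the status package reports codes.Unknown.
	st, ok = status.FromError(fmt.Errorf("plain error"))
	fmt.Println(ok, st.Code()) // false Unknown
}
```
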
tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1407,7 +1443,6 @@ func TestFromError(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/grpc/stream_test.go b/internal/net/grpc/stream_test.go index 5b8b7ecb7d..0a0a04dc8b 100644 --- a/internal/net/grpc/stream_test.go +++ b/internal/net/grpc/stream_test.go @@ -22,12 +22,12 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" - "google.golang.org/grpc" - "go.uber.org/goleak" + "google.golang.org/grpc" ) func TestBidirectionalStream(t *testing.T) { + t.Parallel() type args struct { ctx context.Context stream grpc.ServerStream @@ -88,9 +88,11 @@ func TestBidirectionalStream(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -105,12 +107,12 @@ func TestBidirectionalStream(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestBidirectionalStreamClient(t *testing.T) { + t.Parallel() type args struct { stream grpc.ClientStream dataProvider func() interface{} @@ -168,9 +170,11 @@ func TestBidirectionalStreamClient(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -185,7 +189,6 @@ func TestBidirectionalStreamClient(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/http/client/client_test.go b/internal/net/http/client/client_test.go index 26bef384f7..d0887b6290 100644 --- a/internal/net/http/client/client_test.go +++ b/internal/net/http/client/client_test.go @@ -95,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/http/client/option.go b/internal/net/http/client/option.go index 0613534e4f..cbef302d36 100644 --- a/internal/net/http/client/option.go +++ b/internal/net/http/client/option.go @@ -28,13 +28,11 @@ import ( type Option func(*transport) error -var ( - defaultOptions = []Option{ - WithProxy(http.ProxyFromEnvironment), - WithEnableKeepAlives(true), - WithEnableCompression(true), - } -) +var defaultOptions = []Option{ + WithProxy(http.ProxyFromEnvironment), + WithEnableKeepAlives(true), + WithEnableCompression(true), +} func WithProxy(px func(*http.Request) (*url.URL, error)) Option { return func(tr *transport) error { @@ -54,7 +52,6 @@ func WithDialContext(dx func(ctx context.Context, network, addr string) (net.Con return nil } - } func WithTLSHandshakeTimeout(dur string) Option { diff --git a/internal/net/http/json/json_test.go b/internal/net/http/json/json_test.go index 0ed9727edf..566cfb1760 100644 --- a/internal/net/http/json/json_test.go +++ b/internal/net/http/json/json_test.go @@ -26,7 +26,6 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/http/rest" - "go.uber.org/goleak" ) @@ -471,7 +470,6 @@ func TestDecodeResponse(t *testing.T) { if 
err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -548,7 +546,6 @@ func TestEncodeRequest(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -631,7 +628,6 @@ func TestRequest(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/http/metrics/pprof.go b/internal/net/http/metrics/pprof.go index 9965b8bfcf..3f99e5fe88 100644 --- a/internal/net/http/metrics/pprof.go +++ b/internal/net/http/metrics/pprof.go @@ -25,7 +25,7 @@ import ( "github.com/vdaas/vald/internal/net/http/routing" ) -// NewPProfRoutes returns PProf server route&method information from debug flag +// NewPProfRoutes returns PProf server route&method information from debug flag. func NewPProfHandler() http.Handler { return routing.New( routing.WithRoutes([]routing.Route{ diff --git a/internal/net/http/middleware/option.go b/internal/net/http/middleware/option.go index 1105297782..2e8df60730 100644 --- a/internal/net/http/middleware/option.go +++ b/internal/net/http/middleware/option.go @@ -26,12 +26,10 @@ import ( type TimeoutOption func(*timeout) -var ( - defaultTimeoutOpts = []TimeoutOption{ - WithTimeout("3s"), - WithErrorGroup(errgroup.Get()), - } -) +var defaultTimeoutOpts = []TimeoutOption{ + WithTimeout("3s"), + WithErrorGroup(errgroup.Get()), +} func WithTimeout(dur string) TimeoutOption { return func(t *timeout) { diff --git a/internal/net/http/middleware/timeout_test.go b/internal/net/http/middleware/timeout_test.go index f2418f6651..d19672583d 100644 --- a/internal/net/http/middleware/timeout_test.go +++ b/internal/net/http/middleware/timeout_test.go @@ -26,7 +26,6 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/http/rest" - "go.uber.org/goleak" ) @@ -282,7 +281,6 @@ func Test_timeout_Wrap(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/http/rest/rest.go b/internal/net/http/rest/rest.go index db1ccd6692..1e55a72a45 100644 --- a/internal/net/http/rest/rest.go +++ b/internal/net/http/rest/rest.go @@ -24,19 +24,19 @@ import ( type Func func(http.ResponseWriter, *http.Request) (code int, err error) const ( - // ContentType represents a HTTP header name "Content-Type" + // ContentType represents a HTTP header name "Content-Type". ContentType = "Content-Type" - // ApplicationJSON represents a HTTP content type "application/json" + // ApplicationJSON represents a HTTP content type "application/json". ApplicationJSON = "application/json" - // ProblemJSON represents a HTTP content type "application/problem+json" + // ProblemJSON represents a HTTP content type "application/problem+json". ProblemJSON = "application/problem+json" - // TextPlain represents a HTTP content type "text/plain" + // TextPlain represents a HTTP content type "text/plain". TextPlain = "text/plain" - // CharsetUTF8 represents a UTF-8 charset for HTTP response "charset=UTF-8" + // CharsetUTF8 represents a UTF-8 charset for HTTP response "charset=UTF-8". 
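// Editor's note: the test hunks above repeatedly rewrite the table loops from
// `for _, test := range tests` to `for _, tc := range tests { test := tc ... }`
// and switch `goleak.VerifyNone(t)` to `goleak.VerifyNone(tt)` alongside
// `tt.Parallel()`. The snippet below is a minimal, self-contained sketch of that
// pattern; the test cases and the checked value are hypothetical, only the
// capture/Parallel/goleak structure mirrors what the diff applies.
package example

import (
	"testing"

	"go.uber.org/goleak"
)

func TestParallelTablePattern(t *testing.T) {
	t.Parallel()
	type test struct {
		name string
		in   string
		want string
	}
	tests := []test{
		{name: "keeps input unchanged", in: "a", want: "a"},
		{name: "empty input stays empty", in: "", want: ""},
	}
	for _, tc := range tests {
		// Capture the range variable: before Go 1.22, every parallel closure
		// would otherwise observe only the last element of the slice.
		test := tc
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			// Verify leaks against the subtest's *testing.T, not the parent's,
			// so a leak is reported on the subtest that actually produced it.
			defer goleak.VerifyNone(tt)
			if got := test.in; got != test.want {
				tt.Errorf("got = %v, want %v", got, test.want)
			}
		})
	}
}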
CharsetUTF8 = "charset=UTF-8" ) diff --git a/internal/net/http/routing/option.go b/internal/net/http/routing/option.go index c10d9392ae..3ef133a127 100644 --- a/internal/net/http/routing/option.go +++ b/internal/net/http/routing/option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/internal/net/http/middleware" type Option func(*router) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithMiddleware(mw middleware.Wrapper) Option { return func(r *router) { diff --git a/internal/net/http/routing/option_test.go b/internal/net/http/routing/option_test.go index d129bc23c2..3101c6aa14 100644 --- a/internal/net/http/routing/option_test.go +++ b/internal/net/http/routing/option_test.go @@ -199,8 +199,8 @@ func TestWithRoutes(t *testing.T) { tests := []test{ func() test { rs := []Route{ - Route{}, - Route{}, + {}, + {}, } return test{ @@ -229,8 +229,8 @@ func TestWithRoutes(t *testing.T) { r := Route{} rs := []Route{ - Route{}, - Route{}, + {}, + {}, } return test{ diff --git a/internal/net/http/routing/router.go b/internal/net/http/routing/router.go index f2247340d5..9a0eb5873c 100644 --- a/internal/net/http/routing/router.go +++ b/internal/net/http/routing/router.go @@ -34,7 +34,7 @@ type router struct { routes []Route } -//New returns Routed http.Handler +// New returns Routed http.Handler. func New(opts ...Option) http.Handler { r := new(router) for _, opt := range append(defaultOpts, opts...) { diff --git a/internal/net/http/routing/router_test.go b/internal/net/http/routing/router_test.go index 62b926053c..af0f146287 100644 --- a/internal/net/http/routing/router_test.go +++ b/internal/net/http/routing/router_test.go @@ -26,7 +26,6 @@ import ( "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/http/middleware" "github.com/vdaas/vald/internal/net/http/rest" - "go.uber.org/goleak" ) @@ -272,7 +271,6 @@ func Test_router_routing(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/http/routing/routes.go b/internal/net/http/routing/routes.go index f5a8946f6a..98fd43f598 100644 --- a/internal/net/http/routing/routes.go +++ b/internal/net/http/routing/routes.go @@ -19,7 +19,7 @@ package routing import "github.com/vdaas/vald/internal/net/http/rest" -//Route struct +// Route struct. type Route struct { Name string Methods []string diff --git a/internal/net/http/transport/option.go b/internal/net/http/transport/option.go index 1433caef1c..eebf13e907 100644 --- a/internal/net/http/transport/option.go +++ b/internal/net/http/transport/option.go @@ -26,11 +26,9 @@ import ( // Option represents the functional option for transport and backoff. type Option func(*ert) -var ( - defaultOpts = []Option{ - WithRoundTripper(http.DefaultTransport), - } -) +var defaultOpts = []Option{ + WithRoundTripper(http.DefaultTransport), +} // WithRoundTripper returns the Option that set the RoundTripper. func WithRoundTripper(tr http.RoundTripper) Option { diff --git a/internal/net/http/transport/option_test.go b/internal/net/http/transport/option_test.go index ae4f64d05d..662b487a61 100644 --- a/internal/net/http/transport/option_test.go +++ b/internal/net/http/transport/option_test.go @@ -27,12 +27,10 @@ import ( "go.uber.org/goleak" ) -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. 
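// Editor's note: several test files above keep a package-level
// goleakIgnoreOptions slice so goroutines started by external packages do not
// trip leak detection. A hedged sketch of how such options feed into
// goleak.VerifyNone is shown here; the ignored function names are copied from
// the diff, the test itself is illustrative only.
package example

import (
	"testing"

	"go.uber.org/goleak"
)

// Goroutines owned by external packages (here fastime's timer loop and the
// runtime poller) are not leaks of the code under test, so they are filtered
// out of verification.
var goleakIgnoreOptions = []goleak.Option{
	goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"),
	goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"),
}

func TestSomethingThatUsesFastime(t *testing.T) {
	defer goleak.VerifyNone(t, goleakIgnoreOptions...)
	// ... exercise code that transitively starts fastime goroutines here ...
}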
- goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. +var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), +} func TestWithRoundTripper(t *testing.T) { type T = ert diff --git a/internal/net/http/transport/roundtrip.go b/internal/net/http/transport/roundtrip.go index 5f185c98e2..861641706f 100644 --- a/internal/net/http/transport/roundtrip.go +++ b/internal/net/http/transport/roundtrip.go @@ -18,6 +18,7 @@ package transport import ( + "context" "io" "io/ioutil" "net/http" @@ -32,7 +33,7 @@ type ert struct { bo backoff.Backoff } -// NewExpBackoff returns the backoff roundtripper implementation +// NewExpBackoff returns the backoff roundtripper implementation. func NewExpBackoff(opts ...Option) http.RoundTripper { e := new(ert) for _, opt := range append(defaultOpts, opts...) { @@ -50,28 +51,17 @@ func (e *ert) RoundTrip(req *http.Request) (res *http.Response, err error) { if e.bo == nil { return e.roundTrip(req) } - - var rterr error - _, err = e.bo.Do(req.Context(), func() (interface{}, error) { - r, reqerr := e.roundTrip(req) - if reqerr != nil { - // if the error is retryable, return the error and let backoff to retry. - if errors.Is(reqerr, errors.ErrTransportRetryable) { - return nil, reqerr - } - // if the error is not retryable, return nil error to terminate the backoff execution - rterr = reqerr - return nil, nil + _, err = e.bo.Do(req.Context(), func(ctx context.Context) (interface{}, bool, error) { + r, err := e.roundTrip(req) + if err != nil { + return nil, errors.Is(err, errors.ErrTransportRetryable), err } res = r - return r, nil + return r, false, nil }) if err != nil { return nil, err } - if rterr != nil { - return nil, rterr - } return res, nil } diff --git a/internal/net/http/transport/roundtrip_mock_test.go b/internal/net/http/transport/roundtrip_mock_test.go index 788009ec33..c51af5346e 100644 --- a/internal/net/http/transport/roundtrip_mock_test.go +++ b/internal/net/http/transport/roundtrip_mock_test.go @@ -29,11 +29,11 @@ func (rm *roundTripMock) RoundTrip(req *http.Request) (*http.Response, error) { } type backoffMock struct { - DoFunc func(context.Context, func() (interface{}, error)) (interface{}, error) + DoFunc func(context.Context, func(context.Context) (interface{}, bool, error)) (interface{}, error) CloseFunc func() } -func (bm *backoffMock) Do(ctx context.Context, fn func() (interface{}, error)) (interface{}, error) { +func (bm *backoffMock) Do(ctx context.Context, fn func(context.Context) (interface{}, bool, error)) (interface{}, error) { return bm.DoFunc(ctx, fn) } diff --git a/internal/net/http/transport/roundtrip_test.go b/internal/net/http/transport/roundtrip_test.go index 7a200a0ad6..7978c5b1f6 100644 --- a/internal/net/http/transport/roundtrip_test.go +++ b/internal/net/http/transport/roundtrip_test.go @@ -105,7 +105,6 @@ func TestNewExpBackoff(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -175,8 +174,9 @@ func Test_ert_RoundTrip(t *testing.T) { }, }, bo: &backoffMock{ - DoFunc: func(ctx context.Context, fn func() (interface{}, error)) (interface{}, error) { - return fn() + DoFunc: func(ctx context.Context, fn func(context.Context) (interface{}, bool, error)) (interface{}, error) { + val, _, err := fn(ctx) + return 
val, err }, }, }, @@ -200,8 +200,9 @@ func Test_ert_RoundTrip(t *testing.T) { }, }, bo: &backoffMock{ - DoFunc: func(ctx context.Context, fn func() (interface{}, error)) (interface{}, error) { - return fn() + DoFunc: func(ctx context.Context, fn func(context.Context) (interface{}, bool, error)) (interface{}, error) { + val, _, err := fn(ctx) + return val, err }, }, }, @@ -225,8 +226,9 @@ func Test_ert_RoundTrip(t *testing.T) { }, }, bo: &backoffMock{ - DoFunc: func(ctx context.Context, fn func() (interface{}, error)) (interface{}, error) { - return fn() + DoFunc: func(ctx context.Context, fn func(context.Context) (interface{}, bool, error)) (interface{}, error) { + val, _, err := fn(ctx) + return val, err }, }, }, @@ -243,7 +245,7 @@ func Test_ert_RoundTrip(t *testing.T) { }, fields: fields{ bo: &backoffMock{ - DoFunc: func(ctx context.Context, fn func() (interface{}, error)) (interface{}, error) { + DoFunc: func(ctx context.Context, fn func(context.Context) (interface{}, bool, error)) (interface{}, error) { return nil, errors.New("error") }, }, @@ -264,8 +266,9 @@ func Test_ert_RoundTrip(t *testing.T) { }, }, bo: &backoffMock{ - DoFunc: func(ctx context.Context, fn func() (interface{}, error)) (interface{}, error) { - return fn() + DoFunc: func(ctx context.Context, fn func(context.Context) (interface{}, bool, error)) (interface{}, error) { + val, _, err := fn(ctx) + return val, err }, }, }, @@ -285,8 +288,9 @@ func Test_ert_RoundTrip(t *testing.T) { }, }, bo: &backoffMock{ - DoFunc: func(ctx context.Context, fn func() (interface{}, error)) (interface{}, error) { - return fn() + DoFunc: func(ctx context.Context, fn func(context.Context) (interface{}, bool, error)) (interface{}, error) { + val, _, err := fn(ctx) + return val, err }, }, }, @@ -317,7 +321,6 @@ func Test_ert_RoundTrip(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -450,7 +453,6 @@ func Test_ert_roundTrip(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -514,7 +516,6 @@ func Test_retryableStatusCode(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/net.go b/internal/net/net.go index d02ef63f38..dc78a5918d 100644 --- a/internal/net/net.go +++ b/internal/net/net.go @@ -39,26 +39,24 @@ const ( ) type ( - // Conn is an alias of net.Conn + // Conn is an alias of net.Conn. Conn = net.Conn - // Dialer is an alias of net.Dialer + // Dialer is an alias of net.Dialer. Dialer = net.Dialer - // ListenConfig is an alias of net.ListenConfig + // ListenConfig is an alias of net.ListenConfig. ListenConfig = net.ListenConfig - // Listener is an alias of net.Listener + // Listener is an alias of net.Listener. Listener = net.Listener - // Resolver is an alias of net.Resolver + // Resolver is an alias of net.Resolver. Resolver = net.Resolver ) -var ( - // DefaultResolver is an alias of net.DefaultResolver - DefaultResolver = net.DefaultResolver -) +// DefaultResolver is an alias of net.DefaultResolver. +var DefaultResolver = net.DefaultResolver // Listen is a wrapper function of the net.Listen function. 
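// Editor's note: the transport hunks above change the backoff callback from
// func() (interface{}, error) to func(ctx) (interface{}, bool, error), where
// the bool reports whether the error is retryable; this is what lets
// RoundTrip drop its old rterr bookkeeping. The sketch below mirrors only the
// callback shape; the Backoff interface and the naive fixed-attempt loop are
// illustrative stand-ins, not the repository's internal/backoff implementation.
package example

import (
	"context"
	"errors"
)

// Backoff mirrors the callback signature introduced in the diff.
type Backoff interface {
	Do(ctx context.Context, fn func(ctx context.Context) (val interface{}, retryable bool, err error)) (interface{}, error)
}

type fixedRetry struct{ attempts int }

func (b fixedRetry) Do(ctx context.Context, fn func(context.Context) (interface{}, bool, error)) (interface{}, error) {
	var (
		val       interface{}
		retryable bool
		err       error
	)
	for i := 0; i < b.attempts; i++ {
		val, retryable, err = fn(ctx)
		if err == nil || !retryable {
			// A nil error or a non-retryable error terminates immediately.
			return val, err
		}
	}
	return val, err
}

var errTemporary = errors.New("temporary failure")

// fetch shows the caller side: the error is classified inline instead of
// being smuggled out through a captured variable.
func fetch(ctx context.Context, bo Backoff) (string, error) {
	v, err := bo.Do(ctx, func(ctx context.Context) (interface{}, bool, error) {
		res, err := doRequest(ctx)
		if err != nil {
			return nil, errors.Is(err, errTemporary), err
		}
		return res, false, nil
	})
	if err != nil {
		return "", err
	}
	return v.(string), nil
}

func doRequest(ctx context.Context) (string, error) { return "ok", nil }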
func Listen(network, address string) (Listener, error) { @@ -178,3 +176,19 @@ func ScanPorts(ctx context.Context, start, end uint16, host string) (ports []uin return ports, nil } + +func LoadLocalIP() string { + addrs, err := net.InterfaceAddrs() + if err != nil { + log.Warn(err) + return "" + } + for _, address := range addrs { + if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { + if ipnet.IP.To4() != nil { + return ipnet.IP.String() + } + } + } + return "" +} diff --git a/internal/net/net_test.go b/internal/net/net_test.go index fc6ed99970..9682efe928 100644 --- a/internal/net/net_test.go +++ b/internal/net/net_test.go @@ -35,13 +35,11 @@ import ( "go.uber.org/goleak" ) -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. +var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), + goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"), +} func TestMain(m *testing.M) { log.Init() @@ -108,7 +106,6 @@ func TestListen(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -199,7 +196,6 @@ func TestIsLocal(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -285,7 +281,6 @@ func TestDial(t *testing.T) { if err := test.checkFunc(test.want, gotConn, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -423,7 +418,6 @@ func TestParse(t *testing.T) { if err := test.checkFunc(test.want, gotHost, gotPort, gotIsIP, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -487,7 +481,6 @@ func TestIsIPv6(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -551,7 +544,6 @@ func TestIsIPv4(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -678,7 +670,6 @@ func TestSplitHostPort(t *testing.T) { if err := test.checkFunc(test.want, gotHost, gotPort, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -889,3 +880,66 @@ func TestScanPorts(t *testing.T) { }) } } + +func TestLoadLocalIP(t *testing.T) { + t.Parallel() + type want struct { + want string + } + type test struct { + name string + want want + checkFunc func(want, string) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got string) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := 
LoadLocalIP() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/net/tcp/control_darwin_test.go b/internal/net/tcp/control_darwin_test.go index 4328d1a6cf..b42640c36c 100644 --- a/internal/net/tcp/control_darwin_test.go +++ b/internal/net/tcp/control_darwin_test.go @@ -100,7 +100,6 @@ func TestControl(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/tcp/control_other_test.go b/internal/net/tcp/control_other_test.go index 89ac0f8d65..e70ee1c6d6 100644 --- a/internal/net/tcp/control_other_test.go +++ b/internal/net/tcp/control_other_test.go @@ -99,7 +99,6 @@ func TestControl(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/tcp/control_unix_test.go b/internal/net/tcp/control_unix_test.go index 5c322721d4..20c356e69c 100644 --- a/internal/net/tcp/control_unix_test.go +++ b/internal/net/tcp/control_unix_test.go @@ -24,7 +24,6 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) @@ -100,7 +99,6 @@ func TestControl(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/tcp/control_windows_test.go b/internal/net/tcp/control_windows_test.go index 4a90c8fe83..79d846430c 100644 --- a/internal/net/tcp/control_windows_test.go +++ b/internal/net/tcp/control_windows_test.go @@ -99,7 +99,6 @@ func TestControl(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/net/tcp/dialer.go b/internal/net/tcp/dialer.go index ed877f62f5..ab94537b77 100644 --- a/internal/net/tcp/dialer.go +++ b/internal/net/tcp/dialer.go @@ -21,6 +21,7 @@ import ( "context" "crypto/tls" "strconv" + "sync" "sync/atomic" "time" @@ -49,10 +50,18 @@ type dialer struct { dialerTimeout time.Duration dialerKeepAlive time.Duration dialerDualStack bool + addrs sync.Map der *net.Dialer dialer func(ctx context.Context, network, addr string) (net.Conn, error) } +type addrInfo struct { + addr string + host string + port uint16 + isIP bool +} + type dialerCache struct { ips []string cnt uint32 @@ -68,12 +77,12 @@ func (d *dialerCache) IP() string { return d.ips[atomic.AddUint32(&d.cnt, 1)%d.Len()] } -// Len returns the length of cached IP addresses +// Len returns the length of cached IP addresses. func (d *dialerCache) Len() uint32 { return uint32(len(d.ips)) } -// NewDialer initialize and return the dialer instance +// NewDialer initialize and return the dialer instance. func NewDialer(opts ...DialerOption) (der Dialer, err error) { d := new(dialer) for _, opt := range append(defaultDialerOptions, opts...) { @@ -93,7 +102,6 @@ func NewDialer(opts ...DialerOption) (der Dialer, err error) { if d.dnsRefreshDuration > d.dnsCacheExpiration { return nil, errors.ErrInvalidDNSConfig(d.dnsRefreshDuration, d.dnsCacheExpiration) } - if d.cache == nil { if d.cache, err = cache.New( cache.WithExpireDuration(d.dnsCacheExpirationStr), @@ -103,7 +111,6 @@ func NewDialer(opts ...DialerOption) (der Dialer, err error) { return nil, err } } - d.dialer = d.cachedDialer } @@ -115,7 +122,7 @@ func NewDialer(opts ...DialerOption) (der Dialer, err error) { return d, nil } -// GetDialer returns a function to return the connection +// GetDialer returns a function to return the connection. 
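// Editor's note: LoadLocalIP, added to internal/net a few hunks above, walks
// net.InterfaceAddrs and returns the first non-loopback IPv4 address, logging
// and returning "" on failure. The standalone sketch below takes the same
// approach against the standard library directly; returning an error instead
// of logging is an editorial deviation so the example stays self-contained.
package example

import (
	"errors"
	"net"
)

// localIPv4 returns the host's first non-loopback IPv4 interface address.
func localIPv4() (string, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return "", err
	}
	for _, addr := range addrs {
		ipnet, ok := addr.(*net.IPNet)
		if !ok || ipnet.IP.IsLoopback() {
			continue
		}
		if v4 := ipnet.IP.To4(); v4 != nil {
			return v4.String(), nil
		}
	}
	return "", errors.New("no non-loopback IPv4 address found")
}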
func (d *dialer) GetDialer() func(ctx context.Context, network, addr string) (net.Conn, error) { return d.dialer } @@ -142,7 +149,7 @@ func (d *dialer) lookup(ctx context.Context, host string) (*dialerCache, error) return dc, nil } -// StartDialerCache starts the dialer cache to expire the cache automatically +// StartDialerCache starts the dialer cache to expire the cache automatically. func (d *dialer) StartDialerCache(ctx context.Context) { if d.dnsCache && d.cache != nil { d.cache.Start(ctx) @@ -157,9 +164,31 @@ func (d *dialer) DialContext(ctx context.Context, network, address string) (net. } func (d *dialer) cachedDialer(dctx context.Context, network, addr string) (conn net.Conn, err error) { - host, port, isIP, err := net.Parse(addr) - if err != nil { - return nil, err + var ( + host string + port uint16 + isIP bool + ) + ai, ok := d.addrs.Load(addr) + if !ok { + host, port, isIP, err = net.Parse(addr) + if err != nil { + d.addrs.Delete(addr) + return nil, err + } + d.addrs.Store(addr, &addrInfo{ + host: host, + port: port, + addr: addr, + isIP: isIP, + }) + } else { + info, ok := ai.(*addrInfo) + if ok { + host = info.host + port = info.port + isIP = info.isIP + } } if d.dnsCache && !isIP { diff --git a/internal/net/tcp/dialer_test.go b/internal/net/tcp/dialer_test.go index 40ffde3cd8..75348fc285 100644 --- a/internal/net/tcp/dialer_test.go +++ b/internal/net/tcp/dialer_test.go @@ -44,13 +44,11 @@ import ( "go.uber.org/goleak" ) -var ( - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"), - goleak.IgnoreTopFunction("net._C2func_getaddrinfo"), - } -) +var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), + goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"), + goleak.IgnoreTopFunction("net._C2func_getaddrinfo"), +} func Test_dialerCache_IP(t *testing.T) { type fields struct { @@ -172,7 +170,6 @@ func Test_dialerCache_IP(t *testing.T) { if err := test.checkFunc(d, test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -230,7 +227,6 @@ func Test_dialerCache_Len(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -265,8 +261,9 @@ func TestNewDialer(t *testing.T) { want := w.wantDer.(*dialer) got := gotDer.(*dialer) - opts := []cmp.Option{cmp.AllowUnexported(*want), - cmpopts.IgnoreFields(*want, "dialer", "der"), + opts := []cmp.Option{ + cmp.AllowUnexported(*want), + cmpopts.IgnoreFields(*want, "dialer", "der", "addrs"), cmp.Comparer(func(x, y cache.Cache) bool { if x == nil && y == nil { return true @@ -404,7 +401,6 @@ func TestNewDialer(t *testing.T) { if err := test.checkFunc(test.want, gotDer, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -467,7 +463,6 @@ func Test_dialer_GetDialer(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -646,6 +641,7 @@ func Test_dialer_lookup(t *testing.T) { }) } } + func Test_dialer_StartDialerCache(t *testing.T) { type args struct { ctx context.Context @@ -1407,7 +1403,6 @@ func Test_dialer_cachedDialer(t *testing.T) { if err := test.checkFunc(d, test.want, gotConn, gotErr); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1612,7 +1607,6 @@ func Test_dialer_dial(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } 
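// Editor's note: the dialer hunks above add an `addrs sync.Map` and an
// addrInfo struct so cachedDialer parses each dial address only once and
// reuses the host/port/isIP result on later dials. Below is a minimal
// standalone sketch of that memoization; the standard library's
// net.SplitHostPort stands in for the repository's net.Parse, and the isIP
// flag is omitted.
package example

import (
	"net"
	"strconv"
	"sync"
)

// addrInfo caches the parsed components of a dial address.
type addrInfo struct {
	host string
	port uint16
}

var addrs sync.Map // map[string]*addrInfo

// parseAddr returns the cached host/port for addr, parsing and storing it on
// first use so repeated dials to the same address skip re-parsing.
func parseAddr(addr string) (string, uint16, error) {
	if v, ok := addrs.Load(addr); ok {
		if info, ok := v.(*addrInfo); ok {
			return info.host, info.port, nil
		}
	}
	h, p, err := net.SplitHostPort(addr)
	if err != nil {
		return "", 0, err
	}
	pn, err := strconv.ParseUint(p, 10, 16)
	if err != nil {
		return "", 0, err
	}
	addrs.Store(addr, &addrInfo{host: h, port: uint16(pn)})
	return h, uint16(pn), nil
}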
} diff --git a/internal/net/tcp/option.go b/internal/net/tcp/option.go index 06d866185f..73fe98d6ef 100644 --- a/internal/net/tcp/option.go +++ b/internal/net/tcp/option.go @@ -25,26 +25,24 @@ import ( "github.com/vdaas/vald/internal/timeutil" ) -// DialerOption represent the functional option for dialer +// DialerOption represent the functional option for dialer. type DialerOption func(*dialer) -var ( - defaultDialerOptions = []DialerOption{ - WithDialerKeepAlive("30s"), - WithDialerTimeout("30s"), - WithEnableDialerDualStack(), - WithDisableDNSCache(), - } -) +var defaultDialerOptions = []DialerOption{ + WithDialerKeepAlive("30s"), + WithDialerTimeout("30s"), + WithEnableDialerDualStack(), + WithDisableDNSCache(), +} -// WithCache returns the functional option to set the cache +// WithCache returns the functional option to set the cache. func WithCache(c cache.Cache) DialerOption { return func(d *dialer) { d.cache = c } } -// WithDNSRefreshDuration returns the functional option to set the DNSRefreshDuration +// WithDNSRefreshDuration returns the functional option to set the DNSRefreshDuration. func WithDNSRefreshDuration(dur string) DialerOption { return func(d *dialer) { if dur == "" { @@ -60,7 +58,7 @@ func WithDNSRefreshDuration(dur string) DialerOption { } } -// WithDNSCacheExpiration returns the functional option to set the DNSCacheExpiration +// WithDNSCacheExpiration returns the functional option to set the DNSCacheExpiration. func WithDNSCacheExpiration(dur string) DialerOption { return func(d *dialer) { if dur == "" { @@ -79,7 +77,7 @@ func WithDNSCacheExpiration(dur string) DialerOption { } } -// WithDialerTimeout returns the functional option to set the DialerTimeout +// WithDialerTimeout returns the functional option to set the DialerTimeout. func WithDialerTimeout(dur string) DialerOption { return func(d *dialer) { if dur == "" { @@ -93,7 +91,7 @@ func WithDialerTimeout(dur string) DialerOption { } } -// WithDialerKeepAlive returns the functional option to set the DialerKeepAlive +// WithDialerKeepAlive returns the functional option to set the DialerKeepAlive. func WithDialerKeepAlive(dur string) DialerOption { return func(d *dialer) { if dur == "" { @@ -107,35 +105,35 @@ func WithDialerKeepAlive(dur string) DialerOption { } } -// WithTLS returns the functional option to set the DialerTLS +// WithTLS returns the functional option to set the DialerTLS. func WithTLS(cfg *tls.Config) DialerOption { return func(d *dialer) { d.tlsConfig = cfg } } -// WithEnableDNSCache returns the functional option to enable DNSCache +// WithEnableDNSCache returns the functional option to enable DNSCache. func WithEnableDNSCache() DialerOption { return func(d *dialer) { d.dnsCache = true } } -// WithDisableDNSCache returns the functional option to disable DNSCache +// WithDisableDNSCache returns the functional option to disable DNSCache. func WithDisableDNSCache() DialerOption { return func(d *dialer) { d.dnsCache = false } } -// WithEnableDialerDualStack returns the functional option to enable DialerDualStack +// WithEnableDialerDualStack returns the functional option to enable DialerDualStack. func WithEnableDialerDualStack() DialerOption { return func(d *dialer) { d.dialerDualStack = true } } -// WithDisableDialerDualStack returns the functional option to disable DialerDualStack +// WithDisableDialerDualStack returns the functional option to disable DialerDualStack. 
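// Editor's note: the option file above follows the functional-option pattern
// used throughout these packages: a package-level slice of defaults that the
// constructor applies before caller-supplied options, so later options win.
// The sketch below shows only the shape of that pattern with a toy config;
// the field and option names here are illustrative, not the dialer's own.
package example

import "time"

// Option mutates a config; defaults run first, caller options override them.
type Option func(*config)

type config struct {
	timeout   time.Duration
	keepAlive time.Duration
	dualStack bool
}

var defaultOptions = []Option{
	WithTimeout(30 * time.Second),
	WithKeepAlive(30 * time.Second),
	WithDualStack(true),
}

func WithTimeout(d time.Duration) Option   { return func(c *config) { c.timeout = d } }
func WithKeepAlive(d time.Duration) Option { return func(c *config) { c.keepAlive = d } }
func WithDualStack(b bool) Option          { return func(c *config) { c.dualStack = b } }

// New applies the defaults, then the caller's options, in order.
func New(opts ...Option) *config {
	c := new(config)
	for _, opt := range append(defaultOptions, opts...) {
		opt(c)
	}
	return c
}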
func WithDisableDialerDualStack() DialerOption { return func(d *dialer) { d.dialerDualStack = false diff --git a/internal/observability/collector/collector_option.go b/internal/observability/collector/collector_option.go index 9549b45fa8..0824a87ad6 100644 --- a/internal/observability/collector/collector_option.go +++ b/internal/observability/collector/collector_option.go @@ -29,12 +29,10 @@ import ( type CollectorOption func(*collector) error -var ( - collectorDefaultOpts = []CollectorOption{ - WithErrGroup(errgroup.Get()), - WithDuration("5s"), - } -) +var collectorDefaultOpts = []CollectorOption{ + WithErrGroup(errgroup.Get()), + WithDuration("5s"), +} func WithErrGroup(eg errgroup.Group) CollectorOption { return func(c *collector) error { diff --git a/internal/observability/collector/collector_test.go b/internal/observability/collector/collector_test.go index f7f7d986a6..da3784646a 100644 --- a/internal/observability/collector/collector_test.go +++ b/internal/observability/collector/collector_test.go @@ -26,7 +26,6 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/observability/metrics" - "go.uber.org/goleak" ) @@ -100,7 +99,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -192,7 +190,6 @@ func Test_collector_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -284,7 +281,6 @@ func Test_collector_Start(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -463,7 +459,6 @@ func Test_collector_collect(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/exporter/jaeger/jaeger_option.go b/internal/observability/exporter/jaeger/jaeger_option.go index 6a8d232383..64fe8a47ff 100644 --- a/internal/observability/exporter/jaeger/jaeger_option.go +++ b/internal/observability/exporter/jaeger/jaeger_option.go @@ -24,16 +24,14 @@ import ( type JaegerOption func(*jaegerOptions) error -var ( - jaegerDefaultOpts = []JaegerOption{ - WithServiceName("vald"), - WithOnErrorFunc(func(err error) { - if err != nil { - log.Warnf("Error when uploading spans to Jaeger: %v", err) - } - }), - } -) +var jaegerDefaultOpts = []JaegerOption{ + WithServiceName("vald"), + WithOnErrorFunc(func(err error) { + if err != nil { + log.Warnf("Error when uploading spans to Jaeger: %v", err) + } + }), +} func WithCollectorEndpoint(cep string) JaegerOption { return func(jo *jaegerOptions) error { diff --git a/internal/observability/exporter/jaeger/jaeger_test.go b/internal/observability/exporter/jaeger/jaeger_test.go index caf605a50b..3f02318767 100644 --- a/internal/observability/exporter/jaeger/jaeger_test.go +++ b/internal/observability/exporter/jaeger/jaeger_test.go @@ -97,7 +97,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotJ, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -185,7 +184,6 @@ func Test_exp_Start(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/exporter/prometheus/prometheus_option.go b/internal/observability/exporter/prometheus/prometheus_option.go index 014ce3d7c4..8228c4025a 100644 --- a/internal/observability/exporter/prometheus/prometheus_option.go +++ 
b/internal/observability/exporter/prometheus/prometheus_option.go @@ -21,17 +21,15 @@ import "github.com/vdaas/vald/internal/log" type PrometheusOption func(*prometheusOptions) error -var ( - prometheusDefaultOpts = []PrometheusOption{ - WithEndpoint("/metrics"), - WithNamespace("vald"), - WithOnErrorFunc(func(err error) { - if err != nil { - log.Warnf("Failed to export to Prometheus: %v", err) - } - }), - } -) +var prometheusDefaultOpts = []PrometheusOption{ + WithEndpoint("/metrics"), + WithNamespace("vald"), + WithOnErrorFunc(func(err error) { + if err != nil { + log.Warnf("Failed to export to Prometheus: %v", err) + } + }), +} func WithEndpoint(ep string) PrometheusOption { return func(po *prometheusOptions) error { diff --git a/internal/observability/exporter/prometheus/prometheus_test.go b/internal/observability/exporter/prometheus/prometheus_test.go index e84ee90140..2f0d4e33b7 100644 --- a/internal/observability/exporter/prometheus/prometheus_test.go +++ b/internal/observability/exporter/prometheus/prometheus_test.go @@ -98,7 +98,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -186,7 +185,6 @@ func Test_exp_Start(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -347,7 +345,6 @@ func Test_exp_NewHTTPHandler(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -412,7 +409,6 @@ func TestExporter(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/exporter/stackdriver/stackdriver_option.go b/internal/observability/exporter/stackdriver/stackdriver_option.go index 2c69058771..2feade8db0 100644 --- a/internal/observability/exporter/stackdriver/stackdriver_option.go +++ b/internal/observability/exporter/stackdriver/stackdriver_option.go @@ -28,20 +28,18 @@ import ( type Option func(e *exp) error -var ( - defaultOpts = []Option{ - WithOnErrorFunc(func(err error) { - if err != nil { - log.Warnf("Error when uploading stats or spans to Stackdriver: %v", err) - } - }), - WithMonitoredResource(monitoredresource.Autodetect()), - WithMetricPrefix("vald.vdaas.org/"), - WithTimeout("5s"), - WithReportingInterval("0"), - WithNumberOfWorkers(1), - } -) +var defaultOpts = []Option{ + WithOnErrorFunc(func(err error) { + if err != nil { + log.Warnf("Error when uploading stats or spans to Stackdriver: %v", err) + } + }), + WithMonitoredResource(monitoredresource.Autodetect()), + WithMetricPrefix("vald.vdaas.org/"), + WithTimeout("5s"), + WithReportingInterval("0"), + WithNumberOfWorkers(1), +} func WithMonitoring(enabled bool) Option { return func(e *exp) error { diff --git a/internal/observability/exporter/stackdriver/stackdriver_test.go b/internal/observability/exporter/stackdriver/stackdriver_test.go index a14ef7e8e7..533601741d 100644 --- a/internal/observability/exporter/stackdriver/stackdriver_test.go +++ b/internal/observability/exporter/stackdriver/stackdriver_test.go @@ -97,7 +97,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotS, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -193,7 +192,6 @@ func Test_exp_Start(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/metrics/agent/core/ngt/ngt.go b/internal/observability/metrics/agent/core/ngt/ngt.go index 
2215455aef..e0ff25767d 100644 --- a/internal/observability/metrics/agent/core/ngt/ngt.go +++ b/internal/observability/metrics/agent/core/ngt/ngt.go @@ -103,49 +103,49 @@ func (n *ngtMetrics) MeasurementWithTags(ctx context.Context) ([]metrics.Measure func (n *ngtMetrics) View() []*metrics.View { return []*metrics.View{ - &metrics.View{ + { Name: "ngt_index_count", Description: n.indexCount.Description(), Measure: &n.indexCount, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "ngt_uncommitted_index_count", Description: n.uncommittedIndexCount.Description(), Measure: &n.uncommittedIndexCount, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "ngt_insert_vcache_count", Description: n.insertVCacheCount.Description(), Measure: &n.insertVCacheCount, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "ngt_delete_vcache_count", Description: n.deleteVCacheCount.Description(), Measure: &n.deleteVCacheCount, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "ngt_completed_create_index_total", Description: n.completedCreateIndexTotal.Description(), Measure: &n.completedCreateIndexTotal, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "ngt_executed_proactive_gc_total", Description: n.executedProactiveGCTotal.Description(), Measure: &n.executedProactiveGCTotal, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "ngt_is_indexing", Description: n.isIndexing.Description(), Measure: &n.isIndexing, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "ngt_is_saving", Description: n.isSaving.Description(), Measure: &n.isSaving, diff --git a/internal/observability/metrics/agent/core/ngt/ngt_test.go b/internal/observability/metrics/agent/core/ngt/ngt_test.go index 1236c23cc8..891a84afdb 100644 --- a/internal/observability/metrics/agent/core/ngt/ngt_test.go +++ b/internal/observability/metrics/agent/core/ngt/ngt_test.go @@ -25,11 +25,11 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/observability/metrics" "github.com/vdaas/vald/pkg/agent/core/ngt/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { n service.NGT } @@ -80,7 +80,8 @@ func TestNew(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -95,22 +96,25 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngtMetrics_Measurement(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } type fields struct { - ngt service.NGT - indexCount metrics.Int64Measure - uncommittedIndexCount metrics.Int64Measure - insertVCacheCount metrics.Int64Measure - deleteVCacheCount metrics.Int64Measure - isIndexing metrics.Int64Measure + ngt service.NGT + indexCount metrics.Int64Measure + uncommittedIndexCount metrics.Int64Measure + insertVCacheCount metrics.Int64Measure + deleteVCacheCount metrics.Int64Measure + completedCreateIndexTotal metrics.Int64Measure + executedProactiveGCTotal metrics.Int64Measure + isIndexing metrics.Int64Measure + isSaving metrics.Int64Measure } type want struct { want []metrics.Measurement @@ -148,7 +152,10 @@ func Test_ngtMetrics_Measurement(t *testing.T) { uncommittedIndexCount: nil, insertVCacheCount: nil, deleteVCacheCount: nil, + completedCreateIndexTotal: nil, + executedProactiveGCTotal: nil, isIndexing: nil, 
+ isSaving: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -169,7 +176,10 @@ func Test_ngtMetrics_Measurement(t *testing.T) { uncommittedIndexCount: nil, insertVCacheCount: nil, deleteVCacheCount: nil, + completedCreateIndexTotal: nil, + executedProactiveGCTotal: nil, isIndexing: nil, + isSaving: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -180,7 +190,8 @@ func Test_ngtMetrics_Measurement(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -191,34 +202,40 @@ func Test_ngtMetrics_Measurement(t *testing.T) { test.checkFunc = defaultCheckFunc } n := &ngtMetrics{ - ngt: test.fields.ngt, - indexCount: test.fields.indexCount, - uncommittedIndexCount: test.fields.uncommittedIndexCount, - insertVCacheCount: test.fields.insertVCacheCount, - deleteVCacheCount: test.fields.deleteVCacheCount, - isIndexing: test.fields.isIndexing, + ngt: test.fields.ngt, + indexCount: test.fields.indexCount, + uncommittedIndexCount: test.fields.uncommittedIndexCount, + insertVCacheCount: test.fields.insertVCacheCount, + deleteVCacheCount: test.fields.deleteVCacheCount, + completedCreateIndexTotal: test.fields.completedCreateIndexTotal, + executedProactiveGCTotal: test.fields.executedProactiveGCTotal, + isIndexing: test.fields.isIndexing, + isSaving: test.fields.isSaving, } got, err := n.Measurement(test.args.ctx) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngtMetrics_MeasurementWithTags(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } type fields struct { - ngt service.NGT - indexCount metrics.Int64Measure - uncommittedIndexCount metrics.Int64Measure - insertVCacheCount metrics.Int64Measure - deleteVCacheCount metrics.Int64Measure - isIndexing metrics.Int64Measure + ngt service.NGT + indexCount metrics.Int64Measure + uncommittedIndexCount metrics.Int64Measure + insertVCacheCount metrics.Int64Measure + deleteVCacheCount metrics.Int64Measure + completedCreateIndexTotal metrics.Int64Measure + executedProactiveGCTotal metrics.Int64Measure + isIndexing metrics.Int64Measure + isSaving metrics.Int64Measure } type want struct { want []metrics.MeasurementWithTags @@ -256,7 +273,10 @@ func Test_ngtMetrics_MeasurementWithTags(t *testing.T) { uncommittedIndexCount: nil, insertVCacheCount: nil, deleteVCacheCount: nil, + completedCreateIndexTotal: nil, + executedProactiveGCTotal: nil, isIndexing: nil, + isSaving: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -277,7 +297,10 @@ func Test_ngtMetrics_MeasurementWithTags(t *testing.T) { uncommittedIndexCount: nil, insertVCacheCount: nil, deleteVCacheCount: nil, + completedCreateIndexTotal: nil, + executedProactiveGCTotal: nil, isIndexing: nil, + isSaving: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -288,7 +311,8 @@ func Test_ngtMetrics_MeasurementWithTags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -299,31 +323,37 @@ func Test_ngtMetrics_MeasurementWithTags(t *testing.T) { test.checkFunc = defaultCheckFunc } n := &ngtMetrics{ - ngt: test.fields.ngt, - indexCount: test.fields.indexCount, - uncommittedIndexCount: test.fields.uncommittedIndexCount, - insertVCacheCount: test.fields.insertVCacheCount, - deleteVCacheCount: 
test.fields.deleteVCacheCount, - isIndexing: test.fields.isIndexing, + ngt: test.fields.ngt, + indexCount: test.fields.indexCount, + uncommittedIndexCount: test.fields.uncommittedIndexCount, + insertVCacheCount: test.fields.insertVCacheCount, + deleteVCacheCount: test.fields.deleteVCacheCount, + completedCreateIndexTotal: test.fields.completedCreateIndexTotal, + executedProactiveGCTotal: test.fields.executedProactiveGCTotal, + isIndexing: test.fields.isIndexing, + isSaving: test.fields.isSaving, } got, err := n.MeasurementWithTags(test.args.ctx) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngtMetrics_View(t *testing.T) { + t.Parallel() type fields struct { - ngt service.NGT - indexCount metrics.Int64Measure - uncommittedIndexCount metrics.Int64Measure - insertVCacheCount metrics.Int64Measure - deleteVCacheCount metrics.Int64Measure - isIndexing metrics.Int64Measure + ngt service.NGT + indexCount metrics.Int64Measure + uncommittedIndexCount metrics.Int64Measure + insertVCacheCount metrics.Int64Measure + deleteVCacheCount metrics.Int64Measure + completedCreateIndexTotal metrics.Int64Measure + executedProactiveGCTotal metrics.Int64Measure + isIndexing metrics.Int64Measure + isSaving metrics.Int64Measure } type want struct { want []*metrics.View @@ -353,7 +383,10 @@ func Test_ngtMetrics_View(t *testing.T) { uncommittedIndexCount: nil, insertVCacheCount: nil, deleteVCacheCount: nil, + completedCreateIndexTotal: nil, + executedProactiveGCTotal: nil, isIndexing: nil, + isSaving: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -371,7 +404,10 @@ func Test_ngtMetrics_View(t *testing.T) { uncommittedIndexCount: nil, insertVCacheCount: nil, deleteVCacheCount: nil, + completedCreateIndexTotal: nil, + executedProactiveGCTotal: nil, isIndexing: nil, + isSaving: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -382,7 +418,8 @@ func Test_ngtMetrics_View(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -393,19 +430,21 @@ func Test_ngtMetrics_View(t *testing.T) { test.checkFunc = defaultCheckFunc } n := &ngtMetrics{ - ngt: test.fields.ngt, - indexCount: test.fields.indexCount, - uncommittedIndexCount: test.fields.uncommittedIndexCount, - insertVCacheCount: test.fields.insertVCacheCount, - deleteVCacheCount: test.fields.deleteVCacheCount, - isIndexing: test.fields.isIndexing, + ngt: test.fields.ngt, + indexCount: test.fields.indexCount, + uncommittedIndexCount: test.fields.uncommittedIndexCount, + insertVCacheCount: test.fields.insertVCacheCount, + deleteVCacheCount: test.fields.deleteVCacheCount, + completedCreateIndexTotal: test.fields.completedCreateIndexTotal, + executedProactiveGCTotal: test.fields.executedProactiveGCTotal, + isIndexing: test.fields.isIndexing, + isSaving: test.fields.isSaving, } got := n.View() if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/metrics/agent/sidecar/sidecar.go b/internal/observability/metrics/agent/sidecar/sidecar.go index d900d22245..c5fe73dff1 100644 --- a/internal/observability/metrics/agent/sidecar/sidecar.go +++ b/internal/observability/metrics/agent/sidecar/sidecar.go @@ -105,21 +105,21 @@ func (sm *sidecarMetrics) View() []*metrics.View { } return []*metrics.View{ - &metrics.View{ + { Name: "agent_sidecar_completed_upload_total", Description: 
sm.uploadTotal.Description(), TagKeys: uploadKeys, Measure: &sm.uploadTotal, Aggregation: metrics.Count(), }, - &metrics.View{ + { Name: "agent_sidecar_upload_bytes", Description: sm.uploadBytes.Description(), TagKeys: uploadKeys, Measure: &sm.uploadBytes, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "agent_sidecar_upload_latency", Description: sm.uploadLatency.Description(), TagKeys: uploadKeys, diff --git a/internal/observability/metrics/agent/sidecar/sidecar_test.go b/internal/observability/metrics/agent/sidecar/sidecar_test.go index 30db37b31d..cad680582d 100644 --- a/internal/observability/metrics/agent/sidecar/sidecar_test.go +++ b/internal/observability/metrics/agent/sidecar/sidecar_test.go @@ -30,6 +30,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type want struct { want MetricsHook err error @@ -74,6 +75,7 @@ func TestNew(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -89,12 +91,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_sidecarMetrics_Measurement(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -180,6 +182,7 @@ func Test_sidecarMetrics_Measurement(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -205,12 +208,12 @@ func Test_sidecarMetrics_Measurement(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_sidecarMetrics_MeasurementWithTags(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -296,6 +299,7 @@ func Test_sidecarMetrics_MeasurementWithTags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -321,12 +325,12 @@ func Test_sidecarMetrics_MeasurementWithTags(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_sidecarMetrics_View(t *testing.T) { + t.Parallel() type fields struct { uploadTotal metrics.Int64Measure uploadBytes metrics.Int64Measure @@ -398,6 +402,7 @@ func Test_sidecarMetrics_View(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -423,12 +428,12 @@ func Test_sidecarMetrics_View(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_sidecarMetrics_BeforeProcess(t *testing.T) { + t.Parallel() type args struct { ctx context.Context info *observer.BackupInfo @@ -517,6 +522,7 @@ func Test_sidecarMetrics_BeforeProcess(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -542,12 +548,12 @@ func Test_sidecarMetrics_BeforeProcess(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_sidecarMetrics_AfterProcess(t *testing.T) { + t.Parallel() type args struct { ctx context.Context info *observer.BackupInfo @@ -632,6 +638,7 @@ func Test_sidecarMetrics_AfterProcess(t 
*testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -657,7 +664,6 @@ func Test_sidecarMetrics_AfterProcess(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/metrics/db/kvs/redis/redis.go b/internal/observability/metrics/db/kvs/redis/redis.go index b78619ea29..b0104fdc39 100644 --- a/internal/observability/metrics/db/kvs/redis/redis.go +++ b/internal/observability/metrics/db/kvs/redis/redis.go @@ -49,8 +49,10 @@ type MetricsHook interface { redis.Hook } -type startTimeKey struct{} -type pipelineStartTimeKey struct{} +type ( + startTimeKey struct{} + pipelineStartTimeKey struct{} +) func New() (o MetricsHook, err error) { rms := new(redisMetrics) @@ -118,28 +120,28 @@ func (rm *redisMetrics) View() []*metrics.View { } return []*metrics.View{ - &metrics.View{ + { Name: "db_kvs_redis_completed_query_total", Description: rm.queryTotal.Description(), TagKeys: queryKeys, Measure: &rm.queryTotal, Aggregation: metrics.Count(), }, - &metrics.View{ + { Name: "db_kvs_redis_query_latency", Description: rm.queryLatency.Description(), TagKeys: queryKeys, Measure: &rm.queryLatency, Aggregation: metrics.DefaultMillisecondsDistribution, }, - &metrics.View{ + { Name: "db_kvs_redis_completed_pipeline_total", Description: rm.pipelineTotal.Description(), TagKeys: pipelineKeys, Measure: &rm.pipelineTotal, Aggregation: metrics.Count(), }, - &metrics.View{ + { Name: "db_kvs_redis_pipeline_latency", Description: rm.pipelineLatency.Description(), TagKeys: pipelineKeys, diff --git a/internal/observability/metrics/db/kvs/redis/redis_test.go b/internal/observability/metrics/db/kvs/redis/redis_test.go index 9c232cf840..0e16a3ef93 100644 --- a/internal/observability/metrics/db/kvs/redis/redis_test.go +++ b/internal/observability/metrics/db/kvs/redis/redis_test.go @@ -30,6 +30,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type want struct { wantO MetricsHook err error @@ -74,6 +75,7 @@ func TestNew(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -89,12 +91,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotO, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_redisMetrics_Measurement(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -180,6 +182,7 @@ func Test_redisMetrics_Measurement(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -205,12 +208,12 @@ func Test_redisMetrics_Measurement(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_redisMetrics_MeasurementWithTags(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -296,6 +299,7 @@ func Test_redisMetrics_MeasurementWithTags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -321,12 +325,12 @@ func Test_redisMetrics_MeasurementWithTags(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func 
Test_redisMetrics_View(t *testing.T) { + t.Parallel() type fields struct { queryTotal metrics.Int64Measure queryLatency metrics.Float64Measure @@ -398,6 +402,7 @@ func Test_redisMetrics_View(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -423,12 +428,12 @@ func Test_redisMetrics_View(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_redisMetrics_BeforeProcess(t *testing.T) { + t.Parallel() type args struct { ctx context.Context cmd redis.Cmder @@ -517,6 +522,7 @@ func Test_redisMetrics_BeforeProcess(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -542,12 +548,12 @@ func Test_redisMetrics_BeforeProcess(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_redisMetrics_AfterProcess(t *testing.T) { + t.Parallel() type args struct { ctx context.Context cmd redis.Cmder @@ -632,6 +638,7 @@ func Test_redisMetrics_AfterProcess(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -657,12 +664,12 @@ func Test_redisMetrics_AfterProcess(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_redisMetrics_BeforeProcessPipeline(t *testing.T) { + t.Parallel() type args struct { ctx context.Context cmds []redis.Cmder @@ -751,6 +758,7 @@ func Test_redisMetrics_BeforeProcessPipeline(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -776,12 +784,12 @@ func Test_redisMetrics_BeforeProcessPipeline(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_redisMetrics_AfterProcessPipeline(t *testing.T) { + t.Parallel() type args struct { ctx context.Context cmds []redis.Cmder @@ -866,6 +874,7 @@ func Test_redisMetrics_AfterProcessPipeline(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -891,7 +900,6 @@ func Test_redisMetrics_AfterProcessPipeline(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/metrics/db/nosql/cassandra/cassandra.go b/internal/observability/metrics/db/nosql/cassandra/cassandra.go index 55560c4206..9f20b2e23a 100644 --- a/internal/observability/metrics/db/nosql/cassandra/cassandra.go +++ b/internal/observability/metrics/db/nosql/cassandra/cassandra.go @@ -135,21 +135,21 @@ func (cm *cassandraMetrics) View() []*metrics.View { } return []*metrics.View{ - &metrics.View{ + { Name: "db_nosql_cassandra_completed_query_total", Description: cm.queryTotal.Description(), TagKeys: keys, Measure: &cm.queryTotal, Aggregation: metrics.Count(), }, - &metrics.View{ + { Name: "db_nosql_cassandra_query_attempts_total", Description: cm.queryAttemptsTotal.Description(), TagKeys: keys, Measure: &cm.queryAttemptsTotal, Aggregation: metrics.Count(), }, - &metrics.View{ + { Name: 
"db_nosql_cassandra_query_latency", Description: cm.queryLatency.Description(), TagKeys: keys, diff --git a/internal/observability/metrics/db/nosql/cassandra/cassandra_test.go b/internal/observability/metrics/db/nosql/cassandra/cassandra_test.go index 7ee0e507d4..ed1278cca8 100644 --- a/internal/observability/metrics/db/nosql/cassandra/cassandra_test.go +++ b/internal/observability/metrics/db/nosql/cassandra/cassandra_test.go @@ -30,6 +30,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type want struct { wantO Observer err error @@ -74,6 +75,7 @@ func TestNew(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -89,12 +91,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotO, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_cassandraMetrics_Measurement(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -192,6 +194,7 @@ func Test_cassandraMetrics_Measurement(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -221,12 +224,12 @@ func Test_cassandraMetrics_Measurement(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_cassandraMetrics_MeasurementWithTags(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -324,6 +327,7 @@ func Test_cassandraMetrics_MeasurementWithTags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -353,12 +357,12 @@ func Test_cassandraMetrics_MeasurementWithTags(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_cassandraMetrics_View(t *testing.T) { + t.Parallel() type fields struct { queryTotal metrics.Int64Measure queryAttemptsTotal metrics.Int64Measure @@ -442,6 +446,7 @@ func Test_cassandraMetrics_View(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -471,12 +476,12 @@ func Test_cassandraMetrics_View(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_cassandraMetrics_ObserveQuery(t *testing.T) { + t.Parallel() type args struct { ctx context.Context q cassandra.ObservedQuery @@ -569,6 +574,7 @@ func Test_cassandraMetrics_ObserveQuery(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) diff --git a/internal/observability/metrics/db/rdb/mysql/mysql.go b/internal/observability/metrics/db/rdb/mysql/mysql.go index 378be0cfac..b4cdc1876f 100644 --- a/internal/observability/metrics/db/rdb/mysql/mysql.go +++ b/internal/observability/metrics/db/rdb/mysql/mysql.go @@ -81,13 +81,13 @@ func (mm *mysqlMetrics) MeasurementWithTags(ctx context.Context) ([]metrics.Meas func (mm *mysqlMetrics) View() []*metrics.View { return []*metrics.View{ - &metrics.View{ + { Name: "db_rdb_mysql_completed_query_total", Description: mm.queryTotal.Description(), Measure: &mm.queryTotal, Aggregation: metrics.Count(), }, - &metrics.View{ + { Name: 
"db_rdb_mysql_query_latency", Description: mm.queryLatency.Description(), Measure: &mm.queryLatency, diff --git a/internal/observability/metrics/db/rdb/mysql/mysql_test.go b/internal/observability/metrics/db/rdb/mysql/mysql_test.go index 95e287b918..214676254b 100644 --- a/internal/observability/metrics/db/rdb/mysql/mysql_test.go +++ b/internal/observability/metrics/db/rdb/mysql/mysql_test.go @@ -30,6 +30,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type want struct { wantE EventReceiver err error @@ -74,6 +75,7 @@ func TestNew(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -89,12 +91,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotE, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_mysqlMetrics_Measurement(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -171,6 +173,7 @@ func Test_mysqlMetrics_Measurement(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -193,12 +196,12 @@ func Test_mysqlMetrics_Measurement(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_mysqlMetrics_MeasurementWithTags(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -275,6 +278,7 @@ func Test_mysqlMetrics_MeasurementWithTags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -297,12 +301,12 @@ func Test_mysqlMetrics_MeasurementWithTags(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_mysqlMetrics_View(t *testing.T) { + t.Parallel() type fields struct { queryTotal metrics.Int64Measure queryLatency metrics.Float64Measure @@ -365,6 +369,7 @@ func Test_mysqlMetrics_View(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -387,12 +392,12 @@ func Test_mysqlMetrics_View(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_mysqlMetrics_SpanStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context eventName string @@ -471,6 +476,7 @@ func Test_mysqlMetrics_SpanStart(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -493,12 +499,12 @@ func Test_mysqlMetrics_SpanStart(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_mysqlMetrics_SpanError(t *testing.T) { + t.Parallel() type args struct { ctx context.Context err error @@ -570,6 +576,7 @@ func Test_mysqlMetrics_SpanError(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -597,6 +604,7 @@ func Test_mysqlMetrics_SpanError(t *testing.T) { } func Test_mysqlMetrics_SpanFinish(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -665,6 +673,7 @@ func 
Test_mysqlMetrics_SpanFinish(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) diff --git a/internal/observability/metrics/manager/compressor/compressor.go b/internal/observability/metrics/manager/compressor/compressor.go index 48139bca4a..27755d3a8c 100644 --- a/internal/observability/metrics/manager/compressor/compressor.go +++ b/internal/observability/metrics/manager/compressor/compressor.go @@ -83,37 +83,37 @@ func (c *compressorMetrics) MeasurementWithTags(ctx context.Context) ([]metrics. func (c *compressorMetrics) View() []*metrics.View { return []*metrics.View{ - &metrics.View{ + { Name: "compressor_compressor_buffer", Description: c.compressorBuffer.Description(), Measure: &c.compressorBuffer, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "compressor_compressor_requested_jobs_total", Description: c.compressorTotalRequestedJob.Description(), Measure: &c.compressorTotalRequestedJob, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "compressor_compressor_completed_jobs_total", Description: c.compressorTotalCompletedJob.Description(), Measure: &c.compressorTotalCompletedJob, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "compressor_registerer_buffer", Description: c.registererBuffer.Description(), Measure: &c.registererBuffer, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "compressor_registerer_requested_jobs_total", Description: c.registererTotalRequestedJob.Description(), Measure: &c.registererTotalRequestedJob, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "compressor_registerer_completed_jobs_total", Description: c.registererTotalCompletedJob.Description(), Measure: &c.registererTotalCompletedJob, diff --git a/internal/observability/metrics/manager/compressor/compressor_test.go b/internal/observability/metrics/manager/compressor/compressor_test.go index 9adaa426c5..859a6ff449 100644 --- a/internal/observability/metrics/manager/compressor/compressor_test.go +++ b/internal/observability/metrics/manager/compressor/compressor_test.go @@ -25,11 +25,11 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/observability/metrics" "github.com/vdaas/vald/pkg/manager/compressor/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { c service.Compressor r service.Registerer @@ -83,7 +83,8 @@ func TestNew(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -98,12 +99,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressorMetrics_Measurement(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -189,7 +190,8 @@ func Test_compressorMetrics_Measurement(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -214,12 +216,12 @@ func Test_compressorMetrics_Measurement(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressorMetrics_MeasurementWithTags(t *testing.T) { + t.Parallel() type args struct 
{ ctx context.Context } @@ -305,7 +307,8 @@ func Test_compressorMetrics_MeasurementWithTags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -330,12 +333,12 @@ func Test_compressorMetrics_MeasurementWithTags(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressorMetrics_View(t *testing.T) { + t.Parallel() type fields struct { compressor service.Compressor registerer service.Registerer @@ -407,7 +410,8 @@ func Test_compressorMetrics_View(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -432,7 +436,6 @@ func Test_compressorMetrics_View(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/metrics/manager/index/index.go b/internal/observability/metrics/manager/index/index.go index 38dae8589f..a2ed8e93eb 100644 --- a/internal/observability/metrics/manager/index/index.go +++ b/internal/observability/metrics/manager/index/index.go @@ -68,19 +68,19 @@ func (i *indexerMetrics) MeasurementWithTags(ctx context.Context) ([]metrics.Mea func (i *indexerMetrics) View() []*metrics.View { return []*metrics.View{ - &metrics.View{ + { Name: "indexer_uuid_count", Description: i.uuidCount.Description(), Measure: &i.uuidCount, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "indexer_uncommitted_uuid_count", Description: i.uncommittedUUIDCount.Description(), Measure: &i.uncommittedUUIDCount, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "indexer_is_indexing", Description: i.isIndexing.Description(), Measure: &i.isIndexing, diff --git a/internal/observability/metrics/manager/index/index_test.go b/internal/observability/metrics/manager/index/index_test.go index 9d1629af16..50da84446a 100644 --- a/internal/observability/metrics/manager/index/index_test.go +++ b/internal/observability/metrics/manager/index/index_test.go @@ -29,6 +29,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { i service.Indexer } @@ -79,6 +80,7 @@ func TestNew(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -94,12 +96,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_indexerMetrics_Measurement(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -173,6 +175,7 @@ func Test_indexerMetrics_Measurement(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -194,12 +197,12 @@ func Test_indexerMetrics_Measurement(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_indexerMetrics_MeasurementWithTags(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -273,6 +276,7 @@ func Test_indexerMetrics_MeasurementWithTags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer 
goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -294,12 +298,12 @@ func Test_indexerMetrics_MeasurementWithTags(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_indexerMetrics_View(t *testing.T) { + t.Parallel() type fields struct { indexer service.Indexer uuidCount metrics.Int64Measure @@ -359,6 +363,7 @@ func Test_indexerMetrics_View(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -380,7 +385,6 @@ func Test_indexerMetrics_View(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/metrics/mem/mem.go b/internal/observability/metrics/mem/mem.go index 887d273f9f..38c8e0fa71 100644 --- a/internal/observability/metrics/mem/mem.go +++ b/internal/observability/metrics/mem/mem.go @@ -94,85 +94,85 @@ func (m *memory) MeasurementWithTags(ctx context.Context) ([]metrics.Measurement func (m *memory) View() []*metrics.View { return []*metrics.View{ - &metrics.View{ + { Name: "alloc_bytes", Description: m.alloc.Description(), Measure: &m.alloc, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "alloc_bytes_total", Description: m.totalAlloc.Description(), Measure: &m.totalAlloc, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "sys_bytes", Description: m.sys.Description(), Measure: &m.sys, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "mallocs_total", Description: m.mallocs.Description(), Measure: &m.mallocs, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "frees_total", Description: m.frees.Description(), Measure: &m.frees, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "heap_alloc_bytes", Description: m.heapAlloc.Description(), Measure: &m.heapAlloc, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "heap_sys_bytes", Description: m.heapSys.Description(), Measure: &m.heapSys, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "heap_idle_bytes", Description: m.heapIdle.Description(), Measure: &m.heapIdle, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "heap_inuse_bytes", Description: m.heapInuse.Description(), Measure: &m.heapInuse, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "heap_released_bytes", Description: m.heapReleased.Description(), Measure: &m.heapReleased, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "stack_inuse_bytes", Description: m.stackInuse.Description(), Measure: &m.stackInuse, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "stack_sys_bytes", Description: m.stackSys.Description(), Measure: &m.stackSys, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "pause_ms_total", Description: m.pauseTotalMs.Description(), Measure: &m.pauseTotalMs, Aggregation: metrics.LastValue(), }, - &metrics.View{ + { Name: "gc_count", Description: m.numGC.Description(), Measure: &m.numGC, diff --git a/internal/observability/metrics/mem/mem_test.go b/internal/observability/metrics/mem/mem_test.go index e2dc5fc97e..4a99025298 100644 --- a/internal/observability/metrics/mem/mem_test.go +++ b/internal/observability/metrics/mem/mem_test.go @@ -28,6 +28,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type want struct { want metrics.Metric } @@ -68,6 +69,7 @@ func TestNew(t *testing.T) { 
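The recurring `&metrics.View{...}` → `{...}` rewrites in these View() hunks lean on Go's composite-literal elision: inside a `[]*T` literal each element may drop the `&T` prefix and the compiler still takes the address of the element literal, the same simplification tools such as `gofmt -s` typically apply. Below is a minimal, self-contained sketch of the before/after forms; the `View` struct is a stand-in for illustration, not the repository's `metrics.View` alias.

```go
package main

import "fmt"

// View is a stand-in for an OpenCensus-style view definition used only to
// demonstrate composite-literal elision; it is not the project's metrics.View.
type View struct {
	Name        string
	Description string
}

func main() {
	// Explicit form: each element spells out &View{...}.
	explicit := []*View{
		&View{Name: "alloc_bytes", Description: "allocated bytes"},
	}

	// Elided form: inside a []*View literal the &View prefix may be omitted,
	// which is what the hunks above switch to.
	elided := []*View{
		{Name: "alloc_bytes", Description: "allocated bytes"},
	}

	fmt.Println(explicit[0].Name == elided[0].Name) // true
}
```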
for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -83,12 +85,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_memory_Measurement(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -192,6 +194,7 @@ func Test_memory_Measurement(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -223,12 +226,12 @@ func Test_memory_Measurement(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_memory_MeasurementWithTags(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -332,6 +335,7 @@ func Test_memory_MeasurementWithTags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -363,12 +367,12 @@ func Test_memory_MeasurementWithTags(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_memory_View(t *testing.T) { + t.Parallel() type fields struct { alloc metrics.Int64Measure totalAlloc metrics.Int64Measure @@ -458,6 +462,7 @@ func Test_memory_View(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -489,7 +494,6 @@ func Test_memory_View(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/metrics/metrics.go b/internal/observability/metrics/metrics.go index ba33f7fd48..6a880edf2a 100644 --- a/internal/observability/metrics/metrics.go +++ b/internal/observability/metrics/metrics.go @@ -130,11 +130,15 @@ var ( ValdOrg = "vald.vdaas.org" ) -type Measurement = stats.Measurement -type View = view.View +type ( + Measurement = stats.Measurement + View = view.View +) -type Int64Measure = stats.Int64Measure -type Float64Measure = stats.Float64Measure +type ( + Int64Measure = stats.Int64Measure + Float64Measure = stats.Float64Measure +) type Key = tag.Key diff --git a/internal/observability/metrics/metrics_test.go b/internal/observability/metrics/metrics_test.go index a588a23684..d669c33a2e 100644 --- a/internal/observability/metrics/metrics_test.go +++ b/internal/observability/metrics/metrics_test.go @@ -23,11 +23,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestRegisterView(t *testing.T) { + t.Parallel() type args struct { views []*View } @@ -78,7 +78,8 @@ func TestRegisterView(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -93,12 +94,12 @@ func TestRegisterView(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestRecord(t *testing.T) { + t.Parallel() type args struct { ctx context.Context ms []Measurement @@ -148,7 +149,8 @@ func TestRecord(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + 
defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -168,6 +170,7 @@ func TestRecord(t *testing.T) { } func TestRecordWithTags(t *testing.T) { + t.Parallel() type args struct { ctx context.Context mwts []MeasurementWithTags @@ -221,7 +224,8 @@ func TestRecordWithTags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -236,12 +240,12 @@ func TestRecordWithTags(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestMeasurementsCount(t *testing.T) { + t.Parallel() type args struct { m Metric } @@ -292,7 +296,8 @@ func TestMeasurementsCount(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -307,7 +312,6 @@ func TestMeasurementsCount(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/metrics/runtime/cgo/cgo.go b/internal/observability/metrics/runtime/cgo/cgo.go index 29012f3226..d00e44e1a7 100644 --- a/internal/observability/metrics/runtime/cgo/cgo.go +++ b/internal/observability/metrics/runtime/cgo/cgo.go @@ -46,7 +46,7 @@ func (c *cgo) MeasurementWithTags(ctx context.Context) ([]metrics.MeasurementWit func (c *cgo) View() []*metrics.View { return []*metrics.View{ - &metrics.View{ + { Name: "cgo_call_count", Description: c.count.Description(), Measure: &c.count, diff --git a/internal/observability/metrics/runtime/cgo/cgo_test.go b/internal/observability/metrics/runtime/cgo/cgo_test.go index 781b1ad2d7..9a39b7d488 100644 --- a/internal/observability/metrics/runtime/cgo/cgo_test.go +++ b/internal/observability/metrics/runtime/cgo/cgo_test.go @@ -24,11 +24,11 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/observability/metrics" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type want struct { want metrics.Metric } @@ -69,7 +69,8 @@ func TestNew(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -84,12 +85,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_cgo_Measurement(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -154,7 +155,8 @@ func Test_cgo_Measurement(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -172,12 +174,12 @@ func Test_cgo_Measurement(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_cgo_MeasurementWithTags(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -242,7 +244,8 @@ func Test_cgo_MeasurementWithTags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -260,12 +263,12 @@ func 
Test_cgo_MeasurementWithTags(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_cgo_View(t *testing.T) { + t.Parallel() type fields struct { count metrics.Int64Measure } @@ -316,7 +319,8 @@ func Test_cgo_View(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -334,7 +338,6 @@ func Test_cgo_View(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/metrics/runtime/goroutine/goroutine.go b/internal/observability/metrics/runtime/goroutine/goroutine.go index 61f8850a32..bb5efebf48 100644 --- a/internal/observability/metrics/runtime/goroutine/goroutine.go +++ b/internal/observability/metrics/runtime/goroutine/goroutine.go @@ -46,7 +46,7 @@ func (g *goroutines) MeasurementWithTags(ctx context.Context) ([]metrics.Measure func (g *goroutines) View() []*metrics.View { return []*metrics.View{ - &metrics.View{ + { Name: "goroutine_count", Description: g.count.Description(), Measure: &g.count, diff --git a/internal/observability/metrics/runtime/goroutine/goroutine_test.go b/internal/observability/metrics/runtime/goroutine/goroutine_test.go index 5bbba043eb..e9e9fdcdd2 100644 --- a/internal/observability/metrics/runtime/goroutine/goroutine_test.go +++ b/internal/observability/metrics/runtime/goroutine/goroutine_test.go @@ -24,11 +24,11 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/observability/metrics" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type want struct { want metrics.Metric } @@ -69,7 +69,8 @@ func TestNew(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -84,12 +85,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_goroutines_Measurement(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -154,7 +155,8 @@ func Test_goroutines_Measurement(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -172,12 +174,12 @@ func Test_goroutines_Measurement(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_goroutines_MeasurementWithTags(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -242,7 +244,8 @@ func Test_goroutines_MeasurementWithTags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -260,12 +263,12 @@ func Test_goroutines_MeasurementWithTags(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_goroutines_View(t *testing.T) { + t.Parallel() type fields struct { count metrics.Int64Measure } @@ -316,7 +319,8 @@ func Test_goroutines_View(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + 
tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -334,7 +338,6 @@ func Test_goroutines_View(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/metrics/version/version.go b/internal/observability/metrics/version/version.go index e850f43d5f..a16643ccb5 100644 --- a/internal/observability/metrics/version/version.go +++ b/internal/observability/metrics/version/version.go @@ -27,9 +27,7 @@ import ( "github.com/vdaas/vald/internal/observability/metrics" ) -var ( - reps = strings.NewReplacer("_", " ", ",omitempty", "") -) +var reps = strings.NewReplacer("_", " ", ",omitempty", "") type version struct { info metrics.Int64Measure @@ -95,7 +93,7 @@ func (v *version) Measurement(ctx context.Context) ([]metrics.Measurement, error func (v *version) MeasurementWithTags(ctx context.Context) ([]metrics.MeasurementWithTags, error) { return []metrics.MeasurementWithTags{ - metrics.MeasurementWithTags{ + { Measurement: v.info.M(int64(1)), Tags: v.kvs, }, @@ -109,7 +107,7 @@ func (v *version) View() []*metrics.View { } return []*metrics.View{ - &metrics.View{ + { Name: "app_version_info", Description: v.info.Description(), TagKeys: keys, diff --git a/internal/observability/metrics/version/version_test.go b/internal/observability/metrics/version/version_test.go index 9378dc6b89..7e134e8307 100644 --- a/internal/observability/metrics/version/version_test.go +++ b/internal/observability/metrics/version/version_test.go @@ -28,6 +28,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { labels []string } @@ -82,6 +83,7 @@ func TestNew(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -97,12 +99,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_labelKVs(t *testing.T) { + t.Parallel() type args struct { labels []string } @@ -157,6 +159,7 @@ func Test_labelKVs(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -172,12 +175,12 @@ func Test_labelKVs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_version_Measurement(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -245,6 +248,7 @@ func Test_version_Measurement(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -264,12 +268,12 @@ func Test_version_Measurement(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_version_MeasurementWithTags(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -337,6 +341,7 @@ func Test_version_MeasurementWithTags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -356,12 +361,12 @@ func Test_version_MeasurementWithTags(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_version_View(t 
*testing.T) { + t.Parallel() type fields struct { info metrics.Int64Measure kvs map[metrics.Key]string @@ -415,6 +420,7 @@ func Test_version_View(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -434,7 +440,6 @@ func Test_version_View(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/observability_option.go b/internal/observability/observability_option.go index 41d219e168..9e9d955e81 100644 --- a/internal/observability/observability_option.go +++ b/internal/observability/observability_option.go @@ -27,11 +27,9 @@ import ( type Option func(*observability) error -var ( - observabilityDefaultOpts = []Option{ - WithErrGroup(errgroup.Get()), - } -) +var observabilityDefaultOpts = []Option{ + WithErrGroup(errgroup.Get()), +} func WithErrGroup(eg errgroup.Group) Option { return func(o *observability) error { diff --git a/internal/observability/observability_test.go b/internal/observability/observability_test.go index f8acb655c5..0efc41d5ec 100644 --- a/internal/observability/observability_test.go +++ b/internal/observability/observability_test.go @@ -106,7 +106,6 @@ func TestNewWithConfig(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -181,7 +180,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -281,7 +279,6 @@ func Test_observability_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -381,7 +378,6 @@ func Test_observability_Start(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/profiler/stackdriver/stackdriver_option.go b/internal/observability/profiler/stackdriver/stackdriver_option.go index 734cc60004..92f580df2a 100644 --- a/internal/observability/profiler/stackdriver/stackdriver_option.go +++ b/internal/observability/profiler/stackdriver/stackdriver_option.go @@ -24,15 +24,13 @@ import ( type Option func(p *prof) error -var ( - defaultOpts = []Option{ - WithCPUProfiling(true), - WithAllocProfiling(true), - WithHeapProfiling(true), - WithGoroutineProfiling(true), - WithServiceVersion(info.Version), - } -) +var defaultOpts = []Option{ + WithCPUProfiling(true), + WithAllocProfiling(true), + WithHeapProfiling(true), + WithGoroutineProfiling(true), + WithServiceVersion(info.Version), +} func WithProjectID(pid string) Option { return func(p *prof) error { diff --git a/internal/observability/profiler/stackdriver/stackdriver_test.go b/internal/observability/profiler/stackdriver/stackdriver_test.go index aad80e1fa9..725992ef8d 100644 --- a/internal/observability/profiler/stackdriver/stackdriver_test.go +++ b/internal/observability/profiler/stackdriver/stackdriver_test.go @@ -98,7 +98,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotS, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -186,7 +185,6 @@ func Test_prof_Start(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/trace/status_test.go b/internal/observability/trace/status_test.go index 3edc349221..0030c51f47 100644 --- a/internal/observability/trace/status_test.go +++ 
b/internal/observability/trace/status_test.go @@ -90,7 +90,6 @@ func TestStatusCodeOK(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -160,7 +159,6 @@ func TestStatusCodeCancelled(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -230,7 +228,6 @@ func TestStatusCodeUnknown(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -300,7 +297,6 @@ func TestStatusCodeInvalidArgument(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -370,7 +366,6 @@ func TestStatusCodeDeadlineExceeded(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -440,7 +435,6 @@ func TestStatusCodeNotFound(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -510,7 +504,6 @@ func TestStatusCodeAlreadyExists(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -580,7 +573,6 @@ func TestStatusCodePermissionDenied(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -650,7 +642,6 @@ func TestStatusCodeResourceExhausted(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -720,7 +711,6 @@ func TestStatusCodeFailedPrecondition(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -790,7 +780,6 @@ func TestStatusCodeAborted(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -860,7 +849,6 @@ func TestStatusCodeOutOfRange(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -930,7 +918,6 @@ func TestStatusCodeUnimplemented(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1000,7 +987,6 @@ func TestStatusCodeInternal(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1070,7 +1056,6 @@ func TestStatusCodeUnavailable(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1140,7 +1125,6 @@ func TestStatusCodeDataLoss(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1210,7 +1194,6 @@ func TestStatusCodeUnauthenticated(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/observability/trace/trace_option.go b/internal/observability/trace/trace_option.go index ae7f8b0ad5..56299df2b0 100644 --- a/internal/observability/trace/trace_option.go +++ b/internal/observability/trace/trace_option.go @@ -19,11 +19,9 @@ package trace type TraceOption func(*tracer) -var ( - traceDefaultOpts = []TraceOption{ - WithSamplingRate(1.0), - } -) +var traceDefaultOpts = []TraceOption{ + WithSamplingRate(1.0), +} func WithSamplingRate(rate float64) TraceOption { return func(t *tracer) { diff --git a/internal/observability/trace/trace_test.go b/internal/observability/trace/trace_test.go index 37dcd229e7..515eceb0f6 100644 --- a/internal/observability/trace/trace_test.go +++ 
b/internal/observability/trace/trace_test.go @@ -101,7 +101,6 @@ func TestStartSpan(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -171,7 +170,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/params/option.go b/internal/params/option.go index c3bd8a9787..3cba16463a 100644 --- a/internal/params/option.go +++ b/internal/params/option.go @@ -19,16 +19,14 @@ package params type Option func(*parser) -var ( - defaultOpts = []Option{ - WithConfigFilePathKeys("f", "file", "c", "config"), - WithConfigFilePathDefault("/etc/server/config.yaml"), - WithConfigFileDescription("config file path"), - WithVersionKeys("v", "ver", "version"), - WithVersionFlagDefault(false), - WithVersionDescription("show server version"), - } -) +var defaultOpts = []Option{ + WithConfigFilePathKeys("f", "file", "c", "config"), + WithConfigFilePathDefault("/etc/server/config.yaml"), + WithConfigFileDescription("config file path"), + WithVersionKeys("v", "ver", "version"), + WithVersionFlagDefault(false), + WithVersionDescription("show server version"), +} // WithConfigFilePathKeys returns Option that sets filePath.keys. func WithConfigFilePathKeys(keys ...string) Option { diff --git a/internal/params/params_test.go b/internal/params/params_test.go index 65f12ab25b..9c3c49763b 100644 --- a/internal/params/params_test.go +++ b/internal/params/params_test.go @@ -25,7 +25,6 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) diff --git a/internal/rand/rand.go b/internal/rand/rand.go index c89cc062c2..fbebb630ac 100644 --- a/internal/rand/rand.go +++ b/internal/rand/rand.go @@ -28,13 +28,11 @@ type rand struct { x *uint32 } -var ( - pool = sync.Pool{ - New: func() interface{} { - return new(rand).init() - }, - } -) +var pool = sync.Pool{ + New: func() interface{} { + return new(rand).init() + }, +} func Uint32() (x uint32) { r := pool.Get().(*rand) diff --git a/internal/runner/option.go b/internal/runner/option.go index f176fb28d3..5b3051241a 100644 --- a/internal/runner/option.go +++ b/internal/runner/option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/internal/config" type Option func(*runner) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithName(name string) Option { return func(r *runner) { diff --git a/internal/runner/option_test.go b/internal/runner/option_test.go index b8239b73de..ad2bfd8e86 100644 --- a/internal/runner/option_test.go +++ b/internal/runner/option_test.go @@ -23,16 +23,13 @@ import ( "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. 
+var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), +} func TestWithName(t *testing.T) { type T = runner diff --git a/internal/runner/runner_mock_test.go b/internal/runner/runner_mock_test.go index 7669b637a8..6fce630871 100644 --- a/internal/runner/runner_mock_test.go +++ b/internal/runner/runner_mock_test.go @@ -36,9 +36,11 @@ func (m *runnerMock) Start(ctx context.Context) (<-chan error, error) { func (m *runnerMock) PreStop(ctx context.Context) error { return m.PreStopFunc(ctx) } + func (m *runnerMock) Stop(ctx context.Context) error { return m.StopFunc(ctx) } + func (m *runnerMock) PostStop(ctx context.Context) error { return m.PostStopFunc(ctx) } diff --git a/internal/runner/runner_test.go b/internal/runner/runner_test.go index 1a6b0a745b..46b6264c4e 100644 --- a/internal/runner/runner_test.go +++ b/internal/runner/runner_test.go @@ -27,7 +27,6 @@ import ( "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" - "go.uber.org/goleak" ) diff --git a/internal/safety/safety_test.go b/internal/safety/safety_test.go index 6010e3c9d7..e363c4b066 100644 --- a/internal/safety/safety_test.go +++ b/internal/safety/safety_test.go @@ -24,12 +24,10 @@ import ( "go.uber.org/goleak" ) -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. +var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), +} func init() { log.Init() @@ -70,7 +68,7 @@ func TestRecoverFunc(t *testing.T) { } tests := []test{ { - name: "returns error when system paniced caused by runtime error", + name: "returns error when system panicked caused by runtime error", args: args{ fn: func() error { _ = []string{}[10] @@ -79,12 +77,12 @@ func TestRecoverFunc(t *testing.T) { }, want: want{ wantPanic: func() error { - return errors.New("system paniced caused by runtime error: runtime error: index out of range [10] with length 0") + return errors.New("system panicked caused by runtime error: runtime error: index out of range [10] with length 0") }, }, }, { - name: "returns error when system paniced caused by panic with string value", + name: "returns error when system panicked caused by panic with string value", args: args{ fn: func() error { panic("panic") @@ -97,7 +95,7 @@ func TestRecoverFunc(t *testing.T) { }, }, { - name: "returns error when system paniced caused by panic with error", + name: "returns error when system panicked caused by panic with error", args: args{ fn: func() error { panic(errors.Errorf("error")) @@ -110,7 +108,7 @@ func TestRecoverFunc(t *testing.T) { }, }, { - name: "returns error when system paniced caused by panic with int value", + name: "returns error when system panicked caused by panic with int value", args: args{ fn: func() error { panic(10) @@ -227,7 +225,6 @@ func TestRecoverWithoutPanicFunc(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -301,7 +298,6 @@ func Test_recoverFunc(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git 
a/internal/servers/option.go b/internal/servers/option.go index 0933af33a0..f58f7cc42a 100644 --- a/internal/servers/option.go +++ b/internal/servers/option.go @@ -27,11 +27,9 @@ import ( type Option func(*listener) -var ( - defaultOpts = []Option{ - WithErrorGroup(errgroup.Get()), - } -) +var defaultOpts = []Option{ + WithErrorGroup(errgroup.Get()), +} func WithServer(srv server.Server) Option { return func(l *listener) { diff --git a/internal/servers/server/option.go b/internal/servers/server/option.go index a24384118f..2c13c56811 100644 --- a/internal/servers/server/option.go +++ b/internal/servers/server/option.go @@ -270,6 +270,7 @@ func WithGRPCMaxReceiveMessageSize(size int) Option { } } } + func WithGRPCMaxSendMessageSize(size int) Option { return func(s *server) { if size > 0 || size == -1 { @@ -277,6 +278,7 @@ func WithGRPCMaxSendMessageSize(size int) Option { } } } + func WithGRPCInitialWindowSize(size int) Option { return func(s *server) { if size > 0 || size == -1 { @@ -284,6 +286,7 @@ func WithGRPCInitialWindowSize(size int) Option { } } } + func WithGRPCInitialConnWindowSize(size int) Option { return func(s *server) { if size > 0 || size == -1 { @@ -307,6 +310,7 @@ func WithGRPCKeepaliveMaxConnIdle(max string) Option { s.grpc.keepAlive.maxConnIdle = d } } + func WithGRPCKeepaliveMaxConnAge(max string) Option { return func(s *server) { if len(max) == 0 { @@ -322,6 +326,7 @@ func WithGRPCKeepaliveMaxConnAge(max string) Option { s.grpc.keepAlive.maxConnAge = d } } + func WithGRPCKeepaliveMaxConnAgeGrace(max string) Option { return func(s *server) { if len(max) == 0 { @@ -337,6 +342,7 @@ func WithGRPCKeepaliveMaxConnAgeGrace(max string) Option { s.grpc.keepAlive.maxConnAgeGrace = d } } + func WithGRPCKeepaliveTime(dur string) Option { return func(s *server) { if len(dur) == 0 { @@ -352,6 +358,7 @@ func WithGRPCKeepaliveTime(dur string) Option { s.grpc.keepAlive.t = d } } + func WithGRPCKeepaliveTimeout(dur string) Option { return func(s *server) { if len(dur) == 0 { @@ -367,6 +374,7 @@ func WithGRPCKeepaliveTimeout(dur string) Option { s.grpc.keepAlive.timeout = d } } + func WithGRPCWriteBufferSize(size int) Option { return func(s *server) { if size > 0 || size == -1 { @@ -374,6 +382,7 @@ func WithGRPCWriteBufferSize(size int) Option { } } } + func WithGRPCReadBufferSize(size int) Option { return func(s *server) { if size > 0 || size == -1 { @@ -381,6 +390,7 @@ func WithGRPCReadBufferSize(size int) Option { } } } + func WithGRPCConnectionTimeout(to string) Option { return func(s *server) { if len(to) == 0 { @@ -393,6 +403,7 @@ func WithGRPCConnectionTimeout(to string) Option { s.grpc.opts = append(s.grpc.opts, grpc.ConnectionTimeout(d)) } } + func WithGRPCMaxHeaderListSize(size int) Option { return func(s *server) { if size > 0 { @@ -400,6 +411,7 @@ func WithGRPCMaxHeaderListSize(size int) Option { } } } + func WithGRPCHeaderTableSize(size int) Option { return func(s *server) { if size > 0 { @@ -407,6 +419,7 @@ func WithGRPCHeaderTableSize(size int) Option { } } } + func WithGRPCInterceptors(name ...string) Option { return func(s *server) { // s.grpc.opts = append(s.grpc.opts, grpc.UnaryInterceptor(uint32(size))) diff --git a/internal/servers/server/option_test.go b/internal/servers/server/option_test.go index 0fa906df44..053b84b446 100644 --- a/internal/servers/server/option_test.go +++ b/internal/servers/server/option_test.go @@ -25,9 +25,8 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net" - 
"google.golang.org/grpc" - "go.uber.org/goleak" + "google.golang.org/grpc" ) func TestWithHost(t *testing.T) { diff --git a/internal/servers/server/server.go b/internal/servers/server/server.go index ae71a41878..2be37e8737 100644 --- a/internal/servers/server/server.go +++ b/internal/servers/server/server.go @@ -307,13 +307,17 @@ func (s *server) Shutdown(ctx context.Context) (rerr error) { s.wg.Done() return err })) - time.Sleep(s.pwt) + tctx, cancel := context.WithTimeout(ctx, s.pwt) + defer cancel() + <-tctx.Done() err := <-ech if err != nil { rerr = err } } else { - time.Sleep(s.pwt) + tctx, cancel := context.WithTimeout(ctx, s.pwt) + defer cancel() + <-tctx.Done() } log.Warnf("%s server %s is now shutting down", s.mode.String(), s.name) diff --git a/internal/servers/server/server_test.go b/internal/servers/server/server_test.go index bf623f62d2..a55c8b6ed0 100644 --- a/internal/servers/server/server_test.go +++ b/internal/servers/server/server_test.go @@ -30,9 +30,8 @@ import ( "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net" "github.com/vdaas/vald/internal/net/tcp" - "google.golang.org/grpc" - "go.uber.org/goleak" + "google.golang.org/grpc" ) func TestString(t *testing.T) { @@ -724,7 +723,6 @@ func Test_mode_String(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -891,7 +889,6 @@ func Test_server_IsRunning(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1058,7 +1055,6 @@ func Test_server_Name(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1238,7 +1234,6 @@ func Test_server_ListenAndServe(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1415,7 +1410,67 @@ func Test_server_Shutdown(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } + }) + } +} + +func TestServerMode_String(t *testing.T) { + type want struct { + want string + } + type test struct { + name string + m ServerMode + want want + checkFunc func(want, string) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got string) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got = %v, want %v", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := test.m.String() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } }) } } diff --git a/internal/servers/servers_test.go b/internal/servers/servers_test.go index c8c6eec0cd..c2f27ce7d0 100644 --- a/internal/servers/servers_test.go +++ b/internal/servers/servers_test.go @@ -24,7 +24,6 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/servers/server" - "go.uber.org/goleak" ) @@ -454,7 +453,6 @@ func Test_listener_ListenAndServe(t *testing.T) { if 
err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -554,7 +552,6 @@ func Test_listener_Shutdown(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/servers/starter/starter_test.go b/internal/servers/starter/starter_test.go index c7a5592571..d65ecfe2c8 100644 --- a/internal/servers/starter/starter_test.go +++ b/internal/servers/starter/starter_test.go @@ -26,7 +26,6 @@ import ( "github.com/vdaas/vald/internal/servers" "github.com/vdaas/vald/internal/servers/server" "github.com/vdaas/vald/internal/tls" - "go.uber.org/goleak" ) @@ -569,7 +568,6 @@ func Test_srvs_setupAPIs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -677,7 +675,6 @@ func Test_srvs_setupHealthCheck(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -785,7 +782,6 @@ func Test_srvs_setupMetrics(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/singleflight/singleflight.go b/internal/singleflight/singleflight.go index c884f155b0..acd90c95e3 100644 --- a/internal/singleflight/singleflight.go +++ b/internal/singleflight/singleflight.go @@ -62,7 +62,7 @@ func (g *group) Do(ctx context.Context, key string, fn func() (interface{}, erro c.val, c.err = fn() c.wg.Done() - g.m.Delete(key) + g.m.LoadAndDelete(key) return c.val, c.err, atomic.LoadUint64(&c.dups) > 0 } diff --git a/internal/singleflight/singleflight_test.go b/internal/singleflight/singleflight_test.go index 8126a1c6b1..b08292d311 100644 --- a/internal/singleflight/singleflight_test.go +++ b/internal/singleflight/singleflight_test.go @@ -73,7 +73,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/test/testdata.go b/internal/test/testdata.go index 8997b749fd..d713f2c885 100644 --- a/internal/test/testdata.go +++ b/internal/test/testdata.go @@ -21,7 +21,7 @@ import ( "strings" ) -// GetTestdataPath returns the test data file path under `internal/test/data` +// GetTestdataPath returns the test data file path under `internal/test/data`. func GetTestdataPath(filename string) string { fp, _ := filepath.Abs(baseDir() + "/internal/test/data/" + filename) return fp diff --git a/internal/timeutil/location/loc_test.go b/internal/timeutil/location/loc_test.go index 5f01ec79bb..c46bc92758 100644 --- a/internal/timeutil/location/loc_test.go +++ b/internal/timeutil/location/loc_test.go @@ -60,7 +60,7 @@ func TestSet(t *testing.T) { if got == nil { return errors.New("got is nil") } else if got, want := got.String(), locationGMT; got != want { - return errors.Errorf("String() not equals. want: %v, but got: %v") + return errors.Errorf("String() not equals. want: %v, but got: %v", want, got) } return nil }, @@ -73,7 +73,7 @@ func TestSet(t *testing.T) { if got == nil { return errors.New("got is nil") } else if got, want := got.String(), locationGMT; got != want { - return errors.Errorf("String() not equals. want: %v, but got: %v") + return errors.Errorf("String() not equals. want: %v, but got: %v", want, got) } return nil }, @@ -86,7 +86,7 @@ func TestSet(t *testing.T) { if got == nil { return errors.New("got is nil") } else if got, want := got.String(), locationJST; got != want { - return errors.Errorf("String() not equals. 
want: %v, but got: %v") + return errors.Errorf("String() not equals. want: %v, but got: %v", want, got) } return nil }, @@ -98,8 +98,8 @@ func TestSet(t *testing.T) { checkFunc: func(got *time.Location) error { if got == nil { return errors.New("got is nil") - } else if got, want := got.String(), "Jst"; got != want { - return errors.Errorf("String() not equals. want: %v, but got: %v") + } else if got, want := got.String(), locationJST; got != want { + return errors.Errorf("String() not equals. want: %v, but got: %v", want, got) } return nil }, @@ -111,8 +111,8 @@ func TestSet(t *testing.T) { checkFunc: func(got *time.Location) error { if got == nil { return errors.New("got is nil") - } else if got, want := got.String(), locationTokyo; got != want { - return errors.Errorf("String() not equals. want: %v, but got: %v") + } else if got, want := got.String(), locationJST; got != want { + return errors.Errorf("String() not equals. want: %v, but got: %v", want, got) } return nil }, @@ -124,8 +124,8 @@ func TestSet(t *testing.T) { checkFunc: func(got *time.Location) error { if got == nil { return errors.New("got is nil") - } else if got, want := got.String(), "ASIA/Tokyo"; got != want { - return errors.Errorf("String() not equals. want: %v, but got: %v") + } else if got, want := got.String(), locationJST; got != want { + return errors.Errorf("String() not equals. want: %v, but got: %v", want, got) } return nil }, @@ -138,7 +138,7 @@ func TestSet(t *testing.T) { if got == nil { return errors.New("got is nil") } else if got, want := got.String(), "invalid"; got != want { - return errors.Errorf("String() not equals. want: %v, but got: %v") + return errors.Errorf("String() not equals. want: %v, but got: %v", want, got) } return nil }, diff --git a/internal/timeutil/time_test.go b/internal/timeutil/time_test.go index b43aa5b682..2bf2fe9a2e 100644 --- a/internal/timeutil/time_test.go +++ b/internal/timeutil/time_test.go @@ -25,6 +25,10 @@ import ( "go.uber.org/goleak" ) +var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), +} + func TestParse(t *testing.T) { type test struct { name string @@ -79,6 +83,7 @@ func TestParse(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + defer goleak.VerifyNone(t, goleakIgnoreOptions...) got, err := Parse(tt.t) if (err != nil) != tt.wantErr { t.Errorf("Parse() error = %v, wantErr %v", err, tt.wantErr) @@ -148,7 +153,7 @@ func TestParseWithDefault(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) + defer goleak.VerifyNone(tt, goleakIgnoreOptions...) 
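The test hunks throughout this patch converge on one recipe: mark the parent test and each table-driven subtest with `t.Parallel()`/`tt.Parallel()`, verify goroutine leaks against the subtest's own `*testing.T` (`tt`, not the outer `t`), and pass `goleak.IgnoreTopFunction` for the long-lived fastime timer goroutine. The following is a minimal sketch of that recipe under stated assumptions — the `add` function and its test table are hypothetical stand-ins for the real code and tables.

```go
package example

import (
	"testing"

	"go.uber.org/goleak"
)

// Goroutines started by external packages (here the fastime timer) are not
// leaks of the code under test, so they are filtered out, mirroring the
// goleakIgnoreOptions variables introduced in the diff.
var goleakIgnoreOptions = []goleak.Option{
	goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"),
}

// add is a hypothetical function under test.
func add(a, b int) int { return a + b }

func TestAdd(t *testing.T) {
	t.Parallel() // run alongside other top-level tests

	tests := []struct {
		name string
		a, b int
		want int
	}{
		{name: "positive", a: 1, b: 2, want: 3},
		{name: "negative", a: -1, b: -2, want: -3},
	}

	for _, test := range tests {
		test := test // capture the range variable for the parallel closure (pre-Go 1.22)
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			// Verify against the subtest's *testing.T so failures attach to the
			// right subtest, which is why the diff changes VerifyNone(t) to VerifyNone(tt).
			defer goleak.VerifyNone(tt, goleakIgnoreOptions...)

			if got := add(test.a, test.b); got != test.want {
				tt.Errorf("got = %v, want %v", got, test.want)
			}
		})
	}
}
```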
if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -163,7 +168,6 @@ func TestParseWithDefault(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/tls/option.go b/internal/tls/option.go index 4f7bf8e656..e75a49a504 100644 --- a/internal/tls/option.go +++ b/internal/tls/option.go @@ -21,7 +21,7 @@ import "crypto/tls" type Option func(*credentials) error -func defaultOptions() []Option { +var defaultOptions = func() []Option { return []Option{ WithTLSConfig(&tls.Config{ MinVersion: tls.VersionTLS12, diff --git a/internal/tls/option_test.go b/internal/tls/option_test.go index 2b5d549bea..d4eacbf673 100644 --- a/internal/tls/option_test.go +++ b/internal/tls/option_test.go @@ -79,7 +79,7 @@ func TestWithCert(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) + defer goleak.VerifyNone(tt, goleakIgnoreOptions...) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -154,7 +154,7 @@ func TestWithKey(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) + defer goleak.VerifyNone(tt, goleakIgnoreOptions...) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -229,7 +229,7 @@ func TestWithCa(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) + defer goleak.VerifyNone(tt, goleakIgnoreOptions...) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -305,7 +305,7 @@ func TestWithTLSConfig(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) + defer goleak.VerifyNone(tt, goleakIgnoreOptions...) if test.beforeFunc != nil { test.beforeFunc(test.args) } diff --git a/internal/tls/tls.go b/internal/tls/tls.go index a2254332b5..64653632b1 100644 --- a/internal/tls/tls.go +++ b/internal/tls/tls.go @@ -20,10 +20,10 @@ package tls import ( "crypto/tls" "crypto/x509" - "io/ioutil" "reflect" "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/io/ioutil" ) type Config = tls.Config @@ -102,9 +102,11 @@ func NewClientConfig(opts ...Option) (*Config, error) { // NewX509CertPool returns *x509.CertPool struct or error. // The CertPool will read the certificate from the path, and append the content to the system certificate pool, and return. 
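// A typical call site looks roughly like the following (illustrative sketch only;
// RootCAs and MinVersion come from the standard crypto/tls package, and the path is
// a placeholder):
//
//	pool, err := NewX509CertPool("/path/to/ca.pem")
//	if err != nil {
//		return nil, err
//	}
//	cfg := &Config{RootCAs: pool, MinVersion: tls.VersionTLS12}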
-func NewX509CertPool(path string) (*x509.CertPool, error) { - var pool *x509.CertPool +func NewX509CertPool(path string) (pool *x509.CertPool, err error) { c, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } if err == nil && c != nil { pool, err = x509.SystemCertPool() if err != nil || pool == nil { diff --git a/internal/tls/tls_test.go b/internal/tls/tls_test.go index bf25cb4e11..da712a4c71 100644 --- a/internal/tls/tls_test.go +++ b/internal/tls/tls_test.go @@ -22,15 +22,19 @@ import ( "crypto/x509" stderrs "errors" "fmt" - "io/ioutil" "reflect" "testing" "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/io/ioutil" testdata "github.com/vdaas/vald/internal/test" "go.uber.org/goleak" ) +var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), +} + func TestNew(t *testing.T) { type args struct { opts []Option @@ -173,7 +177,7 @@ func TestNew(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) + defer goleak.VerifyNone(tt, goleakIgnoreOptions...) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -188,7 +192,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -286,7 +289,7 @@ func TestNewClientConfig(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) + defer goleak.VerifyNone(tt, goleakIgnoreOptions...) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -301,7 +304,6 @@ func TestNewClientConfig(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -413,7 +415,7 @@ func TestNewX509CertPool(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) + defer goleak.VerifyNone(tt, goleakIgnoreOptions...) if test.beforeFunc != nil { test.beforeFunc(test.args) } diff --git a/internal/unit/unit.go b/internal/unit/unit.go index f073952282..27cd88b9d3 100644 --- a/internal/unit/unit.go +++ b/internal/unit/unit.go @@ -21,7 +21,7 @@ import ( "github.com/vdaas/vald/internal/errors" ) -// ParseBytes parses string to uint64 +// ParseBytes parses string to uint64. func ParseBytes(bs string) (bytes uint64, err error) { if bs == "" || bs == "0" { return 0, nil diff --git a/internal/unit/unit_test.go b/internal/unit/unit_test.go index 7b6f360619..ff4c36707a 100644 --- a/internal/unit/unit_test.go +++ b/internal/unit/unit_test.go @@ -113,7 +113,6 @@ func TestParseBytes(t *testing.T) { if err := test.checkFunc(test.want, gotBytes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/worker/queue_option.go b/internal/worker/queue_option.go index 78ea4c353a..f95aee9c9e 100644 --- a/internal/worker/queue_option.go +++ b/internal/worker/queue_option.go @@ -25,13 +25,11 @@ import ( // QueueOption represents the functional option for queue. type QueueOption func(q *queue) error -var ( - defaultQueueOpts = []QueueOption{ - WithQueueBuffer(10), - WithQueueErrGroup(errgroup.Get()), - WithQueueCheckDuration("200ms"), - } -) +var defaultQueueOpts = []QueueOption{ + WithQueueBuffer(10), + WithQueueErrGroup(errgroup.Get()), + WithQueueCheckDuration("200ms"), +} // WithQueueBuffer returns the option to set the buffer for queue. 
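// For example (an illustrative sketch; it assumes NewQueue accepts these options as
// exercised in queue_test.go, and the concrete values are arbitrary):
//
//	q, err := NewQueue(WithQueueBuffer(100), WithQueueCheckDuration("100ms"))
//
// The buffer value only sizes the queue's internal job buffer; defaultQueueOpts above
// uses 10.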
func WithQueueBuffer(buffer int) QueueOption { diff --git a/internal/worker/queue_option_test.go b/internal/worker/queue_option_test.go index 937be81028..6008f22fec 100644 --- a/internal/worker/queue_option_test.go +++ b/internal/worker/queue_option_test.go @@ -18,12 +18,11 @@ package worker import ( + stderrors "errors" "reflect" "testing" "time" - stderrors "errors" - "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "go.uber.org/goleak" diff --git a/internal/worker/queue_test.go b/internal/worker/queue_test.go index 92863f98a7..ed498f6f99 100644 --- a/internal/worker/queue_test.go +++ b/internal/worker/queue_test.go @@ -30,13 +30,11 @@ import ( "go.uber.org/goleak" ) -var ( - // Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. - goleakIgnoreOptions = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), - goleak.IgnoreTopFunction("github.com/vdaas/vald/internal/worker.(*queue).Start.func1"), - } -) +// Goroutine leak is detected by `fastime`, but it should be ignored in the test because it is an external package. +var goleakIgnoreOptions = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/kpango/fastime.(*Fastime).StartTimerD.func1"), + goleak.IgnoreTopFunction("github.com/vdaas/vald/internal/worker.(*queue).Start.func1"), +} func TestNewQueue(t *testing.T) { type args struct { @@ -149,7 +147,6 @@ func TestNewQueue(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -364,7 +361,6 @@ func Test_queue_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -426,7 +422,6 @@ func Test_queue_isRunning(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -695,7 +690,6 @@ func Test_queue_Pop(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -892,7 +886,6 @@ func Test_queue_pop(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/internal/worker/worker_option.go b/internal/worker/worker_option.go index 4c5d9a3865..5ae5bd94df 100644 --- a/internal/worker/worker_option.go +++ b/internal/worker/worker_option.go @@ -23,13 +23,11 @@ import ( type WorkerOption func(w *worker) error -var ( - defaultWorkerOpts = []WorkerOption{ - WithName("worker"), - WithLimitation(10), - WithErrGroup(errgroup.Get()), - } -) +var defaultWorkerOpts = []WorkerOption{ + WithName("worker"), + WithLimitation(10), + WithErrGroup(errgroup.Get()), +} func WithName(name string) WorkerOption { return func(w *worker) error { diff --git a/internal/worker/worker_option_test.go b/internal/worker/worker_option_test.go index 6dfd96aa3c..0b31bd1598 100644 --- a/internal/worker/worker_option_test.go +++ b/internal/worker/worker_option_test.go @@ -21,7 +21,6 @@ import ( "testing" "github.com/vdaas/vald/internal/errgroup" - "go.uber.org/goleak" ) diff --git a/internal/worker/worker_test.go b/internal/worker/worker_test.go index ea1fa55937..e6e385473c 100644 --- a/internal/worker/worker_test.go +++ b/internal/worker/worker_test.go @@ -28,7 +28,6 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/test/comparator" - "go.uber.org/goleak" ) @@ -165,7 +164,6 @@ func TestNew(t *testing.T) { if 
err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -314,7 +312,6 @@ func Test_worker_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -573,7 +570,6 @@ func Test_worker_startJobLoop(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -806,7 +802,6 @@ func Test_worker_IsRunning(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -878,7 +873,6 @@ func Test_worker_Name(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -954,7 +948,6 @@ func Test_worker_Len(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1026,7 +1019,6 @@ func Test_worker_TotalRequested(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1098,7 +1090,6 @@ func Test_worker_TotalCompleted(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } @@ -1251,7 +1242,6 @@ func Test_worker_Dispatch(t *testing.T) { if err := test.checkFunc(w, test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/k8s/debug/kind/config.yaml b/k8s/debug/kind/config.yaml index 9b7364754f..3fd3962d7d 100644 --- a/k8s/debug/kind/config.yaml +++ b/k8s/debug/kind/config.yaml @@ -19,6 +19,19 @@ apiVersion: kind.x-k8s.io/v1alpha4 # AllBeta: false nodes: - role: control-plane + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + extraPortMappings: + - containerPort: 80 + hostPort: 8080 + protocol: TCP + - containerPort: 443 + hostPort: 4443 + protocol: TCP - role: worker - role: worker - role: worker diff --git a/k8s/jobs/db/initialize/cassandra/configmap.yaml b/k8s/jobs/db/initialize/cassandra/configmap.yaml index a192f847ac..e6345d3935 100644 --- a/k8s/jobs/db/initialize/cassandra/configmap.yaml +++ b/k8s/jobs/db/initialize/cassandra/configmap.yaml @@ -39,14 +39,13 @@ data: ); // backup - DROP TABLE IF EXISTS vald.meta_vector; - CREATE TABLE vald.meta_vector ( + DROP TABLE IF EXISTS vald.backup_vector; + CREATE TABLE vald.backup_vector ( uuid text, vector blob, - meta text, ips list, PRIMARY KEY (uuid) ); - DROP INDEX IF EXISTS vald.meta_vector; - CREATE INDEX ON vald.meta_vector (ips); + DROP INDEX IF EXISTS vald.backup_vector; + CREATE INDEX ON vald.backup_vector (ips); diff --git a/k8s/jobs/db/initialize/mysql/configmap.yaml b/k8s/jobs/db/initialize/mysql/configmap.yaml index ad50e7c47a..88382fbf8e 100644 --- a/k8s/jobs/db/initialize/mysql/configmap.yaml +++ b/k8s/jobs/db/initialize/mysql/configmap.yaml @@ -25,10 +25,9 @@ data: USE `vald` ; - CREATE TABLE IF NOT EXISTS `vald`.`meta_vector` ( + CREATE TABLE IF NOT EXISTS `vald`.`backup_vector` ( `uuid` VARCHAR(255) NOT NULL, `vector` BLOB NOT NULL, - `meta` VARCHAR(1024) NOT NULL, `id` int NOT NULL AUTO_INCREMENT, PRIMARY KEY (`uuid`), UNIQUE INDEX `id_unique` (`id` ASC), diff --git a/pkg/agent/core/ngt/config/config_test.go b/pkg/agent/core/ngt/config/config_test.go index 4b07b57620..e90809127a 100644 --- a/pkg/agent/core/ngt/config/config_test.go +++ b/pkg/agent/core/ngt/config/config_test.go @@ -26,6 +26,7 @@ import ( ) func TestNewConfig(t *testing.T) { + t.Parallel() type args struct { path string } @@ -78,8 
+79,10 @@ func TestNewConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -95,7 +98,6 @@ func TestNewConfig(t *testing.T) { if err := test.checkFunc(test.want, gotCfg, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/core/ngt/handler/grpc/handler.go b/pkg/agent/core/ngt/handler/grpc/handler.go index dc2895e26b..09071050a5 100644 --- a/pkg/agent/core/ngt/handler/grpc/handler.go +++ b/pkg/agent/core/ngt/handler/grpc/handler.go @@ -21,26 +21,38 @@ import ( "context" "fmt" "strconv" + "sync" - agent "github.com/vdaas/vald/apis/grpc/agent/core" - "github.com/vdaas/vald/apis/grpc/payload" + agent "github.com/vdaas/vald/apis/grpc/v1/agent/core" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/net/grpc/status" "github.com/vdaas/vald/internal/observability/trace" + "github.com/vdaas/vald/internal/safety" "github.com/vdaas/vald/pkg/agent/core/ngt/model" "github.com/vdaas/vald/pkg/agent/core/ngt/service" ) -type Server agent.AgentServer +type Server interface { + agent.AgentServer + vald.Server +} type server struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } +const apiName = "vald/agent-ngt" + func New(opts ...Option) Server { s := new(server) @@ -50,8 +62,33 @@ func New(opts ...Option) Server { return s } +func (s *server) newLocations(uuids ...string) (locs *payload.Object_Locations) { + if len(uuids) == 0 { + return nil + } + locs = &payload.Object_Locations{ + Locations: make([]*payload.Object_Location, 0, len(uuids)), + } + for _, uuid := range uuids { + locs.Locations = append(locs.Locations, &payload.Object_Location{ + Name: s.name, + Uuid: uuid, + Ips: []string{s.ip}, + }) + } + return locs +} + +func (s *server) newLocation(uuid string) *payload.Object_Location { + locs := s.newLocations(uuid) + if locs != nil && locs.Locations != nil && len(locs.Locations) > 0 { + return locs.Locations[0] + } + return nil +} + func (s *server) Exists(ctx context.Context, uid *payload.Object_ID) (res *payload.Object_ID, err error) { - ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.Exists") + ctx, span := trace.StartSpan(ctx, apiName+".Exists") defer func() { if span != nil { span.End() @@ -67,13 +104,13 @@ func (s *server) Exists(ctx context.Context, uid *payload.Object_ID) (res *paylo } return nil, status.WrapWithNotFound(fmt.Sprintf("Exists API uuid %s's oid not found", uuid), err, info.Get()) } - res = new(payload.Object_ID) - res.Id = strconv.Itoa(int(oid)) - return res, nil + return &payload.Object_ID{ + Id: strconv.Itoa(int(oid)), + }, nil } func (s *server) Search(ctx context.Context, req *payload.Search_Request) (*payload.Search_Response, error) { - ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.Search") + ctx, span := trace.StartSpan(ctx, apiName+".Search") defer func() { if span != nil { span.End() @@ -88,7 +125,7 @@ func (s *server) Search(ctx context.Context, req *payload.Search_Request) (*payl } func (s *server) SearchByID(ctx context.Context, req *payload.Search_IDRequest) (*payload.Search_Response, error) { - ctx, span := trace.StartSpan(ctx, 
"vald/agent-ngt.SearchByID") + ctx, span := trace.StartSpan(ctx, apiName+".SearchByID") defer func() { if span != nil { span.End() @@ -118,8 +155,8 @@ func toSearchResponse(dists []model.Distance, err error) (res *payload.Search_Re return res, err } -func (s *server) StreamSearch(stream agent.Agent_StreamSearchServer) error { - ctx, span := trace.StartSpan(stream.Context(), "vald/agent-ngt.StreamSearch") +func (s *server) StreamSearch(stream vald.Search_StreamSearchServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamSearch") defer func() { if span != nil { span.End() @@ -132,8 +169,8 @@ func (s *server) StreamSearch(stream agent.Agent_StreamSearchServer) error { }) } -func (s *server) StreamSearchByID(stream agent.Agent_StreamSearchByIDServer) error { - ctx, span := trace.StartSpan(stream.Context(), "vald/agent-ngt.StreamSearchByID") +func (s *server) StreamSearchByID(stream vald.Search_StreamSearchByIDServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamSearchByID") defer func() { if span != nil { span.End() @@ -146,13 +183,86 @@ func (s *server) StreamSearchByID(stream agent.Agent_StreamSearchByIDServer) err }) } -func (s *server) Insert(ctx context.Context, vec *payload.Object_Vector) (res *payload.Empty, err error) { - ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.Insert") +func (s *server) MultiSearch(ctx context.Context, reqs *payload.Search_MultiRequest) (res *payload.Search_Responses, errs error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiSearch") defer func() { if span != nil { span.End() } }() + + res = &payload.Search_Responses{ + Responses: make([]*payload.Search_Response, len(reqs.Requests)), + } + var wg sync.WaitGroup + var mu sync.Mutex + for i, req := range reqs.Requests { + idx, query := i, req + wg.Add(1) + s.eg.Go(func() error { + defer wg.Done() + r, err := s.Search(ctx, query) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + mu.Lock() + errs = errors.Wrap(errs, status.WrapWithNotFound(fmt.Sprintf("MultiSearch API vector %v's search request result not found", query.GetVector()), err, info.Get()).Error()) + mu.Unlock() + return nil + } + res.Responses[idx] = r + return nil + }) + } + wg.Wait() + return res, errs +} + +func (s *server) MultiSearchByID(ctx context.Context, reqs *payload.Search_MultiIDRequest) (res *payload.Search_Responses, errs error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiSearchByID") + defer func() { + if span != nil { + span.End() + } + }() + + res = &payload.Search_Responses{ + Responses: make([]*payload.Search_Response, len(reqs.Requests)), + } + var wg sync.WaitGroup + var mu sync.Mutex + for i, req := range reqs.Requests { + idx, query := i, req + wg.Add(1) + s.eg.Go(func() error { + defer wg.Done() + r, err := s.SearchByID(ctx, query) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + mu.Lock() + errs = errors.Wrap(errs, status.WrapWithNotFound(fmt.Sprintf("MultiSearchByID API uuid %v's search by id request result not found", query.GetId()), err, info.Get()).Error()) + mu.Unlock() + return nil + } + res.Responses[idx] = r + return nil + }) + } + wg.Wait() + return res, errs +} + +func (s *server) Insert(ctx context.Context, req *payload.Insert_Request) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Insert") + defer func() { + if span != nil { + span.End() + } + }() + vec := req.GetVector() err = s.ngt.Insert(vec.GetId(), 
vec.GetVector()) if err != nil { log.Errorf("[Insert]\tUnknown error\t%+v", err) @@ -161,33 +271,36 @@ func (s *server) Insert(ctx context.Context, vec *payload.Object_Vector) (res *p } return nil, status.WrapWithInternal(fmt.Sprintf("Insert API failed to insert %#v", vec), err, info.Get()) } - return new(payload.Empty), nil + return s.newLocation(vec.GetId()), nil } -func (s *server) StreamInsert(stream agent.Agent_StreamInsertServer) error { - ctx, span := trace.StartSpan(stream.Context(), "vald/agent-ngt.StreamInsert") +func (s *server) StreamInsert(stream vald.Insert_StreamInsertServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamInsert") defer func() { if span != nil { span.End() } }() return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, - func() interface{} { return new(payload.Object_Vector) }, + func() interface{} { return new(payload.Insert_Request) }, func(ctx context.Context, data interface{}) (interface{}, error) { - return s.Insert(ctx, data.(*payload.Object_Vector)) + return s.Insert(ctx, data.(*payload.Insert_Request)) }) } -func (s *server) MultiInsert(ctx context.Context, vecs *payload.Object_Vectors) (res *payload.Empty, err error) { - ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.MultiInsert") +func (s *server) MultiInsert(ctx context.Context, reqs *payload.Insert_MultiRequest) (res *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiInsert") defer func() { if span != nil { span.End() } }() - vmap := make(map[string][]float32, len(vecs.GetVectors())) - for _, vec := range vecs.GetVectors() { + uuids := make([]string, 0, len(reqs.GetRequests())) + vmap := make(map[string][]float32, len(reqs.GetRequests())) + for _, req := range reqs.GetRequests() { + vec := req.GetVector() vmap[vec.GetId()] = vec.GetVector() + uuids = append(uuids, vec.GetId()) } err = s.ngt.InsertMultiple(vmap) if err != nil { @@ -197,17 +310,17 @@ func (s *server) MultiInsert(ctx context.Context, vecs *payload.Object_Vectors) } return nil, status.WrapWithInternal(fmt.Sprintf("MultiInsert API failed insert %#v", vmap), err, info.Get()) } - return new(payload.Empty), nil + return s.newLocations(uuids...), nil } -func (s *server) Update(ctx context.Context, vec *payload.Object_Vector) (res *payload.Empty, err error) { - ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.Update") +func (s *server) Update(ctx context.Context, req *payload.Update_Request) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Update") defer func() { if span != nil { span.End() } }() - res = new(payload.Empty) + vec := req.GetVector() err = s.ngt.Update(vec.GetId(), vec.GetVector()) if err != nil { log.Errorf("[Update]\tUnknown error\t%+v", err) @@ -216,35 +329,37 @@ func (s *server) Update(ctx context.Context, vec *payload.Object_Vector) (res *p } return nil, status.WrapWithInternal(fmt.Sprintf("Update API failed to update %#v", vec), err, info.Get()) } - return res, nil + return s.newLocation(vec.GetId()), nil } -func (s *server) StreamUpdate(stream agent.Agent_StreamUpdateServer) error { - ctx, span := trace.StartSpan(stream.Context(), "vald/agent-ngt.StreamUpdate") +func (s *server) StreamUpdate(stream vald.Update_StreamUpdateServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamUpdate") defer func() { if span != nil { span.End() } }() return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, - func() interface{} { return new(payload.Object_Vector) }, + func() interface{} 
{ return new(payload.Update_Request) }, func(ctx context.Context, data interface{}) (interface{}, error) { - return s.Update(ctx, data.(*payload.Object_Vector)) + return s.Update(ctx, data.(*payload.Update_Request)) }) } -func (s *server) MultiUpdate(ctx context.Context, vecs *payload.Object_Vectors) (res *payload.Empty, err error) { - ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.MultiUpdate") +func (s *server) MultiUpdate(ctx context.Context, reqs *payload.Update_MultiRequest) (res *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiUpdate") defer func() { if span != nil { span.End() } }() - res = new(payload.Empty) - vmap := make(map[string][]float32, len(vecs.GetVectors())) - for _, vec := range vecs.GetVectors() { + uuids := make([]string, 0, len(reqs.GetRequests())) + vmap := make(map[string][]float32, len(reqs.GetRequests())) + for _, req := range reqs.GetRequests() { + vec := req.GetVector() vmap[vec.GetId()] = vec.GetVector() + uuids = append(uuids, vec.GetId()) } err = s.ngt.UpdateMultiple(vmap) @@ -255,17 +370,107 @@ func (s *server) MultiUpdate(ctx context.Context, vecs *payload.Object_Vectors) } return nil, status.WrapWithInternal(fmt.Sprintf("MultiUpdate API failed to update %#v", vmap), err, info.Get()) } - return res, err + return s.newLocations(uuids...), nil } -func (s *server) Remove(ctx context.Context, id *payload.Object_ID) (res *payload.Empty, err error) { - ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.Remove") +func (s *server) Upsert(ctx context.Context, req *payload.Upsert_Request) (*payload.Object_Location, error) { + ctx, span := trace.StartSpan(ctx, apiName+".Upsert") defer func() { if span != nil { span.End() } }() - res = new(payload.Empty) + + _, exists := s.ngt.Exists(req.GetVector().GetId()) + if exists { + return s.Update(ctx, &payload.Update_Request{ + Vector: req.GetVector(), + }) + } + return s.Insert(ctx, &payload.Insert_Request{ + Vector: req.GetVector(), + }) +} + +func (s *server) StreamUpsert(stream vald.Upsert_StreamUpsertServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamUpsert") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Upsert_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Upsert(ctx, data.(*payload.Upsert_Request)) + }) +} + +func (s *server) MultiUpsert(ctx context.Context, reqs *payload.Upsert_MultiRequest) (res *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiUpsert") + defer func() { + if span != nil { + span.End() + } + }() + + insertReqs := make([]*payload.Insert_Request, 0, len(reqs.GetRequests())) + updateReqs := make([]*payload.Update_Request, 0, len(reqs.GetRequests())) + + for _, req := range reqs.GetRequests() { + vec := req.GetVector() + _, exists := s.ngt.Exists(vec.GetId()) + if exists { + updateReqs = append(updateReqs, &payload.Update_Request{ + Vector: vec, + }) + } else { + insertReqs = append(insertReqs, &payload.Insert_Request{ + Vector: vec, + }) + } + } + + var ures, ires *payload.Object_Locations + + eg, ectx := errgroup.New(ctx) + eg.Go(safety.RecoverFunc(func() error { + var err error + if len(updateReqs) > 0 { + ures, err = s.MultiUpdate(ectx, &payload.Update_MultiRequest{ + Requests: updateReqs, + }) + } + return err + })) + + eg.Go(safety.RecoverFunc(func() error { + var err error + if len(insertReqs) > 0 { + ires, err = 
s.MultiInsert(ectx, &payload.Insert_MultiRequest{ + Requests: insertReqs, + }) + } + return err + })) + + if err = eg.Wait(); err != nil { + return nil, status.WrapWithInternal("MultiUpsert API failed", err, info.Get()) + } + + return &payload.Object_Locations{ + Locations: append(ures.Locations, ires.Locations...), + }, nil +} + +func (s *server) Remove(ctx context.Context, req *payload.Remove_Request) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Remove") + defer func() { + if span != nil { + span.End() + } + }() + id := req.GetId() uuid := id.GetId() err = s.ngt.Delete(uuid) if err != nil { @@ -275,32 +480,34 @@ func (s *server) Remove(ctx context.Context, id *payload.Object_ID) (res *payloa } return nil, status.WrapWithInternal(fmt.Sprintf("Remove API failed to delete uuid %s", uuid), err, info.Get()) } - return res, nil + return s.newLocation(uuid), nil } -func (s *server) StreamRemove(stream agent.Agent_StreamRemoveServer) error { - ctx, span := trace.StartSpan(stream.Context(), "vald/agent-ngt.StreamRemove") +func (s *server) StreamRemove(stream vald.Remove_StreamRemoveServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamRemove") defer func() { if span != nil { span.End() } }() return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, - func() interface{} { return new(payload.Object_ID) }, + func() interface{} { return new(payload.Remove_Request) }, func(ctx context.Context, data interface{}) (interface{}, error) { - return s.Remove(ctx, data.(*payload.Object_ID)) + return s.Remove(ctx, data.(*payload.Remove_Request)) }) } -func (s *server) MultiRemove(ctx context.Context, ids *payload.Object_IDs) (res *payload.Empty, err error) { - ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.MultiRemove") +func (s *server) MultiRemove(ctx context.Context, reqs *payload.Remove_MultiRequest) (res *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiRemove") defer func() { if span != nil { span.End() } }() - res = new(payload.Empty) - uuids := ids.GetIds() + uuids := make([]string, 0, len(reqs.GetRequests())) + for _, req := range reqs.GetRequests() { + uuids = append(uuids, req.GetId().GetId()) + } err = s.ngt.DeleteMultiple(uuids...) 
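// DeleteMultiple removes every collected uuid from the NGT index in a single call;
// any failure is logged and wrapped as an Internal gRPC status below.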
if err != nil { log.Errorf("[MultiRemove]\tUnknown error\t%+v", err) @@ -309,11 +516,11 @@ func (s *server) MultiRemove(ctx context.Context, ids *payload.Object_IDs) (res } return nil, status.WrapWithInternal(fmt.Sprintf("MultiUpdate API failed to delete %#v", uuids), err, info.Get()) } - return res, nil + return s.newLocations(uuids...), nil } func (s *server) GetObject(ctx context.Context, id *payload.Object_ID) (res *payload.Object_Vector, err error) { - ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.GetObject") + ctx, span := trace.StartSpan(ctx, apiName+".GetObject") defer func() { if span != nil { span.End() @@ -334,8 +541,8 @@ func (s *server) GetObject(ctx context.Context, id *payload.Object_ID) (res *pay }, nil } -func (s *server) StreamGetObject(stream agent.Agent_StreamGetObjectServer) error { - ctx, span := trace.StartSpan(stream.Context(), "vald/agent-ngt.StreamGetObject") +func (s *server) StreamGetObject(stream vald.Object_StreamGetObjectServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamGetObject") defer func() { if span != nil { span.End() @@ -349,7 +556,7 @@ func (s *server) StreamGetObject(stream agent.Agent_StreamGetObjectServer) error } func (s *server) CreateIndex(ctx context.Context, c *payload.Control_CreateIndexRequest) (res *payload.Empty, err error) { - ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.CreateIndex") + ctx, span := trace.StartSpan(ctx, apiName+".CreateIndex") defer func() { if span != nil { span.End() @@ -376,7 +583,7 @@ func (s *server) CreateIndex(ctx context.Context, c *payload.Control_CreateIndex } func (s *server) SaveIndex(ctx context.Context, _ *payload.Empty) (res *payload.Empty, err error) { - ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.SaveIndex") + ctx, span := trace.StartSpan(ctx, apiName+".SaveIndex") defer func() { if span != nil { span.End() @@ -395,7 +602,7 @@ func (s *server) SaveIndex(ctx context.Context, _ *payload.Empty) (res *payload. 
} func (s *server) CreateAndSaveIndex(ctx context.Context, c *payload.Control_CreateIndexRequest) (res *payload.Empty, err error) { - ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.CreateAndSaveIndex") + ctx, span := trace.StartSpan(ctx, apiName+".CreateAndSaveIndex") defer func() { if span != nil { span.End() @@ -414,7 +621,7 @@ func (s *server) CreateAndSaveIndex(ctx context.Context, c *payload.Control_Crea } func (s *server) IndexInfo(ctx context.Context, _ *payload.Empty) (res *payload.Info_Index_Count, err error) { - ctx, span := trace.StartSpan(ctx, "vald/agent-ngt.IndexInfo") + ctx, span := trace.StartSpan(ctx, apiName+".IndexInfo") defer func() { if span != nil { span.End() diff --git a/pkg/agent/core/ngt/handler/grpc/handler_test.go b/pkg/agent/core/ngt/handler/grpc/handler_test.go index 05f77f00a2..72bb64eccd 100644 --- a/pkg/agent/core/ngt/handler/grpc/handler_test.go +++ b/pkg/agent/core/ngt/handler/grpc/handler_test.go @@ -22,8 +22,9 @@ import ( "reflect" "testing" - agent "github.com/vdaas/vald/apis/grpc/agent/core" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/pkg/agent/core/ngt/model" "github.com/vdaas/vald/pkg/agent/core/ngt/service" @@ -31,6 +32,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -79,8 +81,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -96,40 +100,751 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } + }) + } +} + +func Test_server_newLocations(t *testing.T) { + t.Parallel() + type args struct { + uuids []string + } + type fields struct { + name string + ip string + ngt service.NGT + eg errgroup.Group + streamConcurrency int + } + type want struct { + wantLocs *payload.Object_Locations + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, *payload.Object_Locations) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotLocs *payload.Object_Locations) error { + if !reflect.DeepEqual(gotLocs, w.wantLocs) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLocs, w.wantLocs) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + uuids: nil, + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + uuids: nil, + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &server{ + name: test.fields.name, + ip: test.fields.ip, + ngt: test.fields.ngt, + eg: 
test.fields.eg, + streamConcurrency: test.fields.streamConcurrency, + } + + gotLocs := s.newLocations(test.args.uuids...) + if err := test.checkFunc(test.want, gotLocs); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_server_newLocation(t *testing.T) { + t.Parallel() + type args struct { + uuid string + } + type fields struct { + name string + ip string + ngt service.NGT + eg errgroup.Group + streamConcurrency int + } + type want struct { + want *payload.Object_Location + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, *payload.Object_Location) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got *payload.Object_Location) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + uuid: "", + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + uuid: "", + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &server{ + name: test.fields.name, + ip: test.fields.ip, + ngt: test.fields.ngt, + eg: test.fields.eg, + streamConcurrency: test.fields.streamConcurrency, + } + + got := s.newLocation(test.args.uuid) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_server_Exists(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + uid *payload.Object_ID + } + type fields struct { + name string + ip string + ngt service.NGT + eg errgroup.Group + streamConcurrency int + } + type want struct { + wantRes *payload.Object_ID + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, *payload.Object_ID, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotRes *payload.Object_ID, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + uid: nil, + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + uid: nil, + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() 
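// The loop variable is re-captured as `test := tc` above so that each parallel
// subtest sees its own test case; goleak then verifies that the subtest leaks no
// goroutines when it finishes.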
+ defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &server{ + name: test.fields.name, + ip: test.fields.ip, + ngt: test.fields.ngt, + eg: test.fields.eg, + streamConcurrency: test.fields.streamConcurrency, + } + + gotRes, err := s.Exists(test.args.ctx, test.args.uid) + if err := test.checkFunc(test.want, gotRes, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_server_Search(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + req *payload.Search_Request + } + type fields struct { + name string + ip string + ngt service.NGT + eg errgroup.Group + streamConcurrency int + } + type want struct { + want *payload.Search_Response + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, *payload.Search_Response, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got *payload.Search_Response, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + req: nil, + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + req: nil, + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &server{ + name: test.fields.name, + ip: test.fields.ip, + ngt: test.fields.ngt, + eg: test.fields.eg, + streamConcurrency: test.fields.streamConcurrency, + } + + got, err := s.Search(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_server_SearchByID(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + req *payload.Search_IDRequest + } + type fields struct { + name string + ip string + ngt service.NGT + eg errgroup.Group + streamConcurrency int + } + type want struct { + want *payload.Search_Response + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, *payload.Search_Response, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got *payload.Search_Response, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { 
+ ctx: nil, + req: nil, + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + req: nil, + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &server{ + name: test.fields.name, + ip: test.fields.ip, + ngt: test.fields.ngt, + eg: test.fields.eg, + streamConcurrency: test.fields.streamConcurrency, + } + + got, err := s.SearchByID(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_toSearchResponse(t *testing.T) { + t.Parallel() + type args struct { + dists []model.Distance + err error + } + type want struct { + wantRes *payload.Search_Response + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, *payload.Search_Response, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + dists: nil, + err: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + dists: nil, + err: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + gotRes, err := toSearchResponse(test.args.dists, test.args.err) + if err := test.checkFunc(test.want, gotRes, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_server_StreamSearch(t *testing.T) { + t.Parallel() + type args struct { + stream vald.Search_StreamSearchServer + } + type fields struct { + name string + ip string + ngt service.NGT + eg errgroup.Group + streamConcurrency int + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + stream: nil, + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, + 
}, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + stream: nil, + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &server{ + name: test.fields.name, + ip: test.fields.ip, + ngt: test.fields.ngt, + eg: test.fields.eg, + streamConcurrency: test.fields.streamConcurrency, + } + err := s.StreamSearch(test.args.stream) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } }) } } -func Test_server_Exists(t *testing.T) { +func Test_server_StreamSearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - uid *payload.Object_ID + stream vald.Search_StreamSearchByIDServer } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { - wantRes *payload.Object_ID - err error + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *payload.Object_ID, error) error + checkFunc func(want, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Object_ID, err error) error { + defaultCheckFunc := func(w want, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(gotRes, w.wantRes) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) - } return nil } tests := []test{ @@ -138,11 +853,13 @@ func Test_server_Exists(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - uid: nil, + stream: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -156,11 +873,13 @@ func Test_server_Exists(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - uid: nil, + stream: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -170,8 +889,10 @@ func Test_server_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -183,47 +904,53 @@ func Test_server_Exists(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } - gotRes, err := s.Exists(test.args.ctx, test.args.uid) - if err := test.checkFunc(test.want, gotRes, err); err != nil { + err := s.StreamSearchByID(test.args.stream) + if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_server_Search(t *testing.T) { +func Test_server_MultiSearch(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *payload.Search_Request + ctx context.Context + reqs *payload.Search_MultiRequest } type fields struct { + name string + ip string ngt 
service.NGT + eg errgroup.Group streamConcurrency int } type want struct { - want *payload.Search_Response - err error + wantRes *payload.Search_Responses + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *payload.Search_Response, error) error + checkFunc func(want, *payload.Search_Responses, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *payload.Search_Response, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Responses, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) } return nil } @@ -234,10 +961,13 @@ func Test_server_Search(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -252,10 +982,13 @@ func Test_server_Search(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -265,8 +998,10 @@ func Test_server_Search(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -278,47 +1013,53 @@ func Test_server_Search(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } - got, err := s.Search(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotRes, err := s.MultiSearch(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_server_SearchByID(t *testing.T) { +func Test_server_MultiSearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *payload.Search_IDRequest + ctx context.Context + reqs *payload.Search_MultiIDRequest } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { - want *payload.Search_Response - err error + wantRes *payload.Search_Responses + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *payload.Search_Response, error) error + checkFunc func(want, *payload.Search_Responses, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *payload.Search_Response, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Responses, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) } return nil } @@ -329,10 +1070,13 @@ func Test_server_SearchByID(t *testing.T) { name: "test_case_1", 
args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -347,10 +1091,13 @@ func Test_server_SearchByID(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -360,8 +1107,10 @@ func Test_server_SearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -373,37 +1122,48 @@ func Test_server_SearchByID(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } - got, err := s.SearchByID(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotRes, err := s.MultiSearchByID(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_toSearchResponse(t *testing.T) { +func Test_server_Insert(t *testing.T) { + t.Parallel() type args struct { - dists []model.Distance - err error + ctx context.Context + req *payload.Insert_Request + } + type fields struct { + name string + ip string + ngt service.NGT + eg errgroup.Group + streamConcurrency int } type want struct { - wantRes *payload.Search_Response + wantRes *payload.Object_Location err error } type test struct { name string args args + fields fields want want - checkFunc func(want, *payload.Search_Response, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -418,8 +1178,15 @@ func Test_toSearchResponse(t *testing.T) { { name: "test_case_1", args: args { - dists: nil, - err: nil, + ctx: nil, + req: nil, + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -432,8 +1199,15 @@ func Test_toSearchResponse(t *testing.T) { return test { name: "test_case_2", args: args { - dists: nil, - err: nil, + ctx: nil, + req: nil, + }, + fields: fields { + name: "", + ip: "", + ngt: nil, + eg: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -442,8 +1216,10 @@ func Test_toSearchResponse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -454,22 +1230,32 @@ func Test_toSearchResponse(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } + s := &server{ + name: test.fields.name, + ip: test.fields.ip, + ngt: test.fields.ngt, + eg: test.fields.eg, + streamConcurrency: test.fields.streamConcurrency, + } - gotRes, err := toSearchResponse(test.args.dists, test.args.err) + gotRes, err := s.Insert(test.args.ctx, test.args.req) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) 
} } -func Test_server_StreamSearch(t *testing.T) { +func Test_server_StreamInsert(t *testing.T) { + t.Parallel() type args struct { - stream agent.Agent_StreamSearchServer + stream vald.Insert_StreamInsertServer } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { @@ -499,7 +1285,10 @@ func Test_server_StreamSearch(t *testing.T) { stream: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -516,7 +1305,10 @@ func Test_server_StreamSearch(t *testing.T) { stream: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -526,8 +1318,10 @@ func Test_server_StreamSearch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -539,43 +1333,54 @@ func Test_server_StreamSearch(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } - err := s.StreamSearch(test.args.stream) + err := s.StreamInsert(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_server_StreamSearchByID(t *testing.T) { +func Test_server_MultiInsert(t *testing.T) { + t.Parallel() type args struct { - stream agent.Agent_StreamSearchByIDServer + ctx context.Context + reqs *payload.Insert_MultiRequest } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { - err error + wantRes *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -584,10 +1389,14 @@ func Test_server_StreamSearchByID(t *testing.T) { { name: "test_case_1", args: args { - stream: nil, + ctx: nil, + reqs: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -601,10 +1410,14 @@ func Test_server_StreamSearchByID(t *testing.T) { return test { name: "test_case_2", args: args { - stream: nil, + ctx: nil, + reqs: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -614,8 +1427,10 @@ func Test_server_StreamSearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -627,30 +1442,36 @@ func Test_server_StreamSearchByID(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } - err := 
s.StreamSearchByID(test.args.stream) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.MultiInsert(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_server_Insert(t *testing.T) { +func Test_server_Update(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - vec *payload.Object_Vector + req *payload.Update_Request } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { - wantRes *payload.Empty + wantRes *payload.Object_Location err error } type test struct { @@ -658,11 +1479,11 @@ func Test_server_Insert(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Empty, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -678,10 +1499,13 @@ func Test_server_Insert(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - vec: nil, + req: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -696,10 +1520,13 @@ func Test_server_Insert(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - vec: nil, + req: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -709,8 +1536,10 @@ func Test_server_Insert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -722,25 +1551,31 @@ func Test_server_Insert(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } - gotRes, err := s.Insert(test.args.ctx, test.args.vec) + gotRes, err := s.Update(test.args.ctx, test.args.req) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_server_StreamInsert(t *testing.T) { +func Test_server_StreamUpdate(t *testing.T) { + t.Parallel() type args struct { - stream agent.Agent_StreamInsertServer + stream vald.Update_StreamUpdateServer } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { @@ -770,7 +1605,10 @@ func Test_server_StreamInsert(t *testing.T) { stream: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -787,7 +1625,10 @@ func Test_server_StreamInsert(t *testing.T) { stream: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -797,8 +1638,10 @@ func Test_server_StreamInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -810,30 +1653,36 @@ func Test_server_StreamInsert(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + 
ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } - err := s.StreamInsert(test.args.stream) + err := s.StreamUpdate(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_server_MultiInsert(t *testing.T) { +func Test_server_MultiUpdate(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - vecs *payload.Object_Vectors + reqs *payload.Update_MultiRequest } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { - wantRes *payload.Empty + wantRes *payload.Object_Locations err error } type test struct { @@ -841,11 +1690,11 @@ func Test_server_MultiInsert(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Empty, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -861,10 +1710,13 @@ func Test_server_MultiInsert(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - vecs: nil, + reqs: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -879,10 +1731,13 @@ func Test_server_MultiInsert(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - vecs: nil, + reqs: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -892,8 +1747,10 @@ func Test_server_MultiInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -905,47 +1762,53 @@ func Test_server_MultiInsert(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } - gotRes, err := s.MultiInsert(test.args.ctx, test.args.vecs) + gotRes, err := s.MultiUpdate(test.args.ctx, test.args.reqs) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_server_Update(t *testing.T) { +func Test_server_Upsert(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - vec *payload.Object_Vector + req *payload.Upsert_Request } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { - wantRes *payload.Empty - err error + want *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Empty, err error) error { + defaultCheckFunc := func(w want, got *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(gotRes, w.wantRes) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + if 
!reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -956,10 +1819,13 @@ func Test_server_Update(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - vec: nil, + req: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -974,10 +1840,13 @@ func Test_server_Update(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - vec: nil, + req: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -987,8 +1856,10 @@ func Test_server_Update(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1000,25 +1871,31 @@ func Test_server_Update(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } - gotRes, err := s.Update(test.args.ctx, test.args.vec) - if err := test.checkFunc(test.want, gotRes, err); err != nil { + got, err := s.Upsert(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_server_StreamUpdate(t *testing.T) { +func Test_server_StreamUpsert(t *testing.T) { + t.Parallel() type args struct { - stream agent.Agent_StreamUpdateServer + stream vald.Upsert_StreamUpsertServer } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { @@ -1048,7 +1925,10 @@ func Test_server_StreamUpdate(t *testing.T) { stream: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1065,7 +1945,10 @@ func Test_server_StreamUpdate(t *testing.T) { stream: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1075,8 +1958,10 @@ func Test_server_StreamUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1088,30 +1973,36 @@ func Test_server_StreamUpdate(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } - err := s.StreamUpdate(test.args.stream) + err := s.StreamUpsert(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_server_MultiUpdate(t *testing.T) { +func Test_server_MultiUpsert(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - vecs *payload.Object_Vectors + reqs *payload.Upsert_MultiRequest } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { - wantRes *payload.Empty + wantRes *payload.Object_Locations err error } type test struct { @@ -1119,11 +2010,11 @@ func Test_server_MultiUpdate(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := 
func(w want, gotRes *payload.Empty, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -1139,10 +2030,13 @@ func Test_server_MultiUpdate(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - vecs: nil, + reqs: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1157,10 +2051,13 @@ func Test_server_MultiUpdate(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - vecs: nil, + reqs: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1170,8 +2067,10 @@ func Test_server_MultiUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1183,30 +2082,36 @@ func Test_server_MultiUpdate(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } - gotRes, err := s.MultiUpdate(test.args.ctx, test.args.vecs) + gotRes, err := s.MultiUpsert(test.args.ctx, test.args.reqs) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Remove(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - id *payload.Object_ID + req *payload.Remove_Request } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { - wantRes *payload.Empty + wantRes *payload.Object_Location err error } type test struct { @@ -1214,11 +2119,11 @@ func Test_server_Remove(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Empty, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -1234,10 +2139,13 @@ func Test_server_Remove(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - id: nil, + req: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1252,10 +2160,13 @@ func Test_server_Remove(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - id: nil, + req: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1265,8 +2176,10 @@ func Test_server_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1278,25 +2191,31 @@ func Test_server_Remove(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } - gotRes, err := s.Remove(test.args.ctx, test.args.id) + gotRes, err := s.Remove(test.args.ctx, test.args.req) if err := test.checkFunc(test.want, 
gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_StreamRemove(t *testing.T) { + t.Parallel() type args struct { - stream agent.Agent_StreamRemoveServer + stream vald.Remove_StreamRemoveServer } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { @@ -1326,7 +2245,10 @@ func Test_server_StreamRemove(t *testing.T) { stream: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1343,7 +2265,10 @@ func Test_server_StreamRemove(t *testing.T) { stream: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1353,8 +2278,10 @@ func Test_server_StreamRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1366,7 +2293,10 @@ func Test_server_StreamRemove(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } @@ -1374,22 +2304,25 @@ func Test_server_StreamRemove(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_MultiRemove(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - ids *payload.Object_IDs + ctx context.Context + reqs *payload.Remove_MultiRequest } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { - wantRes *payload.Empty + wantRes *payload.Object_Locations err error } type test struct { @@ -1397,11 +2330,11 @@ func Test_server_MultiRemove(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Empty, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -1417,10 +2350,13 @@ func Test_server_MultiRemove(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - ids: nil, + reqs: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1435,10 +2371,13 @@ func Test_server_MultiRemove(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - ids: nil, + reqs: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1448,8 +2387,10 @@ func Test_server_MultiRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1461,26 +2402,32 @@ func Test_server_MultiRemove(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } - gotRes, err := s.MultiRemove(test.args.ctx, test.args.ids) + gotRes, err := s.MultiRemove(test.args.ctx, test.args.reqs) if err := 
test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_GetObject(t *testing.T) { + t.Parallel() type args struct { ctx context.Context id *payload.Object_ID } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { @@ -1515,7 +2462,10 @@ func Test_server_GetObject(t *testing.T) { id: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1533,7 +2483,10 @@ func Test_server_GetObject(t *testing.T) { id: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1543,8 +2496,10 @@ func Test_server_GetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1556,7 +2511,10 @@ func Test_server_GetObject(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } @@ -1564,17 +2522,20 @@ func Test_server_GetObject(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_StreamGetObject(t *testing.T) { + t.Parallel() type args struct { - stream agent.Agent_StreamGetObjectServer + stream vald.Object_StreamGetObjectServer } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { @@ -1604,7 +2565,10 @@ func Test_server_StreamGetObject(t *testing.T) { stream: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1621,7 +2585,10 @@ func Test_server_StreamGetObject(t *testing.T) { stream: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1631,8 +2598,10 @@ func Test_server_StreamGetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1644,7 +2613,10 @@ func Test_server_StreamGetObject(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } @@ -1652,18 +2624,21 @@ func Test_server_StreamGetObject(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_CreateIndex(t *testing.T) { + t.Parallel() type args struct { ctx context.Context c *payload.Control_CreateIndexRequest } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { @@ -1698,7 +2673,10 @@ func Test_server_CreateIndex(t *testing.T) { c: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1716,7 +2694,10 @@ func Test_server_CreateIndex(t *testing.T) { c: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1726,8 +2707,10 @@ func Test_server_CreateIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + 
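Note on the loop rewrites above (`for _, tc := range tests { test := tc ... }` together with `tt.Parallel()`): once the subtests run in parallel, each closure must pin its own copy of the range variable, because prior to Go 1.22 that variable is reused across iterations and every parallel subtest would otherwise observe the final element. A minimal, self-contained sketch of the pattern (hypothetical test name, not part of this change set):

package grpc_test

import "testing"

func TestParallelTablePattern(t *testing.T) {
	t.Parallel()
	tests := []struct{ name string }{
		{name: "case_1"},
		{name: "case_2"},
	}
	for _, tc := range tests {
		test := tc // pin the loop variable; the parallel closure below must not share it
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel() // subtests of the same parent run concurrently
			if test.name == "" {
				tt.Error("unexpected empty case name") // without the copy, both subtests could see "case_2"
			}
		})
	}
}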
test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1739,7 +2722,10 @@ func Test_server_CreateIndex(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } @@ -1747,18 +2733,21 @@ func Test_server_CreateIndex(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_SaveIndex(t *testing.T) { + t.Parallel() type args struct { ctx context.Context in1 *payload.Empty } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { @@ -1793,7 +2782,10 @@ func Test_server_SaveIndex(t *testing.T) { in1: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1811,7 +2803,10 @@ func Test_server_SaveIndex(t *testing.T) { in1: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1821,8 +2816,10 @@ func Test_server_SaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1834,7 +2831,10 @@ func Test_server_SaveIndex(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } @@ -1842,18 +2842,21 @@ func Test_server_SaveIndex(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_CreateAndSaveIndex(t *testing.T) { + t.Parallel() type args struct { ctx context.Context c *payload.Control_CreateIndexRequest } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { @@ -1888,7 +2891,10 @@ func Test_server_CreateAndSaveIndex(t *testing.T) { c: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1906,7 +2912,10 @@ func Test_server_CreateAndSaveIndex(t *testing.T) { c: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -1916,8 +2925,10 @@ func Test_server_CreateAndSaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1929,7 +2940,10 @@ func Test_server_CreateAndSaveIndex(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } @@ -1937,18 +2951,21 @@ func Test_server_CreateAndSaveIndex(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_IndexInfo(t *testing.T) { + t.Parallel() type args struct { ctx context.Context in1 *payload.Empty } type fields struct { + name string + ip string ngt service.NGT + eg errgroup.Group streamConcurrency int } type want struct { @@ -1983,7 +3000,10 @@ func 
Test_server_IndexInfo(t *testing.T) { in1: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -2001,7 +3021,10 @@ func Test_server_IndexInfo(t *testing.T) { in1: nil, }, fields: fields { + name: "", + ip: "", ngt: nil, + eg: nil, streamConcurrency: 0, }, want: want{}, @@ -2011,8 +3034,10 @@ func Test_server_IndexInfo(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -2024,7 +3049,10 @@ func Test_server_IndexInfo(t *testing.T) { test.checkFunc = defaultCheckFunc } s := &server{ + name: test.fields.name, + ip: test.fields.ip, ngt: test.fields.ngt, + eg: test.fields.eg, streamConcurrency: test.fields.streamConcurrency, } @@ -2032,7 +3060,6 @@ func Test_server_IndexInfo(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/core/ngt/handler/grpc/option.go b/pkg/agent/core/ngt/handler/grpc/option.go index 3d8c270c9f..ccf9ac3b3c 100644 --- a/pkg/agent/core/ngt/handler/grpc/option.go +++ b/pkg/agent/core/ngt/handler/grpc/option.go @@ -17,15 +17,45 @@ // Package grpc provides grpc server logic package grpc -import "github.com/vdaas/vald/pkg/agent/core/ngt/service" +import ( + "os" + + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/net" + "github.com/vdaas/vald/pkg/agent/core/ngt/service" +) type Option func(*server) -var ( - defaultOpts = []Option{ - WithStreamConcurrency(20), +var defaultOpts = []Option{ + WithName(func() string { + name, err := os.Hostname() + if err != nil { + log.Warn(err) + } + return name + }()), + WithIP(net.LoadLocalIP()), + WithStreamConcurrency(20), + WithErrGroup(errgroup.Get()), +} + +func WithIP(ip string) Option { + return func(s *server) { + if len(ip) != 0 { + s.ip = ip + } } -) +} + +func WithName(name string) Option { + return func(s *server) { + if len(name) != 0 { + s.name = name + } + } +} func WithNGT(n service.NGT) Option { return func(s *server) { @@ -40,3 +70,11 @@ func WithStreamConcurrency(c int) Option { } } } + +func WithErrGroup(eg errgroup.Group) Option { + return func(s *server) { + if eg != nil { + s.eg = eg + } + } +} diff --git a/pkg/agent/core/ngt/handler/grpc/option_test.go b/pkg/agent/core/ngt/handler/grpc/option_test.go index dbdb548d34..08407d1b80 100644 --- a/pkg/agent/core/ngt/handler/grpc/option_test.go +++ b/pkg/agent/core/ngt/handler/grpc/option_test.go @@ -20,11 +20,247 @@ package grpc import ( "testing" + "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/pkg/agent/core/ngt/service" "go.uber.org/goleak" ) +func TestWithIP(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + ip string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ip: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ip: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithIP(test.args.ip) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithIP(test.args.ip) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithName(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + name string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + name: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + name: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithName(test.args.name) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithName(test.args.name) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + func TestWithNGT(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -99,8 +335,10 @@ func TestWithNGT(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -139,6 +377,7 @@ func TestWithNGT(t *testing.T) { } func TestWithStreamConcurrency(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -213,8 +452,10 @@ func TestWithStreamConcurrency(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -251,3 +492,120 @@ func TestWithStreamConcurrency(t *testing.T) { }) } } + +func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + eg errgroup.Group + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + eg: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + eg: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithErrGroup(test.args.eg) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithErrGroup(test.args.eg) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/agent/core/ngt/handler/rest/handler.go b/pkg/agent/core/ngt/handler/rest/handler.go index 25bb1c4a2b..108b01b54a 100644 --- a/pkg/agent/core/ngt/handler/rest/handler.go +++ b/pkg/agent/core/ngt/handler/rest/handler.go @@ -22,10 +22,10 @@ import ( "io/ioutil" "net/http" - agent "github.com/vdaas/vald/apis/grpc/agent/core" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/net/http/dump" "github.com/vdaas/vald/internal/net/http/json" + "github.com/vdaas/vald/pkg/agent/core/ngt/handler/grpc" ) type Handler interface { @@ -46,7 +46,7 @@ type Handler interface { } type handler struct { - agent agent.AgentServer + agent grpc.Server } func New(opts ...Option) Handler { @@ -80,42 +80,42 @@ func (h *handler) SearchByID(w http.ResponseWriter, r *http.Request) (code int, } func (h *handler) Insert(w http.ResponseWriter, r *http.Request) (code int, err error) { - var req *payload.Object_Vector + var req *payload.Insert_Request return json.Handler(w, r, &req, func() (interface{}, error) { return h.agent.Insert(r.Context(), req) }) } func (h *handler) MultiInsert(w http.ResponseWriter, r *http.Request) (code int, err error) { - var req *payload.Object_Vectors + var req *payload.Insert_MultiRequest return json.Handler(w, r, &req, func() (interface{}, error) { return h.agent.MultiInsert(r.Context(), req) }) } func (h *handler) Update(w http.ResponseWriter, r *http.Request) (code int, err 
error) { - var req *payload.Object_Vector + var req *payload.Update_Request return json.Handler(w, r, &req, func() (interface{}, error) { return h.agent.Update(r.Context(), req) }) } func (h *handler) MultiUpdate(w http.ResponseWriter, r *http.Request) (code int, err error) { - var req *payload.Object_Vectors + var req *payload.Update_MultiRequest return json.Handler(w, r, &req, func() (interface{}, error) { return h.agent.MultiUpdate(r.Context(), req) }) } func (h *handler) Remove(w http.ResponseWriter, r *http.Request) (code int, err error) { - var req *payload.Object_ID + var req *payload.Remove_Request return json.Handler(w, r, &req, func() (interface{}, error) { return h.agent.Remove(r.Context(), req) }) } func (h *handler) MultiRemove(w http.ResponseWriter, r *http.Request) (code int, err error) { - var req *payload.Object_IDs + var req *payload.Remove_MultiRequest return json.Handler(w, r, &req, func() (interface{}, error) { return h.agent.MultiRemove(r.Context(), req) }) diff --git a/pkg/agent/core/ngt/handler/rest/handler_test.go b/pkg/agent/core/ngt/handler/rest/handler_test.go index 9659e184e3..7c2c5d8100 100644 --- a/pkg/agent/core/ngt/handler/rest/handler_test.go +++ b/pkg/agent/core/ngt/handler/rest/handler_test.go @@ -22,12 +22,13 @@ import ( "reflect" "testing" - agent "github.com/vdaas/vald/apis/grpc/agent/core" "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/pkg/agent/core/ngt/handler/grpc" "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -76,8 +77,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -93,18 +96,18 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Index(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { want int @@ -164,8 +167,10 @@ func Test_handler_Index(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -184,18 +189,18 @@ func Test_handler_Index(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Search(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { wantCode int @@ -255,8 +260,10 @@ func Test_handler_Search(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -275,18 +282,18 @@ func Test_handler_Search(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_SearchByID(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { wantCode int @@ -346,8 +353,10 @@ func Test_handler_SearchByID(t *testing.T) 
{ */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -366,18 +375,18 @@ func Test_handler_SearchByID(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Insert(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { wantCode int @@ -437,8 +446,10 @@ func Test_handler_Insert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -457,18 +468,18 @@ func Test_handler_Insert(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_MultiInsert(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { wantCode int @@ -528,8 +539,10 @@ func Test_handler_MultiInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -548,18 +561,18 @@ func Test_handler_MultiInsert(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Update(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { wantCode int @@ -619,8 +632,10 @@ func Test_handler_Update(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -639,18 +654,18 @@ func Test_handler_Update(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_MultiUpdate(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { wantCode int @@ -710,8 +725,10 @@ func Test_handler_MultiUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -730,18 +747,18 @@ func Test_handler_MultiUpdate(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Remove(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { wantCode int @@ -801,8 +818,10 @@ func Test_handler_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
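Since the REST handler now holds the package-local gRPC implementation (grpc.Server) instead of the generated agent.AgentServer, each REST endpoint simply decodes the v1 request payload and delegates to the same method on that server. A rough wiring sketch, assuming the grpc package also exposes a New constructor next to the options shown earlier (hypothetical names throughout):

package wiring // hypothetical

import (
	"github.com/vdaas/vald/pkg/agent/core/ngt/handler/grpc"
	"github.com/vdaas/vald/pkg/agent/core/ngt/handler/rest"
	"github.com/vdaas/vald/pkg/agent/core/ngt/service"
)

// newHandlers shows how the REST layer could be wired on top of the gRPC handler.
// grpc.New is an assumption; WithName/WithIP/WithErrGroup fall back to
// os.Hostname(), net.LoadLocalIP() and errgroup.Get() when omitted.
func newHandlers(ngt service.NGT) rest.Handler {
	g := grpc.New(
		grpc.WithNGT(ngt),
		grpc.WithStreamConcurrency(20),
	)
	return rest.New(rest.WithAgent(g))
}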
test.beforeFunc(test.args) @@ -821,18 +840,18 @@ func Test_handler_Remove(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_MultiRemove(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { wantCode int @@ -892,8 +911,10 @@ func Test_handler_MultiRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -912,18 +933,18 @@ func Test_handler_MultiRemove(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_CreateIndex(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { wantCode int @@ -983,8 +1004,10 @@ func Test_handler_CreateIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1003,18 +1026,18 @@ func Test_handler_CreateIndex(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_SaveIndex(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { wantCode int @@ -1074,8 +1097,10 @@ func Test_handler_SaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1094,18 +1119,18 @@ func Test_handler_SaveIndex(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_CreateAndSaveIndex(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { wantCode int @@ -1165,8 +1190,10 @@ func Test_handler_CreateAndSaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1185,18 +1212,18 @@ func Test_handler_CreateAndSaveIndex(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_GetObject(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { wantCode int @@ -1256,8 +1283,10 @@ func Test_handler_GetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1276,18 +1305,18 @@ func Test_handler_GetObject(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != 
nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Exists(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request } type fields struct { - agent agent.AgentServer + agent grpc.Server } type want struct { wantCode int @@ -1347,8 +1376,10 @@ func Test_handler_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1367,7 +1398,6 @@ func Test_handler_Exists(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/core/ngt/handler/rest/option.go b/pkg/agent/core/ngt/handler/rest/option.go index c7cfd0fd66..13bacc231c 100644 --- a/pkg/agent/core/ngt/handler/rest/option.go +++ b/pkg/agent/core/ngt/handler/rest/option.go @@ -17,15 +17,13 @@ // Package rest provides rest api logic package rest -import agent "github.com/vdaas/vald/apis/grpc/agent/core" +import "github.com/vdaas/vald/pkg/agent/core/ngt/handler/grpc" type Option func(*handler) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} -func WithAgent(a agent.AgentServer) Option { +func WithAgent(a grpc.Server) Option { return func(h *handler) { h.agent = a } diff --git a/pkg/agent/core/ngt/handler/rest/option_test.go b/pkg/agent/core/ngt/handler/rest/option_test.go index 93e0f39712..c5c13c3d5c 100644 --- a/pkg/agent/core/ngt/handler/rest/option_test.go +++ b/pkg/agent/core/ngt/handler/rest/option_test.go @@ -20,15 +20,16 @@ package rest import ( "testing" - agent "github.com/vdaas/vald/apis/grpc/agent/core" + "github.com/vdaas/vald/pkg/agent/core/ngt/handler/grpc" "go.uber.org/goleak" ) func TestWithAgent(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { - a agent.AgentServer + a grpc.Server } type want struct { obj *T @@ -99,8 +100,10 @@ func TestWithAgent(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) diff --git a/pkg/agent/core/ngt/router/option.go b/pkg/agent/core/ngt/router/option.go index 0b2567351e..42a660dd96 100644 --- a/pkg/agent/core/ngt/router/option.go +++ b/pkg/agent/core/ngt/router/option.go @@ -24,11 +24,9 @@ import ( type Option func(*router) -var ( - defaultOpts = []Option{ - WithTimeout("3s"), - } -) +var defaultOpts = []Option{ + WithTimeout("3s"), +} func WithHandler(h rest.Handler) Option { return func(r *router) { diff --git a/pkg/agent/core/ngt/router/option_test.go b/pkg/agent/core/ngt/router/option_test.go index 6ec1ab28de..0bcaca2288 100644 --- a/pkg/agent/core/ngt/router/option_test.go +++ b/pkg/agent/core/ngt/router/option_test.go @@ -26,6 +26,7 @@ import ( ) func TestWithHandler(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -100,8 +101,10 @@ func TestWithHandler(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -140,6 +143,7 @@ func TestWithHandler(t *testing.T) { } func TestWithTimeout(t *testing.T) { + t.Parallel() // Change interface type to the 
type of object you are testing type T = interface{} type args struct { @@ -214,8 +218,10 @@ func TestWithTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -254,6 +260,7 @@ func TestWithTimeout(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -328,8 +335,10 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) diff --git a/pkg/agent/core/ngt/router/router.go b/pkg/agent/core/ngt/router/router.go index e3325b7622..bfb0e26cd5 100644 --- a/pkg/agent/core/ngt/router/router.go +++ b/pkg/agent/core/ngt/router/router.go @@ -32,7 +32,7 @@ type router struct { timeout string } -// New returns REST route&method information from handler interface +// New returns REST route&method information from handler interface. func New(opts ...Option) http.Handler { r := new(router) diff --git a/pkg/agent/core/ngt/router/router_test.go b/pkg/agent/core/ngt/router/router_test.go index e02106ae0e..97248787ae 100644 --- a/pkg/agent/core/ngt/router/router_test.go +++ b/pkg/agent/core/ngt/router/router_test.go @@ -27,6 +27,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -75,8 +76,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -92,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/core/ngt/service/kvs/kvs.go b/pkg/agent/core/ngt/service/kvs/kvs.go index 95823feccb..13248aeac1 100644 --- a/pkg/agent/core/ngt/service/kvs/kvs.go +++ b/pkg/agent/core/ngt/service/kvs/kvs.go @@ -18,6 +18,7 @@ package kvs import ( "context" + "reflect" "sync" "sync/atomic" "unsafe" @@ -42,12 +43,12 @@ type bidi struct { } const ( - // slen is shards length + // slen is shards length. slen = 512 // slen = 4096 - // mask is slen-1 Hex value + // mask is slen-1 Hex value. mask = 0x1FF - // mask = 0xFFF + // mask = 0xFFF. 
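The slen/mask pair above is load-bearing: the bidi map selects a shard with `hash & mask`, which only equals `hash % slen` because slen is a power of two and mask is slen-1 (0x1FF == 511). The two constants therefore have to change together, e.g. slen = 4096 pairs with mask = 0xFFF. A tiny illustration (hypothetical package, not part of this change set):

package shardmask // hypothetical

const (
	slen = 512      // number of shards; must stay a power of two
	mask = slen - 1 // 0x1FF; must always equal slen-1
)

// shardIndex picks the shard for a hashed key the same way kvs.go does.
func shardIndex(h uint64) uint64 {
	return h & mask // equivalent to h % slen for power-of-two slen
}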
) func New() BidiMap { @@ -64,7 +65,7 @@ func New() BidiMap { } func (b *bidi) Get(key string) (uint32, bool) { - return b.uo[xxhash.Sum64(*(*[]byte)(unsafe.Pointer(&key)))&mask].Load(key) + return b.uo[xxhash.Sum64(stringToBytes(key))&mask].Load(key) } func (b *bidi) GetInverse(val uint32) (string, bool) { @@ -72,13 +73,13 @@ func (b *bidi) GetInverse(val uint32) (string, bool) { } func (b *bidi) Set(key string, val uint32) { - b.uo[xxhash.Sum64(*(*[]byte)(unsafe.Pointer(&key)))&mask].Store(key, val) + b.uo[xxhash.Sum64(stringToBytes(key))&mask].Store(key, val) b.ou[val&mask].Store(val, key) atomic.AddUint64(&b.l, 1) } func (b *bidi) Delete(key string) (val uint32, ok bool) { - idx := xxhash.Sum64(*(*[]byte)(unsafe.Pointer(&key))) & mask + idx := xxhash.Sum64(stringToBytes(key)) & mask val, ok = b.uo[idx].Load(key) if !ok { return 0, false @@ -95,7 +96,7 @@ func (b *bidi) DeleteInverse(val uint32) (key string, ok bool) { if !ok { return "", false } - b.uo[xxhash.Sum64(*(*[]byte)(unsafe.Pointer(&key)))&mask].Delete(key) + b.uo[xxhash.Sum64(stringToBytes(key))&mask].Delete(key) b.ou[val&mask].Delete(val) atomic.AddUint64(&b.l, ^uint64(0)) return key, true @@ -124,3 +125,12 @@ func (b *bidi) Range(ctx context.Context, f func(string, uint32) bool) { func (b *bidi) Len() uint64 { return atomic.LoadUint64(&b.l) } + +func stringToBytes(s string) (b []byte) { + sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: sh.Data, + Len: sh.Len, + Cap: sh.Len, + })) +} diff --git a/pkg/agent/core/ngt/service/kvs/kvs_test.go b/pkg/agent/core/ngt/service/kvs/kvs_test.go index 8c5262baad..6b691bed40 100644 --- a/pkg/agent/core/ngt/service/kvs/kvs_test.go +++ b/pkg/agent/core/ngt/service/kvs/kvs_test.go @@ -26,6 +26,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type want struct { want BidiMap } @@ -64,8 +65,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -81,12 +84,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_bidi_Get(t *testing.T) { + t.Parallel() type args struct { key string } @@ -155,8 +158,10 @@ func Test_bidi_Get(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -177,12 +182,12 @@ func Test_bidi_Get(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_bidi_GetInverse(t *testing.T) { + t.Parallel() type args struct { val uint32 } @@ -251,8 +256,10 @@ func Test_bidi_GetInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -273,12 +280,12 @@ func Test_bidi_GetInverse(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_bidi_Set(t *testing.T) { + t.Parallel() type args struct { key string val uint32 @@ -342,8 +349,10 @@ func Test_bidi_Set(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc 
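stringToBytes above reinterprets the string's backing array as a []byte without copying, so the hashing hot path no longer allocates, and unlike the previous inline *(*[]byte)(unsafe.Pointer(&key)) cast it fills in the slice header's Cap field explicitly. The returned slice aliases immutable string memory and must be treated as read-only. A minimal sketch of the same technique, with a standard-library hash standing in for xxhash (hypothetical names):

package zerocopy // hypothetical

import (
	"hash/crc32"
	"reflect"
	"unsafe"
)

// stringToBytes mirrors the helper added to kvs.go: a zero-copy, read-only view of s.
func stringToBytes(s string) []byte {
	sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
		Data: sh.Data,
		Len:  sh.Len,
		Cap:  sh.Len,
	}))
}

// checksum hashes a key without the allocation a []byte(key) conversion would make.
func checksum(key string) uint32 {
	return crc32.ChecksumIEEE(stringToBytes(key)) // read-only use only; never mutate the slice
}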
t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -369,6 +378,7 @@ func Test_bidi_Set(t *testing.T) { } func Test_bidi_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -437,8 +447,10 @@ func Test_bidi_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -459,12 +471,12 @@ func Test_bidi_Delete(t *testing.T) { if err := test.checkFunc(test.want, gotVal, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_bidi_DeleteInverse(t *testing.T) { + t.Parallel() type args struct { val uint32 } @@ -533,8 +545,10 @@ func Test_bidi_DeleteInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -555,12 +569,12 @@ func Test_bidi_DeleteInverse(t *testing.T) { if err := test.checkFunc(test.want, gotKey, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_bidi_Range(t *testing.T) { + t.Parallel() type args struct { ctx context.Context f func(string, uint32) bool @@ -624,8 +638,10 @@ func Test_bidi_Range(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -651,6 +667,7 @@ func Test_bidi_Range(t *testing.T) { } func Test_bidi_Len(t *testing.T) { + t.Parallel() type fields struct { ou [slen]*ou uo [slen]*uo @@ -705,8 +722,10 @@ func Test_bidi_Len(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -727,7 +746,79 @@ func Test_bidi_Len(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } + }) + } +} + +func Test_stringToBytes(t *testing.T) { + t.Parallel() + type args struct { + s string + } + type want struct { + wantB []byte + } + type test struct { + name string + args args + want want + checkFunc func(want, []byte) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotB []byte) error { + if !reflect.DeepEqual(gotB, w.wantB) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotB, w.wantB) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + s: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + s: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + gotB := stringToBytes(test.args.s) + if err := test.checkFunc(test.want, gotB); err != nil { + tt.Errorf("error = %v", err) + } }) } } diff --git 
a/pkg/agent/core/ngt/service/kvs/ou_test.go b/pkg/agent/core/ngt/service/kvs/ou_test.go index 62413f54b6..0eaa03caa3 100644 --- a/pkg/agent/core/ngt/service/kvs/ou_test.go +++ b/pkg/agent/core/ngt/service/kvs/ou_test.go @@ -28,6 +28,7 @@ import ( ) func Test_newEntryOu(t *testing.T) { + t.Parallel() type args struct { i string } @@ -76,8 +77,10 @@ func Test_newEntryOu(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -93,12 +96,12 @@ func Test_newEntryOu(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ou_Load(t *testing.T) { + t.Parallel() type args struct { key uint32 } @@ -170,8 +173,10 @@ func Test_ou_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -193,12 +198,12 @@ func Test_ou_Load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryOu_load(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -251,8 +256,10 @@ func Test_entryOu_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -271,12 +278,12 @@ func Test_entryOu_load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ou_Store(t *testing.T) { + t.Parallel() type args struct { key uint32 value string @@ -343,8 +350,10 @@ func Test_ou_Store(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -371,6 +380,7 @@ func Test_ou_Store(t *testing.T) { } func Test_entryOu_tryStore(t *testing.T) { + t.Parallel() type args struct { i *string } @@ -429,8 +439,10 @@ func Test_entryOu_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -449,12 +461,12 @@ func Test_entryOu_tryStore(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryOu_unexpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -503,8 +515,10 @@ func Test_entryOu_unexpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -523,12 +537,12 @@ func Test_entryOu_unexpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotWasExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryOu_storeLocked(t *testing.T) { + t.Parallel() type args struct { i *string } @@ -583,8 +597,10 @@ func Test_entryOu_storeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc 
t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -608,6 +624,7 @@ func Test_entryOu_storeLocked(t *testing.T) { } func Test_ou_Delete(t *testing.T) { + t.Parallel() type args struct { key uint32 } @@ -671,8 +688,10 @@ func Test_ou_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -699,6 +718,7 @@ func Test_ou_Delete(t *testing.T) { } func Test_entryOu_delete(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -747,8 +767,10 @@ func Test_entryOu_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -767,12 +789,12 @@ func Test_entryOu_delete(t *testing.T) { if err := test.checkFunc(test.want, gotHadValue); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ou_missLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -826,8 +848,10 @@ func Test_ou_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -854,6 +878,7 @@ func Test_ou_missLocked(t *testing.T) { } func Test_ou_dirtyLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -907,8 +932,10 @@ func Test_ou_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -935,6 +962,7 @@ func Test_ou_dirtyLocked(t *testing.T) { } func Test_entryOu_tryExpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -983,8 +1011,10 @@ func Test_entryOu_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -1003,7 +1033,6 @@ func Test_entryOu_tryExpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotIsExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/core/ngt/service/kvs/uo_test.go b/pkg/agent/core/ngt/service/kvs/uo_test.go index c0528020c2..0a660612e4 100644 --- a/pkg/agent/core/ngt/service/kvs/uo_test.go +++ b/pkg/agent/core/ngt/service/kvs/uo_test.go @@ -28,6 +28,7 @@ import ( ) func Test_newEntryUo(t *testing.T) { + t.Parallel() type args struct { i uint32 } @@ -76,8 +77,10 @@ func Test_newEntryUo(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -93,12 +96,12 @@ func Test_newEntryUo(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_uo_Load(t *testing.T) { + t.Parallel() type args struct { key string } @@ -170,8 +173,10 @@ func Test_uo_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range 
tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -193,12 +198,12 @@ func Test_uo_Load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryUo_load(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -251,8 +256,10 @@ func Test_entryUo_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -271,12 +278,12 @@ func Test_entryUo_load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_uo_Store(t *testing.T) { + t.Parallel() type args struct { key string value uint32 @@ -343,8 +350,10 @@ func Test_uo_Store(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -371,6 +380,7 @@ func Test_uo_Store(t *testing.T) { } func Test_entryUo_tryStore(t *testing.T) { + t.Parallel() type args struct { i *uint32 } @@ -429,8 +439,10 @@ func Test_entryUo_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -449,12 +461,12 @@ func Test_entryUo_tryStore(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryUo_unexpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -503,8 +515,10 @@ func Test_entryUo_unexpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -523,12 +537,12 @@ func Test_entryUo_unexpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotWasExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryUo_storeLocked(t *testing.T) { + t.Parallel() type args struct { i *uint32 } @@ -583,8 +597,10 @@ func Test_entryUo_storeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -608,6 +624,7 @@ func Test_entryUo_storeLocked(t *testing.T) { } func Test_uo_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -671,8 +688,10 @@ func Test_uo_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -699,6 +718,7 @@ func Test_uo_Delete(t *testing.T) { } func Test_entryUo_delete(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -747,8 +767,10 @@ func Test_entryUo_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if 
test.beforeFunc != nil { test.beforeFunc() @@ -767,12 +789,12 @@ func Test_entryUo_delete(t *testing.T) { if err := test.checkFunc(test.want, gotHadValue); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_uo_Range(t *testing.T) { + t.Parallel() type args struct { f func(uuid string, oid uint32) bool } @@ -836,8 +858,10 @@ func Test_uo_Range(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -864,6 +888,7 @@ func Test_uo_Range(t *testing.T) { } func Test_uo_missLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -917,8 +942,10 @@ func Test_uo_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -945,6 +972,7 @@ func Test_uo_missLocked(t *testing.T) { } func Test_uo_dirtyLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -998,8 +1026,10 @@ func Test_uo_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -1026,6 +1056,7 @@ func Test_uo_dirtyLocked(t *testing.T) { } func Test_entryUo_tryExpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -1074,8 +1105,10 @@ func Test_entryUo_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -1094,7 +1127,6 @@ func Test_entryUo_tryExpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotIsExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/core/ngt/service/ngt.go b/pkg/agent/core/ngt/service/ngt.go index 26261b7db1..fdaa631cc1 100644 --- a/pkg/agent/core/ngt/service/ngt.go +++ b/pkg/agent/core/ngt/service/ngt.go @@ -30,7 +30,7 @@ import ( "time" "github.com/vdaas/vald/internal/config" - core "github.com/vdaas/vald/internal/core/ngt" + core "github.com/vdaas/vald/internal/core/algorithm/ngt" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/file" diff --git a/pkg/agent/core/ngt/service/ngt_test.go b/pkg/agent/core/ngt/service/ngt_test.go index 6a74d8e418..8626f07ef6 100644 --- a/pkg/agent/core/ngt/service/ngt_test.go +++ b/pkg/agent/core/ngt/service/ngt_test.go @@ -25,7 +25,7 @@ import ( "time" "github.com/vdaas/vald/internal/config" - core "github.com/vdaas/vald/internal/core/ngt" + core "github.com/vdaas/vald/internal/core/algorithm/ngt" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/pkg/agent/core/ngt/model" @@ -34,6 +34,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.NGT opts []Option @@ -89,8 +90,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -106,12 +109,12 @@ func TestNew(t 
*testing.T) { if err := test.checkFunc(test.want, gotNn, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_initNGT(t *testing.T) { + t.Parallel() type args struct { opts []core.Option } @@ -245,8 +248,10 @@ func Test_ngt_initNGT(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -290,12 +295,12 @@ func Test_ngt_initNGT(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_loadKVS(t *testing.T) { + t.Parallel() type fields struct { core core.NGT eg errgroup.Group @@ -419,8 +424,10 @@ func Test_ngt_loadKVS(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -464,12 +471,12 @@ func Test_ngt_loadKVS(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -603,8 +610,10 @@ func Test_ngt_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -648,12 +657,12 @@ func Test_ngt_Start(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_Search(t *testing.T) { + t.Parallel() type args struct { vec []float32 size uint32 @@ -800,8 +809,10 @@ func Test_ngt_Search(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -845,12 +856,12 @@ func Test_ngt_Search(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_SearchByID(t *testing.T) { + t.Parallel() type args struct { uuid string size uint32 @@ -997,8 +1008,10 @@ func Test_ngt_SearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1042,12 +1055,12 @@ func Test_ngt_SearchByID(t *testing.T) { if err := test.checkFunc(test.want, gotDst, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_Insert(t *testing.T) { + t.Parallel() type args struct { uuid string vec []float32 @@ -1184,8 +1197,10 @@ func Test_ngt_Insert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1229,12 +1244,12 @@ func Test_ngt_Insert(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_insert(t *testing.T) { + t.Parallel() type args struct { uuid string vec []float32 @@ -1377,8 +1392,10 @@ func Test_ngt_insert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt 
*testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1422,12 +1439,12 @@ func Test_ngt_insert(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_InsertMultiple(t *testing.T) { + t.Parallel() type args struct { vecs map[string][]float32 } @@ -1561,8 +1578,10 @@ func Test_ngt_InsertMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1606,12 +1625,12 @@ func Test_ngt_InsertMultiple(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_Update(t *testing.T) { + t.Parallel() type args struct { uuid string vec []float32 @@ -1748,8 +1767,10 @@ func Test_ngt_Update(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1793,12 +1814,12 @@ func Test_ngt_Update(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_UpdateMultiple(t *testing.T) { + t.Parallel() type args struct { vecs map[string][]float32 } @@ -1932,8 +1953,10 @@ func Test_ngt_UpdateMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1977,12 +2000,12 @@ func Test_ngt_UpdateMultiple(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_Delete(t *testing.T) { + t.Parallel() type args struct { uuid string } @@ -2116,8 +2139,10 @@ func Test_ngt_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -2161,12 +2186,12 @@ func Test_ngt_Delete(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_delete(t *testing.T) { + t.Parallel() type args struct { uuid string t int64 @@ -2303,8 +2328,10 @@ func Test_ngt_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -2348,12 +2375,12 @@ func Test_ngt_delete(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_DeleteMultiple(t *testing.T) { + t.Parallel() type args struct { uuids []string } @@ -2487,8 +2514,10 @@ func Test_ngt_DeleteMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -2532,12 +2561,12 @@ func Test_ngt_DeleteMultiple(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_GetObject(t *testing.T) { + t.Parallel() type 
args struct { uuid string } @@ -2675,8 +2704,10 @@ func Test_ngt_GetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -2720,12 +2751,12 @@ func Test_ngt_GetObject(t *testing.T) { if err := test.checkFunc(test.want, gotVec, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_CreateIndex(t *testing.T) { + t.Parallel() type args struct { ctx context.Context poolSize uint32 @@ -2862,8 +2893,10 @@ func Test_ngt_CreateIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -2907,12 +2940,12 @@ func Test_ngt_CreateIndex(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_SaveIndex(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -3046,8 +3079,10 @@ func Test_ngt_SaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -3091,12 +3126,12 @@ func Test_ngt_SaveIndex(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_saveIndex(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -3230,8 +3265,10 @@ func Test_ngt_saveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -3275,12 +3312,12 @@ func Test_ngt_saveIndex(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_CreateAndSaveIndex(t *testing.T) { + t.Parallel() type args struct { ctx context.Context poolSize uint32 @@ -3417,8 +3454,10 @@ func Test_ngt_CreateAndSaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -3462,12 +3501,12 @@ func Test_ngt_CreateAndSaveIndex(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_Exists(t *testing.T) { + t.Parallel() type args struct { uuid string } @@ -3605,8 +3644,10 @@ func Test_ngt_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -3650,12 +3691,12 @@ func Test_ngt_Exists(t *testing.T) { if err := test.checkFunc(test.want, gotOid, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_insertCache(t *testing.T) { + t.Parallel() type args struct { uuid string } @@ -3793,8 +3834,10 @@ func Test_ngt_insertCache(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
test.beforeFunc(test.args) @@ -3838,12 +3881,12 @@ func Test_ngt_insertCache(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_IsSaving(t *testing.T) { + t.Parallel() type fields struct { core core.NGT eg errgroup.Group @@ -3967,8 +4010,10 @@ func Test_ngt_IsSaving(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -4012,12 +4057,12 @@ func Test_ngt_IsSaving(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_IsIndexing(t *testing.T) { + t.Parallel() type fields struct { core core.NGT eg errgroup.Group @@ -4141,8 +4186,10 @@ func Test_ngt_IsIndexing(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -4186,12 +4233,12 @@ func Test_ngt_IsIndexing(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_UUIDs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -4325,8 +4372,10 @@ func Test_ngt_UUIDs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -4370,12 +4419,12 @@ func Test_ngt_UUIDs(t *testing.T) { if err := test.checkFunc(test.want, gotUuids); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_UncommittedUUIDs(t *testing.T) { + t.Parallel() type fields struct { core core.NGT eg errgroup.Group @@ -4499,8 +4548,10 @@ func Test_ngt_UncommittedUUIDs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -4544,12 +4595,12 @@ func Test_ngt_UncommittedUUIDs(t *testing.T) { if err := test.checkFunc(test.want, gotUuids); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_NumberOfCreateIndexExecution(t *testing.T) { + t.Parallel() type fields struct { core core.NGT eg errgroup.Group @@ -4673,8 +4724,10 @@ func Test_ngt_NumberOfCreateIndexExecution(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -4718,12 +4771,12 @@ func Test_ngt_NumberOfCreateIndexExecution(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_NumberOfProactiveGCExecution(t *testing.T) { + t.Parallel() type fields struct { core core.NGT eg errgroup.Group @@ -4847,8 +4900,10 @@ func Test_ngt_NumberOfProactiveGCExecution(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -4892,12 +4947,12 @@ func Test_ngt_NumberOfProactiveGCExecution(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func 
Test_ngt_gc(t *testing.T) { + t.Parallel() type fields struct { core core.NGT eg errgroup.Group @@ -5017,8 +5072,10 @@ func Test_ngt_gc(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -5067,6 +5124,7 @@ func Test_ngt_gc(t *testing.T) { } func Test_ngt_Len(t *testing.T) { + t.Parallel() type fields struct { core core.NGT eg errgroup.Group @@ -5190,8 +5248,10 @@ func Test_ngt_Len(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -5235,12 +5295,12 @@ func Test_ngt_Len(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_InsertVCacheLen(t *testing.T) { + t.Parallel() type fields struct { core core.NGT eg errgroup.Group @@ -5364,8 +5424,10 @@ func Test_ngt_InsertVCacheLen(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -5409,12 +5471,12 @@ func Test_ngt_InsertVCacheLen(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_DeleteVCacheLen(t *testing.T) { + t.Parallel() type fields struct { core core.NGT eg errgroup.Group @@ -5538,8 +5600,10 @@ func Test_ngt_DeleteVCacheLen(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -5583,12 +5647,12 @@ func Test_ngt_DeleteVCacheLen(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_ngt_Close(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -5722,8 +5786,10 @@ func Test_ngt_Close(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -5767,7 +5833,6 @@ func Test_ngt_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/core/ngt/service/option.go b/pkg/agent/core/ngt/service/option.go index 9d8c7a7983..b77c5bc8fb 100644 --- a/pkg/agent/core/ngt/service/option.go +++ b/pkg/agent/core/ngt/service/option.go @@ -21,7 +21,7 @@ import ( "strings" "time" - core "github.com/vdaas/vald/internal/core/ngt" + core "github.com/vdaas/vald/internal/core/algorithm/ngt" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/rand" "github.com/vdaas/vald/internal/timeutil" @@ -29,23 +29,21 @@ import ( type Option func(n *ngt) error -var ( - defaultOpts = []Option{ - WithErrGroup(errgroup.Get()), - WithAutoIndexCheckDuration("30m"), - WithAutoIndexDurationLimit("24h"), - WithAutoSaveIndexDuration("35m"), - WithAutoIndexLength(100), - WithInitialDelayMaxDuration("3m"), - WithMinLoadIndexTimeout("3m"), - WithMaxLoadIndexTimeout("10m"), - WithLoadIndexTimeoutFactor("1ms"), - WithDefaultPoolSize(core.DefaultPoolSize), - WithDefaultRadius(core.DefaultRadius), - 
WithDefaultEpsilon(core.DefaultEpsilon), - WithProactiveGC(true), - } -) +var defaultOpts = []Option{ + WithErrGroup(errgroup.Get()), + WithAutoIndexCheckDuration("30m"), + WithAutoIndexDurationLimit("24h"), + WithAutoSaveIndexDuration("35m"), + WithAutoIndexLength(100), + WithInitialDelayMaxDuration("3m"), + WithMinLoadIndexTimeout("3m"), + WithMaxLoadIndexTimeout("10m"), + WithLoadIndexTimeoutFactor("1ms"), + WithDefaultPoolSize(core.DefaultPoolSize), + WithDefaultRadius(core.DefaultRadius), + WithDefaultEpsilon(core.DefaultEpsilon), + WithProactiveGC(true), +} func WithErrGroup(eg errgroup.Group) Option { return func(n *ngt) error { diff --git a/pkg/agent/core/ngt/service/option_test.go b/pkg/agent/core/ngt/service/option_test.go index fe2561e1aa..9c5e0fdec9 100644 --- a/pkg/agent/core/ngt/service/option_test.go +++ b/pkg/agent/core/ngt/service/option_test.go @@ -24,6 +24,7 @@ import ( ) func TestWithErrGroup(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -98,8 +99,10 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -138,6 +141,7 @@ func TestWithErrGroup(t *testing.T) { } func TestWithEnableInMemoryMode(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -212,8 +216,10 @@ func TestWithEnableInMemoryMode(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -252,6 +258,7 @@ func TestWithEnableInMemoryMode(t *testing.T) { } func TestWithIndexPath(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -326,8 +333,10 @@ func TestWithIndexPath(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -366,6 +375,7 @@ func TestWithIndexPath(t *testing.T) { } func TestWithAutoIndexCheckDuration(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -440,8 +450,10 @@ func TestWithAutoIndexCheckDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -480,6 +492,7 @@ func TestWithAutoIndexCheckDuration(t *testing.T) { } func TestWithAutoIndexDurationLimit(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -554,8 +567,10 @@ func TestWithAutoIndexDurationLimit(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -594,6 +609,7 @@ func TestWithAutoIndexDurationLimit(t *testing.T) { } func TestWithAutoSaveIndexDuration(t *testing.T) { + t.Parallel() // Change 
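defaultOpts together with the Option func(n *ngt) error signature is the functional-options pattern: defaults are applied first, caller-supplied options can override them, and any option may fail with an error. A condensed sketch of the pattern with illustrative names (not the real ngt fields or option set):

package main

import (
	"errors"
	"fmt"
	"time"
)

type service struct {
	timeout time.Duration
}

// Option mutates the service being built and may reject invalid values.
type Option func(*service) error

func WithTimeout(d time.Duration) Option {
	return func(s *service) error {
		if d <= 0 {
			return errors.New("timeout must be positive")
		}
		s.timeout = d
		return nil
	}
}

// defaults run first, so callers only pass what they want to change.
var defaultOpts = []Option{WithTimeout(30 * time.Second)}

func New(opts ...Option) (*service, error) {
	s := new(service)
	for _, opt := range append(defaultOpts, opts...) {
		if err := opt(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := New(WithTimeout(5 * time.Second))
	fmt.Println(s.timeout, err) // 5s <nil>
}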
interface type to the type of object you are testing type T = interface{} type args struct { @@ -668,8 +684,10 @@ func TestWithAutoSaveIndexDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -708,6 +726,7 @@ func TestWithAutoSaveIndexDuration(t *testing.T) { } func TestWithAutoIndexLength(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -782,8 +801,10 @@ func TestWithAutoIndexLength(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -822,6 +843,7 @@ func TestWithAutoIndexLength(t *testing.T) { } func TestWithInitialDelayMaxDuration(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -896,8 +918,10 @@ func TestWithInitialDelayMaxDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -936,6 +960,7 @@ func TestWithInitialDelayMaxDuration(t *testing.T) { } func TestWithMinLoadIndexTimeout(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -1010,8 +1035,10 @@ func TestWithMinLoadIndexTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1050,6 +1077,7 @@ func TestWithMinLoadIndexTimeout(t *testing.T) { } func TestWithMaxLoadIndexTimeout(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -1124,8 +1152,10 @@ func TestWithMaxLoadIndexTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1164,6 +1194,7 @@ func TestWithMaxLoadIndexTimeout(t *testing.T) { } func TestWithLoadIndexTimeoutFactor(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -1238,8 +1269,10 @@ func TestWithLoadIndexTimeoutFactor(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1278,6 +1311,7 @@ func TestWithLoadIndexTimeoutFactor(t *testing.T) { } func TestWithDefaultPoolSize(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -1352,8 +1386,10 @@ func TestWithDefaultPoolSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1392,6 
+1428,7 @@ func TestWithDefaultPoolSize(t *testing.T) { } func TestWithDefaultRadius(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -1466,8 +1503,10 @@ func TestWithDefaultRadius(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1506,6 +1545,7 @@ func TestWithDefaultRadius(t *testing.T) { } func TestWithDefaultEpsilon(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -1580,8 +1620,10 @@ func TestWithDefaultEpsilon(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1620,6 +1662,7 @@ func TestWithDefaultEpsilon(t *testing.T) { } func TestWithProactiveGC(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -1694,8 +1737,10 @@ func TestWithProactiveGC(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) diff --git a/pkg/agent/core/ngt/service/vcaches_test.go b/pkg/agent/core/ngt/service/vcaches_test.go index e8b4b4a3b5..4c3f02df28 100644 --- a/pkg/agent/core/ngt/service/vcaches_test.go +++ b/pkg/agent/core/ngt/service/vcaches_test.go @@ -28,6 +28,7 @@ import ( ) func Test_newEntryVCache(t *testing.T) { + t.Parallel() type args struct { i vcache } @@ -76,8 +77,10 @@ func Test_newEntryVCache(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -93,12 +96,12 @@ func Test_newEntryVCache(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_vcaches_Load(t *testing.T) { + t.Parallel() type args struct { key string } @@ -173,8 +176,10 @@ func Test_vcaches_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -197,12 +202,12 @@ func Test_vcaches_Load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryVCache_load(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -255,8 +260,10 @@ func Test_entryVCache_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -275,12 +282,12 @@ func Test_entryVCache_load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_vcaches_Store(t *testing.T) { + t.Parallel() type args struct { key string value vcache @@ -350,8 +357,10 @@ func Test_vcaches_Store(t *testing.T) { */ } - for _, test := 
range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -379,6 +388,7 @@ func Test_vcaches_Store(t *testing.T) { } func Test_entryVCache_tryStore(t *testing.T) { + t.Parallel() type args struct { i *vcache } @@ -437,8 +447,10 @@ func Test_entryVCache_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -457,12 +469,12 @@ func Test_entryVCache_tryStore(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryVCache_unexpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -511,8 +523,10 @@ func Test_entryVCache_unexpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -531,12 +545,12 @@ func Test_entryVCache_unexpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotWasExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryVCache_storeLocked(t *testing.T) { + t.Parallel() type args struct { i *vcache } @@ -591,8 +605,10 @@ func Test_entryVCache_storeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -616,6 +632,7 @@ func Test_entryVCache_storeLocked(t *testing.T) { } func Test_vcaches_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -682,8 +699,10 @@ func Test_vcaches_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -711,6 +730,7 @@ func Test_vcaches_Delete(t *testing.T) { } func Test_entryVCache_delete(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -759,8 +779,10 @@ func Test_entryVCache_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -779,12 +801,12 @@ func Test_entryVCache_delete(t *testing.T) { if err := test.checkFunc(test.want, gotHadValue); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_vcaches_Range(t *testing.T) { + t.Parallel() type args struct { f func(key string, value vcache) bool } @@ -851,8 +873,10 @@ func Test_vcaches_Range(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -880,6 +904,7 @@ func Test_vcaches_Range(t *testing.T) { } func Test_vcaches_missLocked(t *testing.T) { + t.Parallel() type fields struct { length uint64 mu sync.Mutex @@ -936,8 +961,10 @@ func Test_vcaches_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + 
tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -965,6 +992,7 @@ func Test_vcaches_missLocked(t *testing.T) { } func Test_vcaches_dirtyLocked(t *testing.T) { + t.Parallel() type fields struct { length uint64 mu sync.Mutex @@ -1021,8 +1049,10 @@ func Test_vcaches_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -1050,6 +1080,7 @@ func Test_vcaches_dirtyLocked(t *testing.T) { } func Test_entryVCache_tryExpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -1098,8 +1129,10 @@ func Test_entryVCache_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -1118,12 +1151,12 @@ func Test_entryVCache_tryExpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotIsExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_vcaches_Len(t *testing.T) { + t.Parallel() type fields struct { length uint64 mu sync.Mutex @@ -1184,8 +1217,10 @@ func Test_vcaches_Len(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -1208,7 +1243,6 @@ func Test_vcaches_Len(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/core/ngt/usecase/agentd.go b/pkg/agent/core/ngt/usecase/agentd.go index dbd939453d..3137a1294e 100644 --- a/pkg/agent/core/ngt/usecase/agentd.go +++ b/pkg/agent/core/ngt/usecase/agentd.go @@ -19,7 +19,8 @@ package usecase import ( "context" - agent "github.com/vdaas/vald/apis/grpc/agent/core" + agent "github.com/vdaas/vald/apis/grpc/v1/agent/core" + vald "github.com/vdaas/vald/apis/grpc/v1/vald" iconf "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/net/grpc" @@ -76,17 +77,16 @@ func New(cfg *config.Data) (r runner.Runner, err error) { grpcServerOptions := []server.Option{ server.WithGRPCRegistFunc(func(srv *grpc.Server) { agent.RegisterAgentServer(srv, g) + vald.RegisterValdServer(srv, g) }), server.WithGRPCOption( grpc.ChainUnaryInterceptor(grpc.RecoverInterceptor()), grpc.ChainStreamInterceptor(grpc.RecoverStreamInterceptor()), ), server.WithPreStartFunc(func() error { - // TODO check unbackupped upstream return nil }), server.WithPreStopFunction(func() error { - // TODO backup all index data here return nil }), } @@ -130,7 +130,6 @@ func New(cfg *config.Data) (r runner.Runner, err error) { }), // TODO add GraphQL handler ) - if err != nil { return nil, err } diff --git a/pkg/agent/core/ngt/usecase/agentd_test.go b/pkg/agent/core/ngt/usecase/agentd_test.go index 12ec0d5eb3..d55a1b04f5 100644 --- a/pkg/agent/core/ngt/usecase/agentd_test.go +++ b/pkg/agent/core/ngt/usecase/agentd_test.go @@ -32,6 +32,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -84,8 +85,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc 
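In agentd.go the same handler g is now registered for both the Agent service and the Vald service on a single grpc.Server, so one listener serves both APIs. A minimal sketch of multi-service registration on one server, using the stock health and reflection services as stand-ins for the generated Agent/Vald registrations (port and setup are illustrative):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
	"google.golang.org/grpc/reflection"
)

func main() {
	lis, err := net.Listen("tcp", ":8081")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	// several services share one *grpc.Server and one port, just as the agent
	// registers both its Agent service and the Vald service on the same srv
	healthpb.RegisterHealthServer(srv, health.NewServer())
	reflection.Register(srv)
	log.Fatal(srv.Serve(lis))
}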
!= nil { test.beforeFunc(test.args) @@ -101,12 +104,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -177,8 +180,10 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -201,12 +206,12 @@ func Test_run_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -281,8 +286,10 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -305,12 +312,12 @@ func Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -381,8 +388,10 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -405,12 +414,12 @@ func Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -481,8 +490,10 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -505,12 +516,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -581,8 +592,10 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -605,7 +618,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/internal/metadata/metadata_test.go b/pkg/agent/internal/metadata/metadata_test.go new file mode 100644 index 0000000000..db16bd1e19 --- /dev/null +++ b/pkg/agent/internal/metadata/metadata_test.go @@ -0,0 +1,179 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package metadata provides agent metadata structs and info. +package metadata + +import ( + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestLoad(t *testing.T) { + t.Parallel() + type args struct { + path string + } + type want struct { + want *Metadata + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, *Metadata, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got *Metadata, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + path: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + path: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got, err := Load(test.args.path) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestStore(t *testing.T) { + t.Parallel() + type args struct { + path string + meta *Metadata + } + type want struct { + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + path: "", + meta: Metadata{}, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + path: "", + meta: Metadata{}, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + err := Store(test.args.path, test.args.meta) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/pkg/agent/sidecar/config/config_test.go b/pkg/agent/sidecar/config/config_test.go index f38b1e493b..837d3a8116 100644 --- 
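The new metadata tests only pin down the package surface exercised here: Load(path) returns (*Metadata, error) and Store(path, meta) returns error. A round-trip sketch under that assumption — the path is illustrative, the Metadata fields are not shown, and the snippet would have to live under pkg/agent/ because the package is internal:

package agent // placed under pkg/agent/ since the metadata package is internal

import (
	"log"

	"github.com/vdaas/vald/pkg/agent/internal/metadata"
)

func roundTrip(path string) error {
	meta, err := metadata.Load(path) // (*Metadata, error), as exercised by TestLoad
	if err != nil {
		return err
	}
	if err := metadata.Store(path, meta); err != nil { // error, as exercised by TestStore
		return err
	}
	log.Printf("metadata round-tripped via %s", path)
	return nil
}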
a/pkg/agent/sidecar/config/config_test.go +++ b/pkg/agent/sidecar/config/config_test.go @@ -26,6 +26,7 @@ import ( ) func TestMode_String(t *testing.T) { + t.Parallel() type want struct { want string } @@ -65,9 +66,11 @@ func TestMode_String(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -82,12 +85,12 @@ func TestMode_String(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestSidecarMode(t *testing.T) { + t.Parallel() type args struct { m string } @@ -136,9 +139,11 @@ func TestSidecarMode(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -153,12 +158,12 @@ func TestSidecarMode(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestNewConfig(t *testing.T) { + t.Parallel() type args struct { path string } @@ -211,9 +216,11 @@ func TestNewConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -228,7 +235,6 @@ func TestNewConfig(t *testing.T) { if err := test.checkFunc(test.want, gotCfg, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/sidecar/handler/grpc/handler.go b/pkg/agent/sidecar/handler/grpc/handler.go index d990a01965..8df8a1a3fa 100644 --- a/pkg/agent/sidecar/handler/grpc/handler.go +++ b/pkg/agent/sidecar/handler/grpc/handler.go @@ -18,7 +18,7 @@ package grpc import ( - "github.com/vdaas/vald/apis/grpc/agent/sidecar" + "github.com/vdaas/vald/apis/grpc/v1/agent/sidecar" "github.com/vdaas/vald/pkg/agent/sidecar/service/observer" ) diff --git a/pkg/agent/sidecar/handler/grpc/handler_test.go b/pkg/agent/sidecar/handler/grpc/handler_test.go index 66fbc841de..30e34b7d8b 100644 --- a/pkg/agent/sidecar/handler/grpc/handler_test.go +++ b/pkg/agent/sidecar/handler/grpc/handler_test.go @@ -21,12 +21,13 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/agent/sidecar" + "github.com/vdaas/vald/apis/grpc/v1/agent/sidecar" "github.com/vdaas/vald/internal/errors" "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -75,9 +76,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -92,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/sidecar/handler/grpc/option.go b/pkg/agent/sidecar/handler/grpc/option.go index 99c4510a55..df83210211 100644 --- a/pkg/agent/sidecar/handler/grpc/option.go +++ b/pkg/agent/sidecar/handler/grpc/option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/pkg/agent/sidecar/service/observer" type Option func(*server) -var ( - defaultOpts = []Option{} -) +var defaultOpts = 
[]Option{} func WithStorageObserver(so observer.StorageObserver) Option { return func(s *server) { diff --git a/pkg/agent/sidecar/handler/grpc/option_test.go b/pkg/agent/sidecar/handler/grpc/option_test.go index d53940b4d3..b900f5ca6d 100644 --- a/pkg/agent/sidecar/handler/grpc/option_test.go +++ b/pkg/agent/sidecar/handler/grpc/option_test.go @@ -25,6 +25,8 @@ import ( ) func TestWithStorageObserver(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { so observer.StorageObserver @@ -62,7 +64,7 @@ func TestWithStorageObserver(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -98,9 +100,11 @@ func TestWithStorageObserver(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -121,7 +125,7 @@ func TestWithStorageObserver(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -129,7 +133,7 @@ func TestWithStorageObserver(t *testing.T) { got := WithStorageObserver(test.args.so) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/agent/sidecar/handler/rest/handler.go b/pkg/agent/sidecar/handler/rest/handler.go index 5fb6c20df2..773d82226d 100644 --- a/pkg/agent/sidecar/handler/rest/handler.go +++ b/pkg/agent/sidecar/handler/rest/handler.go @@ -20,7 +20,7 @@ package rest import ( "net/http" - "github.com/vdaas/vald/apis/grpc/agent/sidecar" + "github.com/vdaas/vald/apis/grpc/v1/agent/sidecar" "github.com/vdaas/vald/internal/net/http/dump" "github.com/vdaas/vald/internal/net/http/json" ) diff --git a/pkg/agent/sidecar/handler/rest/handler_test.go b/pkg/agent/sidecar/handler/rest/handler_test.go index 732f20161b..d199a679c4 100644 --- a/pkg/agent/sidecar/handler/rest/handler_test.go +++ b/pkg/agent/sidecar/handler/rest/handler_test.go @@ -22,12 +22,13 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/agent/sidecar" + "github.com/vdaas/vald/apis/grpc/v1/agent/sidecar" "github.com/vdaas/vald/internal/errors" "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -76,9 +77,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -93,12 +96,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Index(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -164,9 +167,11 @@ func Test_handler_Index(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + 
tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -184,7 +189,6 @@ func Test_handler_Index(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/sidecar/handler/rest/option.go b/pkg/agent/sidecar/handler/rest/option.go index 00d34db91e..b8185ac8ba 100644 --- a/pkg/agent/sidecar/handler/rest/option.go +++ b/pkg/agent/sidecar/handler/rest/option.go @@ -18,14 +18,12 @@ package rest import ( - "github.com/vdaas/vald/apis/grpc/agent/sidecar" + "github.com/vdaas/vald/apis/grpc/v1/agent/sidecar" ) type Option func(*handler) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithSidecar(sc sidecar.SidecarServer) Option { return func(h *handler) { diff --git a/pkg/agent/sidecar/handler/rest/option_test.go b/pkg/agent/sidecar/handler/rest/option_test.go index 214ec26cc5..b5924f1075 100644 --- a/pkg/agent/sidecar/handler/rest/option_test.go +++ b/pkg/agent/sidecar/handler/rest/option_test.go @@ -20,11 +20,13 @@ package rest import ( "testing" - "github.com/vdaas/vald/apis/grpc/agent/sidecar" + "github.com/vdaas/vald/apis/grpc/v1/agent/sidecar" "go.uber.org/goleak" ) func TestWithSidecar(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { sc sidecar.SidecarServer @@ -62,7 +64,7 @@ func TestWithSidecar(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -98,9 +100,11 @@ func TestWithSidecar(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -121,7 +125,7 @@ func TestWithSidecar(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -129,7 +133,7 @@ func TestWithSidecar(t *testing.T) { got := WithSidecar(test.args.sc) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/agent/sidecar/router/option.go b/pkg/agent/sidecar/router/option.go index 2ca5928d0c..d7892b3fe7 100644 --- a/pkg/agent/sidecar/router/option.go +++ b/pkg/agent/sidecar/router/option.go @@ -23,11 +23,9 @@ import ( type Option func(*router) -var ( - defaultOpts = []Option{ - WithTimeout("3s"), - } -) +var defaultOpts = []Option{ + WithTimeout("3s"), +} func WithHandler(h rest.Handler) Option { return func(r *router) { diff --git a/pkg/agent/sidecar/router/option_test.go b/pkg/agent/sidecar/router/option_test.go index 83f2dfae78..dae6876f62 100644 --- a/pkg/agent/sidecar/router/option_test.go +++ b/pkg/agent/sidecar/router/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/pkg/agent/sidecar/handler/rest" - "go.uber.org/goleak" ) func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { h rest.Handler @@ -63,7 +64,7 @@ func 
TestWithHandler(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithHandler(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithHandler(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithHandler(t *testing.T) { got := WithHandler(test.args.h) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -139,6 +142,8 @@ func TestWithHandler(t *testing.T) { } func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { timeout string @@ -176,7 +181,7 @@ func TestWithTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -212,9 +217,11 @@ func TestWithTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -235,7 +242,7 @@ func TestWithTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -243,7 +250,7 @@ func TestWithTimeout(t *testing.T) { got := WithTimeout(test.args.timeout) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/agent/sidecar/router/router.go b/pkg/agent/sidecar/router/router.go index a0a876b575..b808ce6683 100644 --- a/pkg/agent/sidecar/router/router.go +++ b/pkg/agent/sidecar/router/router.go @@ -29,7 +29,7 @@ type router struct { timeout string } -// New returns REST route&method information from handler interface +// New returns REST route&method information from handler interface. 
func New(opts ...Option) http.Handler { r := new(router) @@ -48,5 +48,6 @@ func New(opts ...Option) http.Handler { }, "/", h.Index, - }}...)) + }, + }...)) } diff --git a/pkg/agent/sidecar/router/router_test.go b/pkg/agent/sidecar/router/router_test.go index 25eaec0bef..97248787ae 100644 --- a/pkg/agent/sidecar/router/router_test.go +++ b/pkg/agent/sidecar/router/router_test.go @@ -23,9 +23,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -74,8 +76,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -90,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/sidecar/service/observer/observer.go b/pkg/agent/sidecar/service/observer/observer.go index a3609c4191..8b16dcf9f8 100644 --- a/pkg/agent/sidecar/service/observer/observer.go +++ b/pkg/agent/sidecar/service/observer/observer.go @@ -497,27 +497,27 @@ func (o *observer) backup(ctx context.Context) (err error) { return nil } - return func() error { - data, err := os.Open(file) - if err != nil { - return err + data, err := os.OpenFile(file, os.O_RDONLY, os.ModePerm) + if err != nil { + return err + } + defer func() { + e := data.Close() + if e != nil { + log.Errorf("failed to close %s: %s", file, e) } + }() - defer func() { - e := data.Close() - if e != nil { - log.Errorf("failed to close %s: %s", file, e) - } - }() - - d, err := ctxio.NewReaderWithContext(ctx, data) - if err != nil { - return err - } + d, err := ctxio.NewReaderWithContext(ctx, data) + if err != nil { + return err + } - _, err = io.Copy(tw, d) + _, err = io.Copy(tw, d) + if err != nil { return err - }() + } + return nil }) })) diff --git a/pkg/agent/sidecar/service/observer/observer_test.go b/pkg/agent/sidecar/service/observer/observer_test.go index 75d10965fa..4408c5b19f 100644 --- a/pkg/agent/sidecar/service/observer/observer_test.go +++ b/pkg/agent/sidecar/service/observer/observer_test.go @@ -31,6 +31,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -83,8 +84,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -100,12 +103,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotSo, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_observer_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -120,6 +123,7 @@ func Test_observer_Start(t *testing.T) { tickerEnabled bool storage storage.Storage ch chan struct{} + hooks []Hook } type want struct { want <-chan error @@ -162,6 +166,7 @@ func Test_observer_Start(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -187,6 +192,7 @@ func Test_observer_Start(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -195,8 +201,10 @@ func Test_observer_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc 
t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -218,18 +226,19 @@ func Test_observer_Start(t *testing.T) { tickerEnabled: test.fields.tickerEnabled, storage: test.fields.storage, ch: test.fields.ch, + hooks: test.fields.hooks, } got, err := o.Start(test.args.ctx) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_observer_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -244,6 +253,7 @@ func Test_observer_PostStop(t *testing.T) { tickerEnabled bool storage storage.Storage ch chan struct{} + hooks []Hook } type want struct { err error @@ -282,6 +292,7 @@ func Test_observer_PostStop(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -307,6 +318,7 @@ func Test_observer_PostStop(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -315,8 +327,10 @@ func Test_observer_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -338,18 +352,19 @@ func Test_observer_PostStop(t *testing.T) { tickerEnabled: test.fields.tickerEnabled, storage: test.fields.storage, ch: test.fields.ch, + hooks: test.fields.hooks, } err := o.PostStop(test.args.ctx) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_observer_startTicker(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -364,6 +379,7 @@ func Test_observer_startTicker(t *testing.T) { tickerEnabled bool storage storage.Storage ch chan struct{} + hooks []Hook } type want struct { want <-chan error @@ -406,6 +422,7 @@ func Test_observer_startTicker(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -431,6 +448,7 @@ func Test_observer_startTicker(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -439,8 +457,10 @@ func Test_observer_startTicker(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -462,18 +482,19 @@ func Test_observer_startTicker(t *testing.T) { tickerEnabled: test.fields.tickerEnabled, storage: test.fields.storage, ch: test.fields.ch, + hooks: test.fields.hooks, } got, err := o.startTicker(test.args.ctx) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_observer_startBackupLoop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -488,6 +509,7 @@ func Test_observer_startBackupLoop(t *testing.T) { tickerEnabled bool storage storage.Storage ch chan struct{} + hooks []Hook } type want struct { want <-chan error @@ -530,6 +552,7 @@ func Test_observer_startBackupLoop(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -555,6 +578,7 @@ func Test_observer_startBackupLoop(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ 
-563,8 +587,10 @@ func Test_observer_startBackupLoop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -586,18 +612,19 @@ func Test_observer_startBackupLoop(t *testing.T) { tickerEnabled: test.fields.tickerEnabled, storage: test.fields.storage, ch: test.fields.ch, + hooks: test.fields.hooks, } got, err := o.startBackupLoop(test.args.ctx) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_observer_onWrite(t *testing.T) { + t.Parallel() type args struct { ctx context.Context name string @@ -613,6 +640,7 @@ func Test_observer_onWrite(t *testing.T) { tickerEnabled bool storage storage.Storage ch chan struct{} + hooks []Hook } type want struct { err error @@ -652,6 +680,7 @@ func Test_observer_onWrite(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -678,6 +707,7 @@ func Test_observer_onWrite(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -686,8 +716,10 @@ func Test_observer_onWrite(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -709,18 +741,19 @@ func Test_observer_onWrite(t *testing.T) { tickerEnabled: test.fields.tickerEnabled, storage: test.fields.storage, ch: test.fields.ch, + hooks: test.fields.hooks, } err := o.onWrite(test.args.ctx, test.args.name) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_observer_onCreate(t *testing.T) { + t.Parallel() type args struct { ctx context.Context name string @@ -736,6 +769,7 @@ func Test_observer_onCreate(t *testing.T) { tickerEnabled bool storage storage.Storage ch chan struct{} + hooks []Hook } type want struct { err error @@ -775,6 +809,7 @@ func Test_observer_onCreate(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -801,6 +836,7 @@ func Test_observer_onCreate(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -809,8 +845,10 @@ func Test_observer_onCreate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -832,20 +870,19 @@ func Test_observer_onCreate(t *testing.T) { tickerEnabled: test.fields.tickerEnabled, storage: test.fields.storage, ch: test.fields.ch, + hooks: test.fields.hooks, } err := o.onCreate(test.args.ctx, test.args.name) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_observer_isValidMetadata(t *testing.T) { - type args struct { - } + t.Parallel() type fields struct { w watch.Watcher dir string @@ -857,6 +894,7 @@ func Test_observer_isValidMetadata(t *testing.T) { tickerEnabled bool storage storage.Storage ch chan struct{} + hooks []Hook } type want struct { want bool @@ -864,12 +902,11 @@ func Test_observer_isValidMetadata(t *testing.T) { } type test struct { name string - args args fields fields want want checkFunc 
func(want, bool, error) error - beforeFunc func(args) - afterFunc func(args) + beforeFunc func() + afterFunc func() } defaultCheckFunc := func(w want, got bool, err error) error { if !errors.Is(err, w.err) { @@ -885,8 +922,6 @@ func Test_observer_isValidMetadata(t *testing.T) { /* { name: "test_case_1", - args: args { - }, fields: fields { w: nil, dir: "", @@ -898,6 +933,7 @@ func Test_observer_isValidMetadata(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -909,8 +945,6 @@ func Test_observer_isValidMetadata(t *testing.T) { func() test { return test { name: "test_case_2", - args: args { - }, fields: fields { w: nil, dir: "", @@ -922,6 +956,7 @@ func Test_observer_isValidMetadata(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -930,14 +965,16 @@ func Test_observer_isValidMetadata(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { - test.beforeFunc(test.args) + test.beforeFunc() } if test.afterFunc != nil { - defer test.afterFunc(test.args) + defer test.afterFunc() } if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -953,18 +990,19 @@ func Test_observer_isValidMetadata(t *testing.T) { tickerEnabled: test.fields.tickerEnabled, storage: test.fields.storage, ch: test.fields.ch, + hooks: test.fields.hooks, } got, err := o.isValidMetadata() if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_observer_terminate(t *testing.T) { + t.Parallel() type fields struct { w watch.Watcher dir string @@ -976,6 +1014,7 @@ func Test_observer_terminate(t *testing.T) { tickerEnabled bool storage storage.Storage ch chan struct{} + hooks []Hook } type want struct { err error @@ -1010,6 +1049,7 @@ func Test_observer_terminate(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1032,6 +1072,7 @@ func Test_observer_terminate(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1040,8 +1081,10 @@ func Test_observer_terminate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -1063,18 +1106,19 @@ func Test_observer_terminate(t *testing.T) { tickerEnabled: test.fields.tickerEnabled, storage: test.fields.storage, ch: test.fields.ch, + hooks: test.fields.hooks, } err := o.terminate() if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_observer_requestBackup(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -1089,6 +1133,7 @@ func Test_observer_requestBackup(t *testing.T) { tickerEnabled bool storage storage.Storage ch chan struct{} + hooks []Hook } type want struct { err error @@ -1127,6 +1172,7 @@ func Test_observer_requestBackup(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1152,6 +1198,7 @@ func Test_observer_requestBackup(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1160,8 +1207,10 @@ func 
Test_observer_requestBackup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1183,18 +1232,19 @@ func Test_observer_requestBackup(t *testing.T) { tickerEnabled: test.fields.tickerEnabled, storage: test.fields.storage, ch: test.fields.ch, + hooks: test.fields.hooks, } err := o.requestBackup(test.args.ctx) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_observer_backup(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -1209,6 +1259,7 @@ func Test_observer_backup(t *testing.T) { tickerEnabled bool storage storage.Storage ch chan struct{} + hooks []Hook } type want struct { err error @@ -1247,6 +1298,7 @@ func Test_observer_backup(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1272,6 +1324,7 @@ func Test_observer_backup(t *testing.T) { tickerEnabled: false, storage: nil, ch: nil, + hooks: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1280,8 +1333,10 @@ func Test_observer_backup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1303,13 +1358,13 @@ func Test_observer_backup(t *testing.T) { tickerEnabled: test.fields.tickerEnabled, storage: test.fields.storage, ch: test.fields.ch, + hooks: test.fields.hooks, } err := o.backup(test.args.ctx) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/sidecar/service/observer/option.go b/pkg/agent/sidecar/service/observer/option.go index 3e27f803aa..4991eb6e89 100644 --- a/pkg/agent/sidecar/service/observer/option.go +++ b/pkg/agent/sidecar/service/observer/option.go @@ -28,15 +28,13 @@ import ( type Option func(o *observer) error -var ( - defaultOpts = []Option{ - WithErrGroup(errgroup.Get()), - WithBackupDuration("10m"), - WithPostStopTimeout("2m"), - WithWatch(true), - WithTicker(true), - } -) +var defaultOpts = []Option{ + WithErrGroup(errgroup.Get()), + WithBackupDuration("10m"), + WithPostStopTimeout("2m"), + WithWatch(true), + WithTicker(true), +} func WithBackupDuration(dur string) Option { return func(o *observer) error { diff --git a/pkg/agent/sidecar/service/observer/option_test.go b/pkg/agent/sidecar/service/observer/option_test.go index fa4497140d..7092c48377 100644 --- a/pkg/agent/sidecar/service/observer/option_test.go +++ b/pkg/agent/sidecar/service/observer/option_test.go @@ -26,6 +26,7 @@ import ( ) func TestWithBackupDuration(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -100,8 +101,10 @@ func TestWithBackupDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -140,6 +143,7 @@ func TestWithBackupDuration(t *testing.T) { } func TestWithPostStopTimeout(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -214,8 +218,10 @@ func TestWithPostStopTimeout(t *testing.T) { */ } - for _, test 
:= range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -254,6 +260,7 @@ func TestWithPostStopTimeout(t *testing.T) { } func TestWithWatch(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -328,8 +335,10 @@ func TestWithWatch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -368,6 +377,7 @@ func TestWithWatch(t *testing.T) { } func TestWithTicker(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -442,8 +452,10 @@ func TestWithTicker(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -482,6 +494,7 @@ func TestWithTicker(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -556,8 +569,10 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -596,6 +611,7 @@ func TestWithErrGroup(t *testing.T) { } func TestWithDir(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -670,8 +686,10 @@ func TestWithDir(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -710,6 +728,7 @@ func TestWithDir(t *testing.T) { } func TestWithBlobStorage(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -784,8 +803,10 @@ func TestWithBlobStorage(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -822,3 +843,120 @@ func TestWithBlobStorage(t *testing.T) { }) } } + +func TestWithHooks(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + hooks []Hook + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + hooks: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + hooks: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithHooks(test.args.hooks...) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithHooks(test.args.hooks...) 
+ obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/agent/sidecar/service/restorer/option.go b/pkg/agent/sidecar/service/restorer/option.go index 5e62baa443..4597ad8383 100644 --- a/pkg/agent/sidecar/service/restorer/option.go +++ b/pkg/agent/sidecar/service/restorer/option.go @@ -25,12 +25,10 @@ import ( type Option func(r *restorer) error -var ( - defaultOpts = []Option{ - WithErrGroup(errgroup.Get()), - WithBackoff(false), - } -) +var defaultOpts = []Option{ + WithErrGroup(errgroup.Get()), + WithBackoff(false), +} func WithErrGroup(eg errgroup.Group) Option { return func(r *restorer) error { diff --git a/pkg/agent/sidecar/service/restorer/option_test.go b/pkg/agent/sidecar/service/restorer/option_test.go index 9b5b24619e..0e32e54a45 100644 --- a/pkg/agent/sidecar/service/restorer/option_test.go +++ b/pkg/agent/sidecar/service/restorer/option_test.go @@ -27,6 +27,7 @@ import ( ) func TestWithErrGroup(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -101,8 +102,10 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -141,6 +144,7 @@ func TestWithErrGroup(t *testing.T) { } func TestWithDir(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -215,8 +219,10 @@ func TestWithDir(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -255,6 +261,7 @@ func TestWithDir(t *testing.T) { } func TestWithBlobStorage(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -329,8 +336,10 @@ func TestWithBlobStorage(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -368,7 +377,125 @@ func TestWithBlobStorage(t *testing.T) { } } +func TestWithBackoff(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + enabled bool + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + enabled: false, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + enabled: false, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithBackoff(test.args.enabled) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithBackoff(test.args.enabled) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + func TestWithBackoffOpts(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -443,8 +570,10 @@ func TestWithBackoffOpts(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) diff --git a/pkg/agent/sidecar/service/restorer/restorer.go b/pkg/agent/sidecar/service/restorer/restorer.go index 7f84ee4326..9451e164b1 100644 --- a/pkg/agent/sidecar/service/restorer/restorer.go +++ b/pkg/agent/sidecar/service/restorer/restorer.go @@ -49,6 +49,7 @@ type restorer struct { backoffEnabled bool backoffOpts []backoff.Option + bo backoff.Backoff } func New(opts ...Option) (Restorer, error) { @@ -58,6 +59,9 @@ func New(opts ...Option) (Restorer, error) { return nil, errors.ErrOptionFailed(err, reflect.ValueOf(opt)) } } + if r.backoffEnabled { + r.bo = backoff.New(r.backoffOpts...) 
+ } return r, nil } @@ -82,6 +86,9 @@ func (r *restorer) Start(ctx context.Context) (<-chan error, error) { r.eg.Go(safety.RecoverFunc(func() (err error) { defer close(ech) + if r.backoffEnabled { + defer r.bo.Close() + } for { select { @@ -112,27 +119,23 @@ func (r *restorer) startRestore(ctx context.Context) (<-chan error, error) { return ech, err } - restore := func() (interface{}, error) { + restore := func(ctx context.Context) (interface{}, bool, error) { err := r.restore(ctx) if err != nil { log.Errorf("restoring failed: %s", err) - - return nil, err + return nil, true, err } - return nil, nil + return nil, false, nil } r.eg.Go(safety.RecoverFunc(func() (err error) { defer close(ech) if r.backoffEnabled { - b := backoff.New(r.backoffOpts...) - defer b.Close() - - _, err = b.Do(ctx, restore) + _, err = r.bo.Do(ctx, restore) } else { - _, err = restore() + _, _, err = restore(ctx) } if err != nil { @@ -212,7 +215,7 @@ func (r *restorer) restore(ctx context.Context) (err error) { case tar.TypeDir: _, err = os.Stat(target) if err != nil { - err = os.MkdirAll(target, 0700) + err = os.MkdirAll(target, 0o700) if err != nil { return err } diff --git a/pkg/agent/sidecar/service/restorer/restorer_test.go b/pkg/agent/sidecar/service/restorer/restorer_test.go index e5fea23373..3b5c841b61 100644 --- a/pkg/agent/sidecar/service/restorer/restorer_test.go +++ b/pkg/agent/sidecar/service/restorer/restorer_test.go @@ -30,6 +30,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -82,8 +83,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -99,20 +102,22 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_restorer_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } type fields struct { - dir string - eg errgroup.Group - storage storage.Storage - backoffOpts []backoff.Option + dir string + eg errgroup.Group + storage storage.Storage + backoffEnabled bool + backoffOpts []backoff.Option + bo backoff.Backoff } type want struct { want <-chan error @@ -148,7 +153,9 @@ func Test_restorer_Start(t *testing.T) { dir: "", eg: nil, storage: nil, + backoffEnabled: false, backoffOpts: nil, + bo: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -167,7 +174,9 @@ func Test_restorer_Start(t *testing.T) { dir: "", eg: nil, storage: nil, + backoffEnabled: false, backoffOpts: nil, + bo: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -176,8 +185,10 @@ func Test_restorer_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -189,30 +200,34 @@ func Test_restorer_Start(t *testing.T) { test.checkFunc = defaultCheckFunc } r := &restorer{ - dir: test.fields.dir, - eg: test.fields.eg, - storage: test.fields.storage, - backoffOpts: test.fields.backoffOpts, + dir: test.fields.dir, + eg: test.fields.eg, + storage: test.fields.storage, + backoffEnabled: test.fields.backoffEnabled, + backoffOpts: test.fields.backoffOpts, + bo: test.fields.bo, } got, err := r.Start(test.args.ctx) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func 
Test_restorer_startRestore(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } type fields struct { - dir string - eg errgroup.Group - storage storage.Storage - backoffOpts []backoff.Option + dir string + eg errgroup.Group + storage storage.Storage + backoffEnabled bool + backoffOpts []backoff.Option + bo backoff.Backoff } type want struct { want <-chan error @@ -248,7 +263,9 @@ func Test_restorer_startRestore(t *testing.T) { dir: "", eg: nil, storage: nil, + backoffEnabled: false, backoffOpts: nil, + bo: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -267,7 +284,9 @@ func Test_restorer_startRestore(t *testing.T) { dir: "", eg: nil, storage: nil, + backoffEnabled: false, backoffOpts: nil, + bo: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -276,8 +295,10 @@ func Test_restorer_startRestore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -289,30 +310,34 @@ func Test_restorer_startRestore(t *testing.T) { test.checkFunc = defaultCheckFunc } r := &restorer{ - dir: test.fields.dir, - eg: test.fields.eg, - storage: test.fields.storage, - backoffOpts: test.fields.backoffOpts, + dir: test.fields.dir, + eg: test.fields.eg, + storage: test.fields.storage, + backoffEnabled: test.fields.backoffEnabled, + backoffOpts: test.fields.backoffOpts, + bo: test.fields.bo, } got, err := r.startRestore(test.args.ctx) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_restorer_restore(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } type fields struct { - dir string - eg errgroup.Group - storage storage.Storage - backoffOpts []backoff.Option + dir string + eg errgroup.Group + storage storage.Storage + backoffEnabled bool + backoffOpts []backoff.Option + bo backoff.Backoff } type want struct { err error @@ -344,7 +369,9 @@ func Test_restorer_restore(t *testing.T) { dir: "", eg: nil, storage: nil, + backoffEnabled: false, backoffOpts: nil, + bo: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -363,7 +390,9 @@ func Test_restorer_restore(t *testing.T) { dir: "", eg: nil, storage: nil, + backoffEnabled: false, backoffOpts: nil, + bo: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -372,8 +401,10 @@ func Test_restorer_restore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -385,17 +416,18 @@ func Test_restorer_restore(t *testing.T) { test.checkFunc = defaultCheckFunc } r := &restorer{ - dir: test.fields.dir, - eg: test.fields.eg, - storage: test.fields.storage, - backoffOpts: test.fields.backoffOpts, + dir: test.fields.dir, + eg: test.fields.eg, + storage: test.fields.storage, + backoffEnabled: test.fields.backoffEnabled, + backoffOpts: test.fields.backoffOpts, + bo: test.fields.bo, } err := r.restore(test.args.ctx) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/sidecar/service/storage/option.go b/pkg/agent/sidecar/service/storage/option.go index 63a0fb7798..b693e76fd0 100644 --- a/pkg/agent/sidecar/service/storage/option.go +++ b/pkg/agent/sidecar/service/storage/option.go @@ -25,14 +25,12 @@ import ( type Option func(b *bs) error -var ( - defaultOpts = 
[]Option{ - WithErrGroup(errgroup.Get()), - WithCompressAlgorithm("gzip"), - WithCompressionLevel(-1), - WithFilenameSuffix(".tar.gz"), - } -) +var defaultOpts = []Option{ + WithErrGroup(errgroup.Get()), + WithCompressAlgorithm("gzip"), + WithCompressionLevel(-1), + WithFilenameSuffix(".tar.gz"), +} func WithErrGroup(eg errgroup.Group) Option { return func(b *bs) error { diff --git a/pkg/agent/sidecar/service/storage/option_test.go b/pkg/agent/sidecar/service/storage/option_test.go index b97f2eded6..31104e3243 100644 --- a/pkg/agent/sidecar/service/storage/option_test.go +++ b/pkg/agent/sidecar/service/storage/option_test.go @@ -27,6 +27,7 @@ import ( ) func TestWithErrGroup(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -101,8 +102,10 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -141,6 +144,7 @@ func TestWithErrGroup(t *testing.T) { } func TestWithType(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -215,8 +219,10 @@ func TestWithType(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -255,6 +261,7 @@ func TestWithType(t *testing.T) { } func TestWithBucketName(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -329,8 +336,10 @@ func TestWithBucketName(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -369,6 +378,7 @@ func TestWithBucketName(t *testing.T) { } func TestWithFilename(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -443,8 +453,10 @@ func TestWithFilename(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -483,6 +495,7 @@ func TestWithFilename(t *testing.T) { } func TestWithFilenameSuffix(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -557,8 +570,10 @@ func TestWithFilenameSuffix(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -597,6 +612,7 @@ func TestWithFilenameSuffix(t *testing.T) { } func TestWithS3Opts(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -671,8 +687,10 @@ func TestWithS3Opts(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
test.beforeFunc(test.args) @@ -711,6 +729,7 @@ func TestWithS3Opts(t *testing.T) { } func TestWithS3SessionOpts(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -785,8 +804,10 @@ func TestWithS3SessionOpts(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -825,6 +846,7 @@ func TestWithS3SessionOpts(t *testing.T) { } func TestWithCompressAlgorithm(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -899,8 +921,10 @@ func TestWithCompressAlgorithm(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -939,6 +963,7 @@ func TestWithCompressAlgorithm(t *testing.T) { } func TestWithCompressionLevel(t *testing.T) { + t.Parallel() // Change interface type to the type of object you are testing type T = interface{} type args struct { @@ -1013,8 +1038,10 @@ func TestWithCompressionLevel(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) diff --git a/pkg/agent/sidecar/service/storage/storage_test.go b/pkg/agent/sidecar/service/storage/storage_test.go index 29eb1ef4b5..895f883dd1 100644 --- a/pkg/agent/sidecar/service/storage/storage_test.go +++ b/pkg/agent/sidecar/service/storage/storage_test.go @@ -33,6 +33,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -85,8 +86,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -102,12 +105,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_bs_initCompressor(t *testing.T) { + t.Parallel() type fields struct { eg errgroup.Group storageType string @@ -186,8 +189,10 @@ func Test_bs_initCompressor(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -216,12 +221,12 @@ func Test_bs_initCompressor(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_bs_initBucket(t *testing.T) { + t.Parallel() type fields struct { eg errgroup.Group storageType string @@ -300,8 +305,10 @@ func Test_bs_initBucket(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -330,12 +337,12 @@ func Test_bs_initBucket(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_bs_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -428,8 +435,10 @@ func 
Test_bs_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -458,12 +467,12 @@ func Test_bs_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_bs_Reader(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -556,8 +565,10 @@ func Test_bs_Reader(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -586,12 +597,12 @@ func Test_bs_Reader(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_bs_Writer(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -684,8 +695,10 @@ func Test_bs_Writer(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -714,7 +727,122 @@ func Test_bs_Writer(t *testing.T) { if err := test.checkFunc(test.want, gotW, err); err != nil { tt.Errorf("error = %v", err) } + }) + } +} + +func Test_bs_StorageInfo(t *testing.T) { + t.Parallel() + type fields struct { + eg errgroup.Group + storageType string + bucketName string + filename string + suffix string + s3Opts []s3.Option + s3SessionOpts []session.Option + compressAlgorithm string + compressionLevel int + bucket blob.Bucket + compressor compress.Compressor + } + type want struct { + want *StorageInfo + } + type test struct { + name string + fields fields + want want + checkFunc func(want, *StorageInfo) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got *StorageInfo) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + fields: fields { + eg: nil, + storageType: "", + bucketName: "", + filename: "", + suffix: "", + s3Opts: nil, + s3SessionOpts: nil, + compressAlgorithm: "", + compressionLevel: 0, + bucket: nil, + compressor: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + fields: fields { + eg: nil, + storageType: "", + bucketName: "", + filename: "", + suffix: "", + s3Opts: nil, + s3SessionOpts: nil, + compressAlgorithm: "", + compressionLevel: 0, + bucket: nil, + compressor: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + b := &bs{ + eg: test.fields.eg, + storageType: test.fields.storageType, + bucketName: test.fields.bucketName, + filename: test.fields.filename, + suffix: test.fields.suffix, + s3Opts: test.fields.s3Opts, + s3SessionOpts: test.fields.s3SessionOpts, + compressAlgorithm: test.fields.compressAlgorithm, + compressionLevel: 
test.fields.compressionLevel, + bucket: test.fields.bucket, + compressor: test.fields.compressor, + } + got := b.StorageInfo() + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } }) } } diff --git a/pkg/agent/sidecar/usecase/initcontainer/initcontainer.go b/pkg/agent/sidecar/usecase/initcontainer/initcontainer.go index 4a23985a07..6d00fbe083 100644 --- a/pkg/agent/sidecar/usecase/initcontainer/initcontainer.go +++ b/pkg/agent/sidecar/usecase/initcontainer/initcontainer.go @@ -19,7 +19,7 @@ package initcontainer import ( "context" - "github.com/vdaas/vald/apis/grpc/agent/sidecar" + "github.com/vdaas/vald/apis/grpc/v1/agent/sidecar" iconf "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/db/storage/blob/s3" "github.com/vdaas/vald/internal/db/storage/blob/s3/session" diff --git a/pkg/agent/sidecar/usecase/initcontainer/initcontainer_test.go b/pkg/agent/sidecar/usecase/initcontainer/initcontainer_test.go index 33e4061542..f329f97c52 100644 --- a/pkg/agent/sidecar/usecase/initcontainer/initcontainer_test.go +++ b/pkg/agent/sidecar/usecase/initcontainer/initcontainer_test.go @@ -32,6 +32,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -84,8 +85,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -101,12 +104,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -177,8 +180,10 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -201,12 +206,12 @@ func Test_run_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -281,8 +286,10 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -305,12 +312,12 @@ func Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -381,8 +388,10 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -405,12 +414,12 @@ func Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -481,8 +490,10 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer 
goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -505,12 +516,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -581,8 +592,10 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -605,7 +618,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/sidecar/usecase/sidecar/sidecar.go b/pkg/agent/sidecar/usecase/sidecar/sidecar.go index c8a9c9c1bf..adb93fe303 100644 --- a/pkg/agent/sidecar/usecase/sidecar/sidecar.go +++ b/pkg/agent/sidecar/usecase/sidecar/sidecar.go @@ -19,7 +19,7 @@ package sidecar import ( "context" - "github.com/vdaas/vald/apis/grpc/agent/sidecar" + "github.com/vdaas/vald/apis/grpc/v1/agent/sidecar" iconf "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/db/storage/blob/s3" "github.com/vdaas/vald/internal/db/storage/blob/s3/session" diff --git a/pkg/agent/sidecar/usecase/sidecar/sidecar_test.go b/pkg/agent/sidecar/usecase/sidecar/sidecar_test.go index 919ab8c90a..bbf2e3a2e2 100644 --- a/pkg/agent/sidecar/usecase/sidecar/sidecar_test.go +++ b/pkg/agent/sidecar/usecase/sidecar/sidecar_test.go @@ -32,6 +32,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -84,8 +85,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -101,12 +104,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -177,8 +180,10 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -201,12 +206,12 @@ func Test_run_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -281,8 +286,10 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -305,12 +312,12 @@ func Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -381,8 +388,10 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -405,12 +414,12 @@ func 
Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -481,8 +490,10 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -505,12 +516,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -581,8 +592,10 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -605,7 +618,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/agent/sidecar/usecase/sidecard_test.go b/pkg/agent/sidecar/usecase/sidecard_test.go index 034bbe9b25..74ab1437b6 100644 --- a/pkg/agent/sidecar/usecase/sidecard_test.go +++ b/pkg/agent/sidecar/usecase/sidecard_test.go @@ -27,6 +27,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -79,9 +80,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -96,7 +99,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/discoverer/k8s/config/config_test.go b/pkg/discoverer/k8s/config/config_test.go index eba18af56a..e90809127a 100644 --- a/pkg/discoverer/k8s/config/config_test.go +++ b/pkg/discoverer/k8s/config/config_test.go @@ -26,6 +26,7 @@ import ( ) func TestNewConfig(t *testing.T) { + t.Parallel() type args struct { path string } @@ -78,9 +79,11 @@ func TestNewConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -95,7 +98,6 @@ func TestNewConfig(t *testing.T) { if err := test.checkFunc(test.want, gotCfg, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/discoverer/k8s/handler/grpc/handler.go b/pkg/discoverer/k8s/handler/grpc/handler.go index 4e1838047c..ca200ff350 100644 --- a/pkg/discoverer/k8s/handler/grpc/handler.go +++ b/pkg/discoverer/k8s/handler/grpc/handler.go @@ -22,8 +22,8 @@ import ( "fmt" "strings" - "github.com/vdaas/vald/apis/grpc/discoverer" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/discoverer" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc/proto" diff --git a/pkg/discoverer/k8s/handler/grpc/handler_test.go b/pkg/discoverer/k8s/handler/grpc/handler_test.go index fd674da4f8..67fb33b4cd 100644 --- a/pkg/discoverer/k8s/handler/grpc/handler_test.go +++ 
b/pkg/discoverer/k8s/handler/grpc/handler_test.go @@ -22,15 +22,15 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/singleflight" "github.com/vdaas/vald/pkg/discoverer/k8s/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -83,9 +83,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -100,12 +102,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotDs, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -163,9 +165,11 @@ func Test_server_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -189,6 +193,7 @@ func Test_server_Start(t *testing.T) { } func Test_server_Pods(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Discoverer_Request @@ -257,9 +262,11 @@ func Test_server_Pods(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -278,12 +285,12 @@ func Test_server_Pods(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Nodes(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Discoverer_Request @@ -352,9 +359,11 @@ func Test_server_Nodes(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -373,12 +382,12 @@ func Test_server_Nodes(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_singleflightKey(t *testing.T) { + t.Parallel() type args struct { pref string req *payload.Discoverer_Request @@ -430,9 +439,11 @@ func Test_singleflightKey(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -447,7 +458,6 @@ func Test_singleflightKey(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/discoverer/k8s/handler/grpc/option.go b/pkg/discoverer/k8s/handler/grpc/option.go index 4a4607671b..e706e0cba1 100644 --- a/pkg/discoverer/k8s/handler/grpc/option.go +++ b/pkg/discoverer/k8s/handler/grpc/option.go @@ -23,9 +23,7 @@ import ( type Option func(*server) error -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithDiscoverer(dsc service.Discoverer) Option { return func(s *server) 
error { diff --git a/pkg/discoverer/k8s/handler/grpc/option_test.go b/pkg/discoverer/k8s/handler/grpc/option_test.go index 073a0fc147..2ff4b254e8 100644 --- a/pkg/discoverer/k8s/handler/grpc/option_test.go +++ b/pkg/discoverer/k8s/handler/grpc/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/pkg/discoverer/k8s/service" - "go.uber.org/goleak" ) func TestWithDiscoverer(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dsc service.Discoverer @@ -63,7 +64,7 @@ func TestWithDiscoverer(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithDiscoverer(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithDiscoverer(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithDiscoverer(t *testing.T) { got := WithDiscoverer(test.args.dsc) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/discoverer/k8s/handler/rest/handler.go b/pkg/discoverer/k8s/handler/rest/handler.go index 4d39d49708..ea5b1c79a1 100644 --- a/pkg/discoverer/k8s/handler/rest/handler.go +++ b/pkg/discoverer/k8s/handler/rest/handler.go @@ -20,8 +20,8 @@ package rest import ( "net/http" - "github.com/vdaas/vald/apis/grpc/discoverer" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/discoverer" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/net/http/dump" "github.com/vdaas/vald/internal/net/http/json" ) diff --git a/pkg/discoverer/k8s/handler/rest/handler_test.go b/pkg/discoverer/k8s/handler/rest/handler_test.go index ddc4e95f90..1f2ae5445b 100644 --- a/pkg/discoverer/k8s/handler/rest/handler_test.go +++ b/pkg/discoverer/k8s/handler/rest/handler_test.go @@ -22,13 +22,13 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/discoverer" + "github.com/vdaas/vald/apis/grpc/v1/discoverer" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -77,9 +77,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -94,12 +96,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Index(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -165,9 +167,11 @@ func Test_handler_Index(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc 
t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -185,12 +189,12 @@ func Test_handler_Index(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Pods(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -256,9 +260,11 @@ func Test_handler_Pods(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -276,12 +282,12 @@ func Test_handler_Pods(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Nodes(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -347,9 +353,11 @@ func Test_handler_Nodes(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,7 +375,6 @@ func Test_handler_Nodes(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/discoverer/k8s/handler/rest/option.go b/pkg/discoverer/k8s/handler/rest/option.go index 87f2bdc710..c66e45b26f 100644 --- a/pkg/discoverer/k8s/handler/rest/option.go +++ b/pkg/discoverer/k8s/handler/rest/option.go @@ -17,13 +17,11 @@ // Package rest provides rest api logic package rest -import "github.com/vdaas/vald/apis/grpc/discoverer" +import "github.com/vdaas/vald/apis/grpc/v1/discoverer" type Option func(*handler) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithDiscoverer(dsc discoverer.DiscovererServer) Option { return func(h *handler) { diff --git a/pkg/discoverer/k8s/handler/rest/option_test.go b/pkg/discoverer/k8s/handler/rest/option_test.go index 2053297512..78961baaf0 100644 --- a/pkg/discoverer/k8s/handler/rest/option_test.go +++ b/pkg/discoverer/k8s/handler/rest/option_test.go @@ -20,12 +20,13 @@ package rest import ( "testing" - "github.com/vdaas/vald/apis/grpc/discoverer" - + "github.com/vdaas/vald/apis/grpc/v1/discoverer" "go.uber.org/goleak" ) func TestWithDiscoverer(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dsc discoverer.DiscovererServer @@ -63,7 +64,7 @@ func TestWithDiscoverer(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithDiscoverer(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithDiscoverer(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise 
delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithDiscoverer(t *testing.T) { got := WithDiscoverer(test.args.dsc) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/discoverer/k8s/router/option.go b/pkg/discoverer/k8s/router/option.go index c287e04dee..8433522322 100644 --- a/pkg/discoverer/k8s/router/option.go +++ b/pkg/discoverer/k8s/router/option.go @@ -24,11 +24,9 @@ import ( type Option func(*router) -var ( - defaultOpts = []Option{ - WithTimeout("3s"), - } -) +var defaultOpts = []Option{ + WithTimeout("3s"), +} func WithHandler(h rest.Handler) Option { return func(r *router) { diff --git a/pkg/discoverer/k8s/router/option_test.go b/pkg/discoverer/k8s/router/option_test.go index 54bd662271..ee14a8e59c 100644 --- a/pkg/discoverer/k8s/router/option_test.go +++ b/pkg/discoverer/k8s/router/option_test.go @@ -22,11 +22,12 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/pkg/discoverer/k8s/handler/rest" - "go.uber.org/goleak" ) func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { h rest.Handler @@ -64,7 +65,7 @@ func TestWithHandler(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -100,9 +101,11 @@ func TestWithHandler(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -123,7 +126,7 @@ func TestWithHandler(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -131,7 +134,7 @@ func TestWithHandler(t *testing.T) { got := WithHandler(test.args.h) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -140,6 +143,8 @@ func TestWithHandler(t *testing.T) { } func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { timeout string @@ -177,7 +182,7 @@ func TestWithTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -213,9 +218,11 @@ func TestWithTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -236,7 +243,7 @@ func TestWithTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise 
delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -244,7 +251,7 @@ func TestWithTimeout(t *testing.T) { got := WithTimeout(test.args.timeout) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -253,6 +260,8 @@ func TestWithTimeout(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -290,7 +299,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -326,9 +335,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -349,7 +360,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -357,7 +368,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/discoverer/k8s/router/router.go b/pkg/discoverer/k8s/router/router.go index 10affe1b2a..26fc03d482 100644 --- a/pkg/discoverer/k8s/router/router.go +++ b/pkg/discoverer/k8s/router/router.go @@ -32,7 +32,7 @@ type router struct { timeout string } -// New returns REST route&method information from handler interface +// New returns REST route&method information from handler interface. 
func New(opts ...Option) http.Handler { r := new(router) diff --git a/pkg/discoverer/k8s/router/router_test.go b/pkg/discoverer/k8s/router/router_test.go index 25eaec0bef..97248787ae 100644 --- a/pkg/discoverer/k8s/router/router_test.go +++ b/pkg/discoverer/k8s/router/router_test.go @@ -23,9 +23,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -74,8 +76,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -90,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/discoverer/k8s/service/discover.go b/pkg/discoverer/k8s/service/discover.go index 38840a61d3..997a8090ef 100644 --- a/pkg/discoverer/k8s/service/discover.go +++ b/pkg/discoverer/k8s/service/discover.go @@ -25,7 +25,7 @@ import ( "sync/atomic" "time" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/k8s" @@ -355,7 +355,6 @@ func (d *discoverer) Start(ctx context.Context) (<-chan error, error) { ech <- err } } - } })) return ech, nil diff --git a/pkg/discoverer/k8s/service/discover_test.go b/pkg/discoverer/k8s/service/discover_test.go index dc3d3e8177..a4072eaa0a 100644 --- a/pkg/discoverer/k8s/service/discover_test.go +++ b/pkg/discoverer/k8s/service/discover_test.go @@ -24,15 +24,15 @@ import ( "testing" "time" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/k8s" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -85,9 +85,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -102,12 +104,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotDsc, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_discoverer_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -209,9 +211,11 @@ func Test_discoverer_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -242,12 +246,12 @@ func Test_discoverer_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_discoverer_GetPods(t *testing.T) { + t.Parallel() type args struct { req *payload.Discoverer_Request } @@ -349,9 +353,11 @@ func Test_discoverer_GetPods(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -382,12 
+388,12 @@ func Test_discoverer_GetPods(t *testing.T) { if err := test.checkFunc(test.want, gotPods, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_discoverer_GetNodes(t *testing.T) { + t.Parallel() type args struct { req *payload.Discoverer_Request } @@ -489,9 +495,11 @@ func Test_discoverer_GetNodes(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -522,7 +530,6 @@ func Test_discoverer_GetNodes(t *testing.T) { if err := test.checkFunc(test.want, gotNodes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/discoverer/k8s/service/nodemap_test.go b/pkg/discoverer/k8s/service/nodemap_test.go index fc8a00c48c..3eee2a1ef5 100644 --- a/pkg/discoverer/k8s/service/nodemap_test.go +++ b/pkg/discoverer/k8s/service/nodemap_test.go @@ -25,9 +25,11 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/k8s/node" + "go.uber.org/goleak" ) func Test_newEntryNodeMap(t *testing.T) { + t.Parallel() type args struct { i node.Node } @@ -76,8 +78,11 @@ func Test_newEntryNodeMap(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -92,12 +97,12 @@ func Test_newEntryNodeMap(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMap_Load(t *testing.T) { + t.Parallel() type args struct { key string } @@ -169,8 +174,11 @@ func Test_nodeMap_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -191,12 +199,12 @@ func Test_nodeMap_Load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMap_load(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -249,8 +257,11 @@ func Test_entryNodeMap_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -268,12 +279,12 @@ func Test_entryNodeMap_load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMap_Store(t *testing.T) { + t.Parallel() type args struct { key string value node.Node @@ -340,8 +351,11 @@ func Test_nodeMap_Store(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,6 +381,7 @@ func Test_nodeMap_Store(t *testing.T) { } func Test_entryNodeMap_tryStore(t *testing.T) { + t.Parallel() type args struct { i *node.Node } @@ -425,8 +440,11 @@ func Test_entryNodeMap_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
test.beforeFunc(test.args) } @@ -444,12 +462,12 @@ func Test_entryNodeMap_tryStore(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMap_unexpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -498,8 +516,11 @@ func Test_entryNodeMap_unexpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -517,12 +538,12 @@ func Test_entryNodeMap_unexpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotWasExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMap_storeLocked(t *testing.T) { + t.Parallel() type args struct { i *node.Node } @@ -577,8 +598,11 @@ func Test_entryNodeMap_storeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -601,6 +625,7 @@ func Test_entryNodeMap_storeLocked(t *testing.T) { } func Test_nodeMap_LoadOrStore(t *testing.T) { + t.Parallel() type args struct { key string value node.Node @@ -675,8 +700,11 @@ func Test_nodeMap_LoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -697,12 +725,12 @@ func Test_nodeMap_LoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMap_tryLoadOrStore(t *testing.T) { + t.Parallel() type args struct { i node.Node } @@ -769,8 +797,11 @@ func Test_entryNodeMap_tryLoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -788,12 +819,12 @@ func Test_entryNodeMap_tryLoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMap_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -857,8 +888,11 @@ func Test_nodeMap_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -884,6 +918,7 @@ func Test_nodeMap_Delete(t *testing.T) { } func Test_entryNodeMap_delete(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -932,8 +967,11 @@ func Test_entryNodeMap_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -951,12 +989,12 @@ func Test_entryNodeMap_delete(t *testing.T) { if err := test.checkFunc(test.want, gotHadValue); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMap_Range(t *testing.T) { + t.Parallel() type args struct { f func(key string, value node.Node) bool } @@ -1020,8 +1058,11 @@ func Test_nodeMap_Range(t 
*testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1047,6 +1088,7 @@ func Test_nodeMap_Range(t *testing.T) { } func Test_nodeMap_missLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1100,8 +1142,11 @@ func Test_nodeMap_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1127,6 +1172,7 @@ func Test_nodeMap_missLocked(t *testing.T) { } func Test_nodeMap_dirtyLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1180,8 +1226,11 @@ func Test_nodeMap_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1207,6 +1256,7 @@ func Test_nodeMap_dirtyLocked(t *testing.T) { } func Test_entryNodeMap_tryExpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -1255,8 +1305,11 @@ func Test_entryNodeMap_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1274,7 +1327,6 @@ func Test_entryNodeMap_tryExpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotIsExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/discoverer/k8s/service/nodemetricsmap_test.go b/pkg/discoverer/k8s/service/nodemetricsmap_test.go index 1cc6ae0368..0be14b3cdf 100644 --- a/pkg/discoverer/k8s/service/nodemetricsmap_test.go +++ b/pkg/discoverer/k8s/service/nodemetricsmap_test.go @@ -25,9 +25,11 @@ import ( "github.com/vdaas/vald/internal/errors" mnode "github.com/vdaas/vald/internal/k8s/metrics/node" + "go.uber.org/goleak" ) func Test_newEntryNodeMetricsMap(t *testing.T) { + t.Parallel() type args struct { i mnode.Node } @@ -76,8 +78,11 @@ func Test_newEntryNodeMetricsMap(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -92,12 +97,12 @@ func Test_newEntryNodeMetricsMap(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMetricsMap_Load(t *testing.T) { + t.Parallel() type args struct { key string } @@ -169,8 +174,11 @@ func Test_nodeMetricsMap_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -191,12 +199,12 @@ func Test_nodeMetricsMap_Load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMetricsMap_load(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -249,8 +257,11 @@ func Test_entryNodeMetricsMap_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := 
range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -268,12 +279,12 @@ func Test_entryNodeMetricsMap_load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMetricsMap_Store(t *testing.T) { + t.Parallel() type args struct { key string value mnode.Node @@ -340,8 +351,11 @@ func Test_nodeMetricsMap_Store(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,6 +381,7 @@ func Test_nodeMetricsMap_Store(t *testing.T) { } func Test_entryNodeMetricsMap_tryStore(t *testing.T) { + t.Parallel() type args struct { i *mnode.Node } @@ -425,8 +440,11 @@ func Test_entryNodeMetricsMap_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -444,12 +462,12 @@ func Test_entryNodeMetricsMap_tryStore(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMetricsMap_unexpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -498,8 +516,11 @@ func Test_entryNodeMetricsMap_unexpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -517,12 +538,12 @@ func Test_entryNodeMetricsMap_unexpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotWasExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMetricsMap_storeLocked(t *testing.T) { + t.Parallel() type args struct { i *mnode.Node } @@ -577,8 +598,11 @@ func Test_entryNodeMetricsMap_storeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -601,6 +625,7 @@ func Test_entryNodeMetricsMap_storeLocked(t *testing.T) { } func Test_nodeMetricsMap_LoadOrStore(t *testing.T) { + t.Parallel() type args struct { key string value mnode.Node @@ -675,8 +700,11 @@ func Test_nodeMetricsMap_LoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -697,12 +725,12 @@ func Test_nodeMetricsMap_LoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMetricsMap_tryLoadOrStore(t *testing.T) { + t.Parallel() type args struct { i mnode.Node } @@ -769,8 +797,11 @@ func Test_entryNodeMetricsMap_tryLoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -788,12 +819,12 @@ func Test_entryNodeMetricsMap_tryLoadOrStore(t 
*testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMetricsMap_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -857,8 +888,11 @@ func Test_nodeMetricsMap_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -884,6 +918,7 @@ func Test_nodeMetricsMap_Delete(t *testing.T) { } func Test_entryNodeMetricsMap_delete(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -932,8 +967,11 @@ func Test_entryNodeMetricsMap_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -951,12 +989,12 @@ func Test_entryNodeMetricsMap_delete(t *testing.T) { if err := test.checkFunc(test.want, gotHadValue); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMetricsMap_Range(t *testing.T) { + t.Parallel() type args struct { f func(key string, value mnode.Node) bool } @@ -1020,8 +1058,11 @@ func Test_nodeMetricsMap_Range(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1047,6 +1088,7 @@ func Test_nodeMetricsMap_Range(t *testing.T) { } func Test_nodeMetricsMap_missLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1100,8 +1142,11 @@ func Test_nodeMetricsMap_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1127,6 +1172,7 @@ func Test_nodeMetricsMap_missLocked(t *testing.T) { } func Test_nodeMetricsMap_dirtyLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1180,8 +1226,11 @@ func Test_nodeMetricsMap_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1207,6 +1256,7 @@ func Test_nodeMetricsMap_dirtyLocked(t *testing.T) { } func Test_entryNodeMetricsMap_tryExpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -1255,8 +1305,11 @@ func Test_entryNodeMetricsMap_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1274,7 +1327,6 @@ func Test_entryNodeMetricsMap_tryExpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotIsExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/discoverer/k8s/service/option.go b/pkg/discoverer/k8s/service/option.go index 5cd1cf7959..cb9ebd4dfe 100644 --- a/pkg/discoverer/k8s/service/option.go +++ b/pkg/discoverer/k8s/service/option.go @@ -26,12 +26,10 @@ import ( type Option func(d *discoverer) error -var ( - defaultOpts = []Option{ - 
WithDiscoverDuration("2s"), - WithErrGroup(errgroup.Get()), - } -) +var defaultOpts = []Option{ + WithDiscoverDuration("2s"), + WithErrGroup(errgroup.Get()), +} func WithName(name string) Option { return func(d *discoverer) error { diff --git a/pkg/discoverer/k8s/service/option_test.go b/pkg/discoverer/k8s/service/option_test.go index b849099162..5f4320f9a5 100644 --- a/pkg/discoverer/k8s/service/option_test.go +++ b/pkg/discoverer/k8s/service/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/internal/errgroup" - "go.uber.org/goleak" ) func TestWithName(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { name string @@ -63,7 +64,7 @@ func TestWithName(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithName(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithName(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithName(t *testing.T) { got := WithName(test.args.name) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -139,6 +142,8 @@ func TestWithName(t *testing.T) { } func TestWithNamespace(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { ns string @@ -176,7 +181,7 @@ func TestWithNamespace(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -212,9 +217,11 @@ func TestWithNamespace(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -235,7 +242,7 @@ func TestWithNamespace(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -243,7 +250,7 @@ func TestWithNamespace(t *testing.T) { got := WithNamespace(test.args.ns) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -252,6 +259,8 @@ func TestWithNamespace(t *testing.T) { } func TestWithDiscoverDuration(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -289,7 +298,7 @@ func 
TestWithDiscoverDuration(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -325,9 +334,11 @@ func TestWithDiscoverDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -348,7 +359,7 @@ func TestWithDiscoverDuration(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -356,7 +367,7 @@ func TestWithDiscoverDuration(t *testing.T) { got := WithDiscoverDuration(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -365,6 +376,8 @@ func TestWithDiscoverDuration(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -402,7 +415,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -438,9 +451,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -461,7 +476,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -469,7 +484,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/discoverer/k8s/service/podmetricsmap_test.go b/pkg/discoverer/k8s/service/podmetricsmap_test.go index 3c224e71ca..a212f80753 100644 --- a/pkg/discoverer/k8s/service/podmetricsmap_test.go +++ b/pkg/discoverer/k8s/service/podmetricsmap_test.go @@ -25,9 +25,11 @@ import ( "github.com/vdaas/vald/internal/errors" mpod "github.com/vdaas/vald/internal/k8s/metrics/pod" + "go.uber.org/goleak" ) func Test_newEntryPodMetricsMap(t *testing.T) { + t.Parallel() type args struct { i mpod.Pod } @@ -76,8 +78,11 @@ func Test_newEntryPodMetricsMap(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -92,12 +97,12 @@ func Test_newEntryPodMetricsMap(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func 
Test_podMetricsMap_Load(t *testing.T) { + t.Parallel() type args struct { key string } @@ -169,8 +174,11 @@ func Test_podMetricsMap_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -191,12 +199,12 @@ func Test_podMetricsMap_Load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodMetricsMap_load(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -249,8 +257,11 @@ func Test_entryPodMetricsMap_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -268,12 +279,12 @@ func Test_entryPodMetricsMap_load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podMetricsMap_Store(t *testing.T) { + t.Parallel() type args struct { key string value mpod.Pod @@ -340,8 +351,11 @@ func Test_podMetricsMap_Store(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,6 +381,7 @@ func Test_podMetricsMap_Store(t *testing.T) { } func Test_entryPodMetricsMap_tryStore(t *testing.T) { + t.Parallel() type args struct { i *mpod.Pod } @@ -425,8 +440,11 @@ func Test_entryPodMetricsMap_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -444,12 +462,12 @@ func Test_entryPodMetricsMap_tryStore(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodMetricsMap_unexpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -498,8 +516,11 @@ func Test_entryPodMetricsMap_unexpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -517,12 +538,12 @@ func Test_entryPodMetricsMap_unexpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotWasExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodMetricsMap_storeLocked(t *testing.T) { + t.Parallel() type args struct { i *mpod.Pod } @@ -577,8 +598,11 @@ func Test_entryPodMetricsMap_storeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -601,6 +625,7 @@ func Test_entryPodMetricsMap_storeLocked(t *testing.T) { } func Test_podMetricsMap_LoadOrStore(t *testing.T) { + t.Parallel() type args struct { key string value mpod.Pod @@ -675,8 +700,11 @@ func Test_podMetricsMap_LoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer 
goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -697,12 +725,12 @@ func Test_podMetricsMap_LoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodMetricsMap_tryLoadOrStore(t *testing.T) { + t.Parallel() type args struct { i mpod.Pod } @@ -769,8 +797,11 @@ func Test_entryPodMetricsMap_tryLoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -788,12 +819,12 @@ func Test_entryPodMetricsMap_tryLoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podMetricsMap_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -857,8 +888,11 @@ func Test_podMetricsMap_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -884,6 +918,7 @@ func Test_podMetricsMap_Delete(t *testing.T) { } func Test_entryPodMetricsMap_delete(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -932,8 +967,11 @@ func Test_entryPodMetricsMap_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -951,12 +989,12 @@ func Test_entryPodMetricsMap_delete(t *testing.T) { if err := test.checkFunc(test.want, gotHadValue); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podMetricsMap_Range(t *testing.T) { + t.Parallel() type args struct { f func(key string, value mpod.Pod) bool } @@ -1020,8 +1058,11 @@ func Test_podMetricsMap_Range(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1047,6 +1088,7 @@ func Test_podMetricsMap_Range(t *testing.T) { } func Test_podMetricsMap_missLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1100,8 +1142,11 @@ func Test_podMetricsMap_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1127,6 +1172,7 @@ func Test_podMetricsMap_missLocked(t *testing.T) { } func Test_podMetricsMap_dirtyLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1180,8 +1226,11 @@ func Test_podMetricsMap_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1207,6 +1256,7 @@ func Test_podMetricsMap_dirtyLocked(t *testing.T) { } func Test_entryPodMetricsMap_tryExpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -1255,8 +1305,11 @@ func Test_entryPodMetricsMap_tryExpungeLocked(t *testing.T) { */ } - for _, 
test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1274,7 +1327,6 @@ func Test_entryPodMetricsMap_tryExpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotIsExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/discoverer/k8s/service/podsmap_test.go b/pkg/discoverer/k8s/service/podsmap_test.go index deeb4e7554..55261144e0 100644 --- a/pkg/discoverer/k8s/service/podsmap_test.go +++ b/pkg/discoverer/k8s/service/podsmap_test.go @@ -25,9 +25,11 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/k8s/pod" + "go.uber.org/goleak" ) func Test_newEntryPodsMap(t *testing.T) { + t.Parallel() type args struct { i []pod.Pod } @@ -76,8 +78,11 @@ func Test_newEntryPodsMap(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -92,12 +97,12 @@ func Test_newEntryPodsMap(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podsMap_Load(t *testing.T) { + t.Parallel() type args struct { key string } @@ -169,8 +174,11 @@ func Test_podsMap_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -191,12 +199,12 @@ func Test_podsMap_Load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodsMap_load(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -249,8 +257,11 @@ func Test_entryPodsMap_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -268,12 +279,12 @@ func Test_entryPodsMap_load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podsMap_Store(t *testing.T) { + t.Parallel() type args struct { key string value []pod.Pod @@ -340,8 +351,11 @@ func Test_podsMap_Store(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,6 +381,7 @@ func Test_podsMap_Store(t *testing.T) { } func Test_entryPodsMap_tryStore(t *testing.T) { + t.Parallel() type args struct { i *[]pod.Pod } @@ -425,8 +440,11 @@ func Test_entryPodsMap_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -444,12 +462,12 @@ func Test_entryPodsMap_tryStore(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodsMap_unexpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -498,8 +516,11 @@ func Test_entryPodsMap_unexpungeLocked(t *testing.T) { */ } - 
for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -517,12 +538,12 @@ func Test_entryPodsMap_unexpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotWasExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodsMap_storeLocked(t *testing.T) { + t.Parallel() type args struct { i *[]pod.Pod } @@ -577,8 +598,11 @@ func Test_entryPodsMap_storeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -601,6 +625,7 @@ func Test_entryPodsMap_storeLocked(t *testing.T) { } func Test_podsMap_LoadOrStore(t *testing.T) { + t.Parallel() type args struct { key string value []pod.Pod @@ -675,8 +700,11 @@ func Test_podsMap_LoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -697,12 +725,12 @@ func Test_podsMap_LoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodsMap_tryLoadOrStore(t *testing.T) { + t.Parallel() type args struct { i []pod.Pod } @@ -769,8 +797,11 @@ func Test_entryPodsMap_tryLoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -788,12 +819,12 @@ func Test_entryPodsMap_tryLoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podsMap_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -857,8 +888,11 @@ func Test_podsMap_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -884,6 +918,7 @@ func Test_podsMap_Delete(t *testing.T) { } func Test_entryPodsMap_delete(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -932,8 +967,11 @@ func Test_entryPodsMap_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -951,12 +989,12 @@ func Test_entryPodsMap_delete(t *testing.T) { if err := test.checkFunc(test.want, gotHadValue); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podsMap_Range(t *testing.T) { + t.Parallel() type args struct { f func(key string, value []pod.Pod) bool } @@ -1020,8 +1058,11 @@ func Test_podsMap_Range(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1047,6 +1088,7 @@ func Test_podsMap_Range(t *testing.T) { } func Test_podsMap_missLocked(t *testing.T) { + t.Parallel() type fields struct { mu 
sync.Mutex read atomic.Value @@ -1100,8 +1142,11 @@ func Test_podsMap_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1127,6 +1172,7 @@ func Test_podsMap_missLocked(t *testing.T) { } func Test_podsMap_dirtyLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1180,8 +1226,11 @@ func Test_podsMap_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1207,6 +1256,7 @@ func Test_podsMap_dirtyLocked(t *testing.T) { } func Test_entryPodsMap_tryExpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -1255,8 +1305,11 @@ func Test_entryPodsMap_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1274,7 +1327,6 @@ func Test_entryPodsMap_tryExpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotIsExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/discoverer/k8s/usecase/discovered.go b/pkg/discoverer/k8s/usecase/discovered.go index 3da3591904..88a9699393 100644 --- a/pkg/discoverer/k8s/usecase/discovered.go +++ b/pkg/discoverer/k8s/usecase/discovered.go @@ -19,7 +19,7 @@ package usecase import ( "context" - "github.com/vdaas/vald/apis/grpc/discoverer" + "github.com/vdaas/vald/apis/grpc/v1/discoverer" iconf "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/net/grpc" @@ -114,7 +114,6 @@ func New(cfg *config.Data) (r runner.Runner, err error) { }), // TODO add GraphQL handler ) - if err != nil { return nil, err } diff --git a/pkg/discoverer/k8s/usecase/discovered_test.go b/pkg/discoverer/k8s/usecase/discovered_test.go index 999866b2c1..783867069e 100644 --- a/pkg/discoverer/k8s/usecase/discovered_test.go +++ b/pkg/discoverer/k8s/usecase/discovered_test.go @@ -29,11 +29,11 @@ import ( "github.com/vdaas/vald/pkg/discoverer/k8s/config" handler "github.com/vdaas/vald/pkg/discoverer/k8s/handler/grpc" "github.com/vdaas/vald/pkg/discoverer/k8s/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -86,9 +86,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -103,12 +105,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -182,9 +184,11 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -207,12 +211,12 @@ func Test_run_PreStart(t *testing.T) { if err := 
test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -290,9 +294,11 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -315,12 +321,12 @@ func Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -394,9 +400,11 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -419,12 +427,12 @@ func Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -498,9 +506,11 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -523,12 +533,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -602,9 +612,11 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -627,7 +639,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/gateway/backup/README.md b/pkg/gateway/backup/README.md new file mode 100755 index 0000000000..f95c917d21 --- /dev/null +++ b/pkg/gateway/backup/README.md @@ -0,0 +1 @@ +# vald backup gateway diff --git a/pkg/gateway/backup/config/config.go b/pkg/gateway/backup/config/config.go new file mode 100644 index 0000000000..f016423341 --- /dev/null +++ b/pkg/gateway/backup/config/config.go @@ -0,0 +1,158 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
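// A minimal, self-contained sketch of the table-driven parallel pattern the test files above are
// being migrated to: copy the range variable before the parallel subtest captures it, call
// tt.Parallel(), and verify goroutine leaks against the subtest's own *testing.T (tt) rather than
// the parent t. The test name and doubling logic below are hypothetical; only the loop/goleak
// shape mirrors the patch.
package example_test

import (
	"testing"

	"go.uber.org/goleak"
)

func TestDoubling(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		in   int
		want int
	}{
		{name: "doubles one", in: 1, want: 2},
		{name: "doubles two", in: 2, want: 4},
	}
	for _, tc := range tests {
		test := tc // capture the range variable before the parallel subtest reads it
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			defer goleak.VerifyNone(tt) // leak check per subtest, reported on tt instead of t
			if got := test.in * 2; got != test.want {
				tt.Errorf("got %d, want %d", got, test.want)
			}
		})
	}
}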
+// + +// Package setting stores all server application settings +package config + +import ( + "github.com/vdaas/vald/internal/config" +) + +type ( + GlobalConfig = config.GlobalConfig + Server = config.Server +) + +// Config represent a application setting data content (config.yaml). +// In K8s environment, this configuration is stored in K8s ConfigMap. +type Data struct { + config.GlobalConfig `json:",inline" yaml:",inline"` + + // Server represent all server configurations + Server *config.Servers `json:"server_config" yaml:"server_config"` + + // Observability represent observability configurations + Observability *config.Observability `json:"observability" yaml:"observability"` + + // Client represent gateway client configuration + Client *config.GRPCClient `json:"client" yaml:"client"` + + // BackupManager represent backup manager configuration + Backup *config.BackupManager `json:"backup" yaml:"backup"` +} + +func NewConfig(path string) (cfg *Data, err error) { + err = config.Read(path, &cfg) + + if err != nil { + return nil, err + } + + if cfg != nil { + cfg.Bind() + } + + if cfg.Server != nil { + cfg.Server = cfg.Server.Bind() + } + + if cfg.Observability != nil { + cfg.Observability = cfg.Observability.Bind() + } + + if cfg.Backup != nil { + cfg.Backup = cfg.Backup.Bind() + } + + if cfg.Client != nil { + cfg.Client = cfg.Client.Bind() + } + + return cfg, nil +} + +// func FakeData() { +// d := Data{ +// Version: "v0.0.1", +// Server: &config.Servers{ +// Servers: []*config.Server{ +// { +// Name: "agent-rest", +// Host: "127.0.0.1", +// Port: 8080, +// Mode: "REST", +// ProbeWaitTime: "3s", +// ShutdownDuration: "5s", +// HandlerTimeout: "5s", +// IdleTimeout: "2s", +// ReadHeaderTimeout: "1s", +// ReadTimeout: "1s", +// WriteTimeout: "1s", +// }, +// { +// Name: "agent-grpc", +// Host: "127.0.0.1", +// Port: 8082, +// Mode: "GRPC", +// }, +// }, +// MetricsServers: []*config.Server{ +// { +// Name: "pprof", +// Host: "127.0.0.1", +// Port: 6060, +// Mode: "REST", +// ProbeWaitTime: "3s", +// ShutdownDuration: "5s", +// HandlerTimeout: "5s", +// IdleTimeout: "2s", +// ReadHeaderTimeout: "1s", +// ReadTimeout: "1s", +// WriteTimeout: "1s", +// }, +// }, +// HealthCheckServers: []*config.Server{ +// { +// Name: "livenesss", +// Host: "127.0.0.1", +// Port: 3000, +// }, +// { +// Name: "readiness", +// Host: "127.0.0.1", +// Port: 3001, +// }, +// }, +// StartUpStrategy: []string{ +// "livenesss", +// "pprof", +// "agent-grpc", +// "agent-rest", +// "readiness", +// }, +// ShutdownStrategy: []string{ +// "readiness", +// "agent-rest", +// "agent-grpc", +// "pprof", +// "livenesss", +// }, +// FullShutdownDuration: "30s", +// TLS: &config.TLS{ +// Enabled: false, +// Cert: "/path/to/cert", +// Key: "/path/to/key", +// CA: "/path/to/ca", +// }, +// }, +// Gateway: &config.Gateway{ +// AgentPort: 8080, +// AgentName: "vald-agent", +// BackoffEnabled: false,, +// }, +// } +// fmt.Println(config.ToRawYaml(d)) +// } diff --git a/pkg/gateway/backup/config/config_test.go b/pkg/gateway/backup/config/config_test.go new file mode 100644 index 0000000000..e90809127a --- /dev/null +++ b/pkg/gateway/backup/config/config_test.go @@ -0,0 +1,103 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package setting stores all server application settings +package config + +import ( + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNewConfig(t *testing.T) { + t.Parallel() + type args struct { + path string + } + type want struct { + wantCfg *Data + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, *Data, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCfg *Data, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCfg, w.wantCfg) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCfg, w.wantCfg) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + path: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + path: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + gotCfg, err := NewConfig(test.args.path) + if err := test.checkFunc(test.want, gotCfg, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/apis/proto/manager/traffic/traffic_manager.proto b/pkg/gateway/backup/handler/doc.go similarity index 80% rename from apis/proto/manager/traffic/traffic_manager.proto rename to pkg/gateway/backup/handler/doc.go index 805b3c90f8..86b6d1869d 100644 --- a/apis/proto/manager/traffic/traffic_manager.proto +++ b/pkg/gateway/backup/handler/doc.go @@ -14,10 +14,4 @@ // limitations under the License. // -syntax = "proto3"; - -package traffic_manager; - -option go_package = "github.com/vdaas/vald/apis/grpc/manager/traffic"; - -import "google/api/annotations.proto"; +package handler diff --git a/pkg/gateway/backup/handler/grpc/handler.go b/pkg/gateway/backup/handler/grpc/handler.go new file mode 100644 index 0000000000..60e68fab57 --- /dev/null +++ b/pkg/gateway/backup/handler/grpc/handler.go @@ -0,0 +1,707 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
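// A hedged usage sketch for the NewConfig helper introduced in pkg/gateway/backup/config above:
// load the gateway-backup settings from a YAML path and fail fast when the file cannot be read or
// bound. The file path and the nil checks are hypothetical; only NewConfig and the Data fields
// (Server, Client, Backup, Observability) come from the patch.
package main

import (
	"log"

	config "github.com/vdaas/vald/pkg/gateway/backup/config"
)

func main() {
	cfg, err := config.NewConfig("/etc/server/config.yaml") // hypothetical path
	if err != nil {
		log.Fatalf("failed to load gateway-backup config: %v", err)
	}
	if cfg.Server == nil || cfg.Client == nil || cfg.Backup == nil {
		log.Fatal("incomplete configuration: server, client and backup sections are required")
	}
	log.Println("gateway-backup configuration loaded")
}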
+// + +// Package grpc provides grpc server logic +package grpc + +import ( + "context" + "fmt" + + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + client "github.com/vdaas/vald/internal/client/v1/client/vald" + "github.com/vdaas/vald/internal/core/algorithm" + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/info" + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/internal/net/grpc/status" + "github.com/vdaas/vald/internal/observability/trace" + "github.com/vdaas/vald/internal/safety" + "github.com/vdaas/vald/pkg/gateway/backup/service" + "github.com/vdaas/vald/pkg/gateway/internal/location" +) + +type server struct { + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int +} + +const apiName = "vald/gateway-backup" + +func New(opts ...Option) vald.Server { + s := new(server) + + for _, opt := range append(defaultOpts, opts...) { + opt(s) + } + return s +} + +func (s *server) Exists(ctx context.Context, meta *payload.Object_ID) (*payload.Object_ID, error) { + ctx, span := trace.StartSpan(ctx, apiName+".Exists") + defer func() { + if span != nil { + span.End() + } + }() + ips, err := s.backup.GetLocation(ctx, meta.GetId()) + if err != nil { + log.Debug("an error occurred during calling meta Exists:", err) + return s.gateway.Exists(ctx, meta, s.copts...) + } + if len(ips) > 0 { + return meta, nil + } + return nil, status.WrapWithNotFound(fmt.Sprintf("Exists API meta %s's uuid not found", meta.GetId()), err, meta.GetId(), info.Get()) +} + +func (s *server) Search(ctx context.Context, req *payload.Search_Request) (res *payload.Search_Response, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Search") + defer func() { + if span != nil { + span.End() + } + }() + return s.gateway.Search(ctx, req, s.copts...) +} + +func (s *server) SearchByID(ctx context.Context, req *payload.Search_IDRequest) ( + res *payload.Search_Response, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".SearchByID") + defer func() { + if span != nil { + span.End() + } + }() + vec, err := s.backup.GetObject(ctx, req.GetId()) + if err != nil { + return s.gateway.SearchByID(ctx, req, s.copts...) + } + return s.gateway.Search(ctx, &payload.Search_Request{ + Vector: vec.GetVector(), + Config: req.GetConfig(), + }, s.copts...) 
+} + +func (s *server) StreamSearch(stream vald.Search_StreamSearchServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamSearch") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Search_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Search(ctx, data.(*payload.Search_Request)) + }) +} + +func (s *server) StreamSearchByID(stream vald.Search_StreamSearchByIDServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamSearchByID") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Search_IDRequest) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.SearchByID(ctx, data.(*payload.Search_IDRequest)) + }) +} + +func (s *server) MultiSearch(ctx context.Context, reqs *payload.Search_MultiRequest) (res *payload.Search_Responses, errs error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiSearch") + defer func() { + if span != nil { + span.End() + } + }() + return s.gateway.MultiSearch(ctx, reqs, s.copts...) +} + +func (s *server) MultiSearchByID(ctx context.Context, reqs *payload.Search_MultiIDRequest) (res *payload.Search_Responses, errs error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiSearchByID") + defer func() { + if span != nil { + span.End() + } + }() + + return s.gateway.MultiSearchByID(ctx, reqs, s.copts...) +} + +func (s *server) Insert(ctx context.Context, req *payload.Insert_Request) (loc *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Insert") + defer func() { + if span != nil { + span.End() + } + }() + vec := req.GetVector() + uuid := vec.GetId() + if len(vec.GetVector()) < algorithm.MinimumVectorDimensionSize { + err = errors.ErrInvalidDimensionSize(len(vec.GetVector()), 0) + if span != nil { + span.SetStatus(trace.StatusCodeInvalidArgument(err.Error())) + } + return nil, status.WrapWithInvalidArgument("Insert API invalid vector argument", err, req, info.Get()) + } + if !req.GetConfig().GetSkipStrictExistCheck() { + locs, err := s.backup.GetLocation(ctx, uuid) + if err != nil { + log.Debug("an error occurred while calling meta Exists:", err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal( + fmt.Sprintf("Insert API ID %s couldn't check meta already exists or not", uuid), err, info.Get()) + } + if len(locs) > 0 { + err = errors.Wrap(err, errors.ErrMetaDataAlreadyExists(uuid).Error()) + if span != nil { + span.SetStatus(trace.StatusCodeAlreadyExists(err.Error())) + } + return nil, status.WrapWithAlreadyExists(fmt.Sprintf("Insert API ID %s already exists", vec.GetId()), err, info.Get()) + } + req.Config.SkipStrictExistCheck = true + } + + loc, err = s.gateway.Insert(ctx, req, s.copts...) 
+ if err != nil { + err = errors.Wrapf(err, "Insert API failed to Insert uuid = %s\tinfo = %#v", uuid, info.Get()) + log.Debug(err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Insert API failed to Execute DoMulti error = %s", err.Error()), err, info.Get()) + } + vecs := &payload.Backup_Vector{ + Uuid: uuid, + Ips: loc.GetIps(), + } + if vec != nil { + vecs.Vector = vec.GetVector() + } + err = s.backup.Register(ctx, vecs) + if err != nil { + _, rerr := s.gateway.Remove(ctx, &payload.Remove_Request{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) + if rerr != nil { + err = errors.Wrap(err, rerr.Error()) + } + err = errors.Wrapf(err, "Insert API (backup.Register) failed to Backup Vectors = %#v\t info = %#v", vecs, info.Get()) + log.Debug(err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Insert API uuid %s couldn't store", uuid), err, info.Get()) + } + return loc, nil +} + +func (s *server) StreamInsert(stream vald.Insert_StreamInsertServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamInsert") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Insert_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Insert(ctx, data.(*payload.Insert_Request)) + }) +} + +func (s *server) MultiInsert(ctx context.Context, reqs *payload.Insert_MultiRequest) (res *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiInsert") + defer func() { + if span != nil { + span.End() + } + }() + for i, req := range reqs.GetRequests() { + if !req.GetConfig().GetSkipStrictExistCheck() { + id := req.GetVector().GetId() + loc, err := s.backup.GetLocation(ctx, id) + if err != nil { + log.Debug("an error occurred during calling meta Exists:", err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal( + fmt.Sprintf("MultiInsert API couldn't check metadata exists or not metas = %v", id), err, info.Get()) + } + if len(loc) > 0 { + if span != nil { + span.SetStatus(trace.StatusCodeAlreadyExists(err.Error())) + } + return nil, status.WrapWithAlreadyExists( + fmt.Sprintf("MultiInsert API failed metadata already exists meta = %s", id), err, info.Get()) + } + reqs.Requests[i].Config.SkipStrictExistCheck = true + } + } + + res, err = s.gateway.MultiInsert(ctx, reqs, s.copts...) 
+ if err != nil { + err = errors.Wrapf(err, "MultiInsert API failed to Insert info = %#v", info.Get()) + log.Debug(err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiInsert API failed to Insert error = %s", err.Error()), err, info.Get()) + } + + mvecs := &payload.Backup_Vectors{ + Vectors: make([]*payload.Backup_Vector, 0, len(reqs.GetRequests())), + } + for i, req := range reqs.GetRequests() { + vec := req.GetVector() + uuid := vec.GetId() + mvecs.Vectors = append(mvecs.Vectors, &payload.Backup_Vector{ + Uuid: uuid, + Vector: vec.GetVector(), + Ips: res.Locations[i].GetIps(), + }) + } + err = s.backup.RegisterMultiple(ctx, mvecs) + if err != nil { + removeList := make([]*payload.Remove_Request, 0, len(reqs.GetRequests())) + for _, req := range reqs.GetRequests() { + removeList = append(removeList, &payload.Remove_Request{ + Id: &payload.Object_ID{ + Id: req.GetVector().GetId(), + }, + }) + } + _, rerr := s.gateway.MultiRemove(ctx, &payload.Remove_MultiRequest{ + Requests: removeList, + }, s.copts...) + if rerr != nil { + err = errors.Wrap(err, rerr.Error()) + } + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiInsert API failed RegisterMultiple %#v", mvecs), err, info.Get()) + } + return res, nil +} + +func (s *server) Update(ctx context.Context, req *payload.Update_Request) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Update") + defer func() { + if span != nil { + span.End() + } + }() + id := req.GetVector().GetId() + res, err = s.Remove(ctx, &payload.Remove_Request{ + Id: &payload.Object_ID{ + Id: id, + }, + Config: &payload.Remove_Config{ + SkipStrictExistCheck: true, + }, + }) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Update API failed to remove exsisting data for update %#v", req), err, info.Get()) + } + res, err = s.Insert(ctx, &payload.Insert_Request{ + Vector: &payload.Object_Vector{ + Id: id, + Vector: req.GetVector().GetVector(), + }, + Config: &payload.Insert_Config{ + SkipStrictExistCheck: true, + Filters: req.GetConfig().GetFilters(), + }, + }) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Update API failed to insert data for update %#v", req), err, info.Get()) + } + return res, nil +} + +func (s *server) StreamUpdate(stream vald.Update_StreamUpdateServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamUpdate") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Update_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Update(ctx, data.(*payload.Update_Request)) + }) +} + +func (s *server) MultiUpdate(ctx context.Context, reqs *payload.Update_MultiRequest) (res *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiUpdate") + defer func() { + if span != nil { + span.End() + } + }() + rreqs := make([]*payload.Remove_Request, 0, len(reqs.GetRequests())) + ireqs := make([]*payload.Insert_Request, 0, len(reqs.GetRequests())) + for _, req := range reqs.GetRequests() { + rreqs = append(rreqs, &payload.Remove_Request{ + Id: &payload.Object_ID{ + 
Id: req.GetVector().GetId(), + }, + Config: &payload.Remove_Config{ + SkipStrictExistCheck: true, + }, + }) + ireqs = append(ireqs, &payload.Insert_Request{ + Vector: req.GetVector(), + Config: &payload.Insert_Config{ + SkipStrictExistCheck: true, + Filters: req.GetConfig().GetFilters(), + }, + }) + } + _, err = s.MultiRemove(ctx, &payload.Remove_MultiRequest{ + Requests: rreqs, + }) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiUpdate API failed Remove request %#v", rreqs), err, info.Get()) + } + res, err = s.MultiInsert(ctx, &payload.Insert_MultiRequest{ + Requests: ireqs, + }) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiUpdate API failed Insert request %#v", ireqs), err, info.Get()) + } + return res, nil +} + +func (s *server) Upsert(ctx context.Context, req *payload.Upsert_Request) (loc *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Upsert") + defer func() { + if span != nil { + span.End() + } + }() + + vec := req.GetVector() + id := vec.GetId() + filters := req.GetConfig().GetFilters() + ips, err := s.backup.GetLocation(ctx, req.GetVector().GetId()) + if err != nil { + log.Debug("an error occurred during calling meta Exists:", err) + } + if len(ips) <= 0 { + loc, err = s.Insert(ctx, &payload.Insert_Request{ + Vector: vec, + Config: &payload.Insert_Config{ + SkipStrictExistCheck: true, + Filters: filters, + }, + }) + } else { + loc, err = s.Update(ctx, &payload.Update_Request{ + Vector: vec, + Config: &payload.Update_Config{ + SkipStrictExistCheck: true, + Filters: filters, + }, + }) + } + if err != nil { + log.Debugf("Upsert API failed to process request uuid:\t%s\terror:\t%s", id, err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Upsert API failed to Upsert request %#v", req), err, info.Get()) + } + return loc, nil +} + +func (s *server) StreamUpsert(stream vald.Upsert_StreamUpsertServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamUpsert") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Upsert_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Upsert(ctx, data.(*payload.Upsert_Request)) + }) +} + +func (s *server) MultiUpsert(ctx context.Context, reqs *payload.Upsert_MultiRequest) (locs *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiUpsert") + defer func() { + if span != nil { + span.End() + } + }() + + insertReqs := make([]*payload.Insert_Request, 0, len(reqs.GetRequests())) + updateReqs := make([]*payload.Update_Request, 0, len(reqs.GetRequests())) + + ids := make([]string, 0, len(reqs.GetRequests())) + for _, req := range reqs.GetRequests() { + vec := req.GetVector() + uuid := vec.GetId() + ids = append(ids, uuid) + _, err = s.Exists(ctx, &payload.Object_ID{ + Id: uuid, + }) + filters := req.GetConfig().GetFilters() + if err != nil { + insertReqs = append(insertReqs, &payload.Insert_Request{ + Vector: vec, + Config: &payload.Insert_Config{ + SkipStrictExistCheck: true, + Filters: filters, + }, + }) + } else { + updateReqs = append(updateReqs, &payload.Update_Request{ + Vector: vec, + Config: 
&payload.Update_Config{ + SkipStrictExistCheck: true, + Filters: filters, + }, + }) + } + } + + insertLocs := make([]*payload.Object_Location, 0, len(insertReqs)) + updateLocs := make([]*payload.Object_Location, 0, len(updateReqs)) + + eg, ectx := errgroup.New(ctx) + if len(updateReqs) <= 0 { + eg.Go(safety.RecoverFunc(func() error { + ectx, span := trace.StartSpan(ectx, apiName+".MultiUpsert/Go-MultiUpdate") + defer func() { + if span != nil { + span.End() + } + }() + var err error + loc, err := s.MultiUpdate(ectx, &payload.Update_MultiRequest{ + Requests: updateReqs, + }) + if err == nil { + updateLocs = loc.GetLocations() + } + return err + })) + } + if len(insertReqs) <= 0 { + eg.Go(safety.RecoverFunc(func() error { + ectx, span := trace.StartSpan(ectx, apiName+".MultiUpsert/Go-MultiInsert") + defer func() { + if span != nil { + span.End() + } + }() + var err error + loc, err := s.MultiInsert(ectx, &payload.Insert_MultiRequest{ + Requests: insertReqs, + }) + if err == nil { + insertLocs = loc.GetLocations() + } + return err + })) + } + err = eg.Wait() + if err != nil { + log.Debugf("MultiUpsert API failed to process request uuids:\t%s\terror:\t%s", ids, err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiUpsert API failed to process request %v", ids), err, info.Get()) + } + + return location.ReStructure(ids, &payload.Object_Locations{ + Locations: append(insertLocs, updateLocs...), + }), nil +} + +func (s *server) Remove(ctx context.Context, req *payload.Remove_Request) (loc *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Remove") + defer func() { + if span != nil { + span.End() + } + }() + id := req.GetId() + if !req.GetConfig().GetSkipStrictExistCheck() { + ips, err := s.backup.GetLocation(ctx, id.GetId()) + if err != nil { + log.Debug("an error occurred while calling meta Exists:", err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal( + fmt.Sprintf("Remove API ID %s couldn't check meta already exists or not", id.GetId()), err, info.Get()) + } + if len(ips) <= 0 { + err = errors.Wrap(err, errors.ErrMetaDataAlreadyExists(id.GetId()).Error()) + if span != nil { + span.SetStatus(trace.StatusCodeAlreadyExists(err.Error())) + } + return nil, status.WrapWithAlreadyExists(fmt.Sprintf("Remove API ID %s not found", id.GetId()), err, info.Get()) + } + req.Config.SkipStrictExistCheck = true + } + + loc, err = s.gateway.Remove(ctx, req, s.copts...) 
+ if err != nil { + log.Debugf("Remove API failed to process request uuid:\t%s\terror:\t%s", id.GetId(), err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Remove API failed to Remove backup uuid = %s", id.GetId()), err, info.Get()) + } + err = s.backup.Remove(ctx, id.GetId()) + if err != nil { + log.Debugf("Remove API failed to remove backup data\tid:\t%s\terror:\t%s", id.GetId(), err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Remove API failed to Remove backup uuid = %s", id.GetId()), err, info.Get()) + } + return loc, nil +} + +func (s *server) StreamRemove(stream vald.Remove_StreamRemoveServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamRemove") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Remove_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Remove(ctx, data.(*payload.Remove_Request)) + }) +} + +func (s *server) MultiRemove(ctx context.Context, reqs *payload.Remove_MultiRequest) (locs *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiRemove") + defer func() { + if span != nil { + span.End() + } + }() + ids := make([]string, 0, len(reqs.GetRequests())) + for _, req := range reqs.GetRequests() { + id := req.GetId().GetId() + ids = append(ids, id) + if !req.GetConfig().GetSkipStrictExistCheck() { + ips, err := s.backup.GetLocation(ctx, id) + if err != nil { + log.Debug("an error occurred while calling meta Exists:", err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal( + fmt.Sprintf("MultiRemove API ID %s couldn't check meta already exists or not", id), err, info.Get()) + } + if len(ips) <= 0 { + err = errors.Wrap(err, errors.ErrMetaDataAlreadyExists(id).Error()) + if span != nil { + span.SetStatus(trace.StatusCodeAlreadyExists(err.Error())) + } + return nil, status.WrapWithAlreadyExists(fmt.Sprintf("MultiRemove API ID %s not found", id), err, info.Get()) + } + } + } + locs, err = s.gateway.MultiRemove(ctx, reqs, s.copts...) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiRemove API failed to Remove backup uuids = %v", ids), err, info.Get()) + } + err = s.backup.RemoveMultiple(ctx, ids...) 
+ if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiRemove API failed to Remove backup uuids %v ", ids), err, info.Get()) + } + return locs, nil +} + +func (s *server) GetObject(ctx context.Context, id *payload.Object_ID) (vec *payload.Object_Vector, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".GetObject") + defer func() { + if span != nil { + span.End() + } + }() + mvec, err := s.backup.GetObject(ctx, id.GetId()) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + return nil, status.WrapWithNotFound(fmt.Sprintf("GetObject API uuid %s Object not found", id.GetId()), err, info.Get()) + } + return &payload.Object_Vector{ + Id: mvec.GetUuid(), + Vector: mvec.GetVector(), + }, nil +} + +func (s *server) StreamGetObject(stream vald.Object_StreamGetObjectServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamGetObject") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Object_ID) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.GetObject(ctx, data.(*payload.Object_ID)) + }) +} diff --git a/hack/benchmark/internal/client/ngtd/rest/client_test.go b/pkg/gateway/backup/handler/grpc/handler_test.go similarity index 54% rename from hack/benchmark/internal/client/ngtd/rest/client_test.go rename to pkg/gateway/backup/handler/grpc/handler_test.go index 7b7773a98b..2a6b84de6d 100644 --- a/hack/benchmark/internal/client/ngtd/rest/client_test.go +++ b/pkg/gateway/backup/handler/grpc/handler_test.go @@ -14,42 +14,41 @@ // limitations under the License. 
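// A hedged, self-contained sketch of the compensation pattern the Insert handler above follows:
// write to the primary index first, then register the backup, and roll the primary write back if
// backup registration fails, keeping both errors visible. The Indexer/Backup interfaces and the
// function name here are hypothetical simplifications; the real handler works with the Vald
// gateway client, the backup service, and the payload types shown in the patch.
package example

import (
	"context"
	"fmt"
)

type Indexer interface {
	Insert(ctx context.Context, id string, vec []float32) error
	Remove(ctx context.Context, id string) error
}

type Backup interface {
	Register(ctx context.Context, id string, vec []float32) error
}

// InsertWithBackup inserts into the index and registers a backup copy,
// removing the freshly inserted vector again when the backup step fails.
func InsertWithBackup(ctx context.Context, idx Indexer, bk Backup, id string, vec []float32) error {
	if err := idx.Insert(ctx, id, vec); err != nil {
		return fmt.Errorf("insert %s: %w", id, err)
	}
	if err := bk.Register(ctx, id, vec); err != nil {
		if rerr := idx.Remove(ctx, id); rerr != nil {
			// surface both failures, mirroring the error-wrapping chain in the handler
			return fmt.Errorf("backup %s: %v (rollback failed: %v)", id, err, rerr)
		}
		return fmt.Errorf("backup %s (rolled back): %w", id, err)
	}
	return nil
}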
// -// Package rest provides rest client functions -package rest +// Package grpc provides grpc server logic +package grpc import ( "context" "reflect" "testing" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + client "github.com/vdaas/vald/internal/client/v1/client/vald" + "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" - "github.com/yahoojapan/ngtd/model" - + "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/pkg/gateway/backup/service" "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context opts []Option } type want struct { - want Client - err error + want vald.Server } type test struct { name string args args want want - checkFunc func(want, Client, error) error + checkFunc func(want, vald.Server) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got Client, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) - } + defaultCheckFunc := func(w want, got vald.Server) error { if !reflect.DeepEqual(got, w.want) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } @@ -61,7 +60,6 @@ func TestNew(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, opts: nil, }, want: want{}, @@ -75,7 +73,6 @@ func TestNew(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, opts: nil, }, want: want{}, @@ -85,9 +82,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -98,25 +97,29 @@ func TestNew(t *testing.T) { test.checkFunc = defaultCheckFunc } - got, err := New(test.args.ctx, test.args.opts...) - if err := test.checkFunc(test.want, got, err); err != nil { + got := New(test.args.opts...) 
+ if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_Exists(t *testing.T) { +func Test_server_Exists(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + ctx context.Context + meta *payload.Object_ID } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - want *client.ObjectID + want *payload.Object_ID err error } type test struct { @@ -124,11 +127,11 @@ func Test_ngtdClient_Exists(t *testing.T) { args args fields fields want want - checkFunc func(want, *client.ObjectID, error) error + checkFunc func(want, *payload.Object_ID, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.ObjectID, err error) error { + defaultCheckFunc := func(w want, got *payload.Object_ID, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -144,10 +147,14 @@ func Test_ngtdClient_Exists(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + meta: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -161,10 +168,14 @@ func Test_ngtdClient_Exists(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + meta: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -173,9 +184,11 @@ func Test_ngtdClient_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -185,46 +198,54 @@ func Test_ngtdClient_Exists(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - got, err := c.Exists(test.args.ctx, test.args.req) + got, err := s.Exists(test.args.ctx, test.args.meta) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_Search(t *testing.T) { +func Test_server_Search(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.SearchRequest + req *payload.Search_Request } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - want *client.SearchResponse - err error + wantRes *payload.Search_Response + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.SearchResponse, error) error + checkFunc func(want, *payload.Search_Response, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.SearchResponse, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, 
w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) } return nil } @@ -238,7 +259,11 @@ func Test_ngtdClient_Search(t *testing.T) { req: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -255,7 +280,11 @@ func Test_ngtdClient_Search(t *testing.T) { req: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -264,9 +293,11 @@ func Test_ngtdClient_Search(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -276,46 +307,54 @@ func Test_ngtdClient_Search(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - got, err := c.Search(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotRes, err := s.Search(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_SearchByID(t *testing.T) { +func Test_server_SearchByID(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.SearchIDRequest + req *payload.Search_IDRequest } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - want *client.SearchResponse - err error + wantRes *payload.Search_Response + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.SearchResponse, error) error + checkFunc func(want, *payload.Search_Response, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.SearchResponse, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) } return nil } @@ -329,7 +368,11 @@ func Test_ngtdClient_SearchByID(t *testing.T) { req: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -346,7 +389,11 @@ func Test_ngtdClient_SearchByID(t *testing.T) { req: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -355,9 +402,11 @@ func Test_ngtdClient_SearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer 
goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,27 +416,33 @@ func Test_ngtdClient_SearchByID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - got, err := c.SearchByID(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotRes, err := s.SearchByID(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_StreamSearch(t *testing.T) { +func Test_server_StreamSearch(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.SearchRequest - f func(*client.SearchResponse, error) + stream vald.Search_StreamSearchServer } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { err error @@ -413,12 +468,14 @@ func Test_ngtdClient_StreamSearch(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -431,12 +488,14 @@ func Test_ngtdClient_StreamSearch(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -445,9 +504,11 @@ func Test_ngtdClient_StreamSearch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -457,27 +518,33 @@ func Test_ngtdClient_StreamSearch(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamSearch(test.args.ctx, test.args.dataProvider, test.args.f) + err := s.StreamSearch(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_StreamSearchByID(t *testing.T) { +func Test_server_StreamSearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.SearchIDRequest - f func(*client.SearchResponse, error) + stream vald.Search_StreamSearchByIDServer } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { err error @@ -503,12 +570,14 @@ func Test_ngtdClient_StreamSearchByID(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, 
want: want{}, checkFunc: defaultCheckFunc, @@ -521,12 +590,14 @@ func Test_ngtdClient_StreamSearchByID(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -535,9 +606,11 @@ func Test_ngtdClient_StreamSearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -547,43 +620,55 @@ func Test_ngtdClient_StreamSearchByID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamSearchByID(test.args.ctx, test.args.dataProvider, test.args.f) + err := s.StreamSearchByID(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_Insert(t *testing.T) { +func Test_server_MultiSearch(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVector + ctx context.Context + reqs *payload.Search_MultiRequest } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantRes *payload.Search_Responses + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Search_Responses, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Responses, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -593,10 +678,14 @@ func Test_ngtdClient_Insert(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -610,10 +699,14 @@ func Test_ngtdClient_Insert(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -622,9 +715,11 @@ func Test_ngtdClient_Insert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -634,44 +729,55 @@ func Test_ngtdClient_Insert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: 
test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.Insert(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.MultiSearch(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_StreamInsert(t *testing.T) { +func Test_server_MultiSearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectVector - f func(error) + ctx context.Context + reqs *payload.Search_MultiIDRequest } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantRes *payload.Search_Responses + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Search_Responses, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Responses, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -681,11 +787,14 @@ func Test_ngtdClient_StreamInsert(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -699,11 +808,14 @@ func Test_ngtdClient_StreamInsert(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -712,9 +824,11 @@ func Test_ngtdClient_StreamInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -724,43 +838,55 @@ func Test_ngtdClient_StreamInsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamInsert(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.MultiSearchByID(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_MultiInsert(t *testing.T) { +func Test_server_Insert(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.ObjectVectors + req *payload.Insert_Request } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantLoc 
*payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotLoc *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotLoc, w.wantLoc) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoc, w.wantLoc) + } return nil } tests := []test{ @@ -773,7 +899,11 @@ func Test_ngtdClient_MultiInsert(t *testing.T) { req: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -790,7 +920,11 @@ func Test_ngtdClient_MultiInsert(t *testing.T) { req: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -799,9 +933,11 @@ func Test_ngtdClient_MultiInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -811,26 +947,33 @@ func Test_ngtdClient_MultiInsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.MultiInsert(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotLoc, err := s.Insert(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotLoc, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_Update(t *testing.T) { +func Test_server_StreamInsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVector + stream vald.Insert_StreamInsertServer } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { err error @@ -856,11 +999,14 @@ func Test_ngtdClient_Update(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -873,11 +1019,14 @@ func Test_ngtdClient_Update(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -886,9 +1035,11 @@ func Test_ngtdClient_Update(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -898,44 +1049,55 @@ func Test_ngtdClient_Update(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := 
&ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.Update(test.args.ctx, test.args.req) + err := s.StreamInsert(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_StreamUpdate(t *testing.T) { +func Test_server_MultiInsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectVector - f func(error) + ctx context.Context + reqs *payload.Insert_MultiRequest } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantRes *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -945,11 +1107,14 @@ func Test_ngtdClient_StreamUpdate(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -963,11 +1128,14 @@ func Test_ngtdClient_StreamUpdate(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -976,9 +1144,11 @@ func Test_ngtdClient_StreamUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -988,43 +1158,55 @@ func Test_ngtdClient_StreamUpdate(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamUpdate(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.MultiInsert(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_MultiUpdate(t *testing.T) { +func Test_server_Update(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.ObjectVectors + req *payload.Update_Request } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + 
wantRes *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1037,7 +1219,11 @@ func Test_ngtdClient_MultiUpdate(t *testing.T) { req: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1054,7 +1240,11 @@ func Test_ngtdClient_MultiUpdate(t *testing.T) { req: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1063,9 +1253,11 @@ func Test_ngtdClient_MultiUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1075,26 +1267,33 @@ func Test_ngtdClient_MultiUpdate(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.MultiUpdate(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.Update(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_Remove(t *testing.T) { +func Test_server_StreamUpdate(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + stream vald.Update_StreamUpdateServer } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { err error @@ -1120,11 +1319,14 @@ func Test_ngtdClient_Remove(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1137,11 +1339,14 @@ func Test_ngtdClient_Remove(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1150,9 +1355,11 @@ func Test_ngtdClient_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1162,44 +1369,55 @@ func Test_ngtdClient_Remove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = 
defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.Remove(test.args.ctx, test.args.req) + err := s.StreamUpdate(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_StreamRemove(t *testing.T) { +func Test_server_MultiUpdate(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectID - f func(error) + ctx context.Context + reqs *payload.Update_MultiRequest } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantRes *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1209,11 +1427,14 @@ func Test_ngtdClient_StreamRemove(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1227,11 +1448,14 @@ func Test_ngtdClient_StreamRemove(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1240,9 +1464,11 @@ func Test_ngtdClient_StreamRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1252,43 +1478,55 @@ func Test_ngtdClient_StreamRemove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamRemove(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.MultiUpdate(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_MultiRemove(t *testing.T) { +func Test_server_Upsert(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.ObjectIDs + req *payload.Upsert_Request } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want 
struct { - err error + wantLoc *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotLoc *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotLoc, w.wantLoc) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoc, w.wantLoc) + } return nil } tests := []test{ @@ -1301,7 +1539,11 @@ func Test_ngtdClient_MultiRemove(t *testing.T) { req: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1318,7 +1560,11 @@ func Test_ngtdClient_MultiRemove(t *testing.T) { req: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1327,9 +1573,11 @@ func Test_ngtdClient_MultiRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1339,47 +1587,50 @@ func Test_ngtdClient_MultiRemove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.MultiRemove(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotLoc, err := s.Upsert(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotLoc, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_GetObject(t *testing.T) { +func Test_server_StreamUpsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + stream vald.Upsert_StreamUpsertServer } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - want *client.ObjectVector - err error + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.ObjectVector, error) error + checkFunc func(want, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.ObjectVector, err error) error { + defaultCheckFunc := func(w want, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } return nil } tests := []test{ @@ -1388,11 +1639,14 @@ func Test_ngtdClient_GetObject(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1405,11 +1659,14 @@ func Test_ngtdClient_GetObject(t *testing.T) { return test { 
name: "test_case_2", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1418,9 +1675,11 @@ func Test_ngtdClient_GetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1430,44 +1689,55 @@ func Test_ngtdClient_GetObject(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - got, err := c.GetObject(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + err := s.StreamUpsert(test.args.stream) + if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_StreamGetObject(t *testing.T) { +func Test_server_MultiUpsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectID - f func(*client.ObjectVector, error) + ctx context.Context + reqs *payload.Upsert_MultiRequest } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantLocs *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotLocs *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotLocs, w.wantLocs) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLocs, w.wantLocs) + } return nil } tests := []test{ @@ -1477,11 +1747,14 @@ func Test_ngtdClient_StreamGetObject(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1495,11 +1768,14 @@ func Test_ngtdClient_StreamGetObject(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1508,9 +1784,11 @@ func Test_ngtdClient_StreamGetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1520,43 +1798,55 @@ func Test_ngtdClient_StreamGetObject(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + 
gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamGetObject(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotLocs, err := s.MultiUpsert(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotLocs, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_CreateIndex(t *testing.T) { +func Test_server_Remove(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.ControlCreateIndexRequest + req *payload.Remove_Request } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantLoc *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotLoc *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotLoc, w.wantLoc) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoc, w.wantLoc) + } return nil } tests := []test{ @@ -1569,7 +1859,11 @@ func Test_ngtdClient_CreateIndex(t *testing.T) { req: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1586,7 +1880,11 @@ func Test_ngtdClient_CreateIndex(t *testing.T) { req: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1595,9 +1893,11 @@ func Test_ngtdClient_CreateIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1607,25 +1907,33 @@ func Test_ngtdClient_CreateIndex(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.CreateIndex(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotLoc, err := s.Remove(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotLoc, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_SaveIndex(t *testing.T) { +func Test_server_StreamRemove(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context + stream vald.Remove_StreamRemoveServer } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { err error @@ -1651,10 +1959,14 @@ func Test_ngtdClient_SaveIndex(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, + stream: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, 
want: want{}, checkFunc: defaultCheckFunc, @@ -1667,10 +1979,14 @@ func Test_ngtdClient_SaveIndex(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, + stream: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1679,9 +1995,11 @@ func Test_ngtdClient_SaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1691,43 +2009,55 @@ func Test_ngtdClient_SaveIndex(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.SaveIndex(test.args.ctx) + err := s.StreamRemove(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_CreateAndSaveIndex(t *testing.T) { +func Test_server_MultiRemove(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ControlCreateIndexRequest + ctx context.Context + reqs *payload.Remove_MultiRequest } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantLocs *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotLocs *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotLocs, w.wantLocs) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLocs, w.wantLocs) + } return nil } tests := []test{ @@ -1737,10 +2067,14 @@ func Test_ngtdClient_CreateAndSaveIndex(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1754,10 +2088,14 @@ func Test_ngtdClient_CreateAndSaveIndex(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1766,9 +2104,11 @@ func Test_ngtdClient_CreateAndSaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1778,45 +2118,54 @@ func Test_ngtdClient_CreateAndSaveIndex(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, 
+ copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.CreateAndSaveIndex(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotLocs, err := s.MultiRemove(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotLocs, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_IndexInfo(t *testing.T) { +func Test_server_GetObject(t *testing.T) { + t.Parallel() type args struct { ctx context.Context + id *payload.Object_ID } type fields struct { - addr string + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - want *client.InfoIndex - err error + wantVec *payload.Object_Vector + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.InfoIndex, error) error + checkFunc func(want, *payload.Object_Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.InfoIndex, err error) error { + defaultCheckFunc := func(w want, gotVec *payload.Object_Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotVec, w.wantVec) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) } return nil } @@ -1827,9 +2176,14 @@ func Test_ngtdClient_IndexInfo(t *testing.T) { name: "test_case_1", args: args { ctx: nil, + id: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1843,9 +2197,14 @@ func Test_ngtdClient_IndexInfo(t *testing.T) { name: "test_case_2", args: args { ctx: nil, + id: nil, }, fields: fields { - addr: "", + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1854,9 +2213,11 @@ func Test_ngtdClient_IndexInfo(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1866,108 +2227,49 @@ func Test_ngtdClient_IndexInfo(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - got, err := c.IndexInfo(test.args.ctx) - if err := test.checkFunc(test.want, got, err); err != nil { + gotVec, err := s.GetObject(test.args.ctx, test.args.id) + if err := test.checkFunc(test.want, gotVec, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_searchRequestToNgtdSearchRequest(t *testing.T) { +func Test_server_StreamGetObject(t *testing.T) { + t.Parallel() type args struct { - in *client.SearchRequest - } - type want struct { - want *model.SearchRequest - } - type test struct { - name string - args args - want want - checkFunc func(want, *model.SearchRequest) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got *model.SearchRequest) error { - if !reflect.DeepEqual(got, 
w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := searchRequestToNgtdSearchRequest(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) + stream vald.Object_StreamGetObjectServer } -} - -func Test_searchIDRequestToNgtdSearchRequest(t *testing.T) { - type args struct { - in *client.SearchIDRequest + type fields struct { + eg errgroup.Group + backup service.Backup + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - want *model.SearchRequest + err error } type test struct { name string args args + fields fields want want - checkFunc func(want, *model.SearchRequest) error + checkFunc func(want, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *model.SearchRequest) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } return nil } @@ -1977,7 +2279,14 @@ func Test_searchIDRequestToNgtdSearchRequest(t *testing.T) { { name: "test_case_1", args: args { - in: nil, + stream: nil, + }, + fields: fields { + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1990,7 +2299,14 @@ func Test_searchIDRequestToNgtdSearchRequest(t *testing.T) { return test { name: "test_case_2", args: args { - in: nil, + stream: nil, + }, + fields: fields { + eg: nil, + backup: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1999,9 +2315,11 @@ func Test_searchIDRequestToNgtdSearchRequest(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2011,442 +2329,18 @@ func Test_searchIDRequestToNgtdSearchRequest(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } + s := &server{ + eg: test.fields.eg, + backup: test.fields.backup, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, + } - got := searchIDRequestToNgtdSearchRequest(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { + err := s.StreamGetObject(test.args.stream) + if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - - }) - } -} - -func Test_objectVectorToNgtdInsertRequest(t *testing.T) { - type args struct { - in *client.ObjectVector - } - type want struct { - want *model.InsertRequest - } - type test struct { 
- name string - args args - want want - checkFunc func(want, *model.InsertRequest) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got *model.InsertRequest) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := objectVectorToNgtdInsertRequest(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_objectVectorsToNgtdMultiInsertRequest(t *testing.T) { - type args struct { - in *client.ObjectVectors - } - type want struct { - want *model.MultiInsertRequest - } - type test struct { - name string - args args - want want - checkFunc func(want, *model.MultiInsertRequest) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got *model.MultiInsertRequest) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := objectVectorsToNgtdMultiInsertRequest(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_objectIDsToNgtdMultiRemoveRequest(t *testing.T) { - type args struct { - in *client.ObjectIDs - } - type want struct { - want *model.MultiRemoveRequest - } - type test struct { - name string - args args - want want - checkFunc func(want, *model.MultiRemoveRequest) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got *model.MultiRemoveRequest) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - 
test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := objectIDsToNgtdMultiRemoveRequest(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_ngtdSearchResponseToSearchResponse(t *testing.T) { - type args struct { - in *model.SearchResponse - } - type want struct { - want *client.SearchResponse - } - type test struct { - name string - args args - want want - checkFunc func(want, *client.SearchResponse) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got *client.SearchResponse) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := ngtdSearchResponseToSearchResponse(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_getSizeAndEpsilon(t *testing.T) { - type args struct { - cfg *client.SearchConfig - } - type want struct { - wantSize int - wantEpsilon float32 - } - type test struct { - name string - args args - want want - checkFunc func(want, int, float32) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, gotSize int, gotEpsilon float32) error { - if !reflect.DeepEqual(gotSize, w.wantSize) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotSize, w.wantSize) - } - if !reflect.DeepEqual(gotEpsilon, w.wantEpsilon) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotEpsilon, w.wantEpsilon) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - cfg: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - cfg: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - gotSize, gotEpsilon := getSizeAndEpsilon(test.args.cfg) - if err := test.checkFunc(test.want, gotSize, gotEpsilon); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_tofloat64(t *testing.T) { - type args struct { - in []float32 - } - type want struct { - wantOut []float64 - } - type test struct { - name string - args args - want want - checkFunc func(want, []float64) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, gotOut []float64) error { - if 
!reflect.DeepEqual(gotOut, w.wantOut) {
-			return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOut, w.wantOut)
-		}
-		return nil
-	}
-	tests := []test{
-		// TODO test cases
-		/*
-			{
-				name: "test_case_1",
-				args: args {
-					in: nil,
-				},
-				want: want{},
-				checkFunc: defaultCheckFunc,
-			},
-		*/
-
-		// TODO test cases
-		/*
-			func() test {
-				return test {
-					name: "test_case_2",
-					args: args {
-						in: nil,
-					},
-					want: want{},
-					checkFunc: defaultCheckFunc,
-				}
-			}(),
-		*/
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(tt *testing.T) {
-			defer goleak.VerifyNone(t)
-			if test.beforeFunc != nil {
-				test.beforeFunc(test.args)
-			}
-			if test.afterFunc != nil {
-				defer test.afterFunc(test.args)
-			}
-			if test.checkFunc == nil {
-				test.checkFunc = defaultCheckFunc
-			}
-
-			gotOut := tofloat64(test.args.in)
-			if err := test.checkFunc(test.want, gotOut); err != nil {
-				tt.Errorf("error = %v", err)
-			}
-
 		})
 	}
 }
diff --git a/pkg/gateway/backup/handler/grpc/option.go b/pkg/gateway/backup/handler/grpc/option.go
new file mode 100644
index 0000000000..7a3edb54b6
--- /dev/null
+++ b/pkg/gateway/backup/handler/grpc/option.go
@@ -0,0 +1,63 @@
+//
+// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package grpc provides grpc server logic
+package grpc
+
+import (
+	"github.com/vdaas/vald/internal/client/v1/client/vald"
+	"github.com/vdaas/vald/internal/errgroup"
+	"github.com/vdaas/vald/pkg/gateway/backup/service"
+)
+
+type Option func(*server)
+
+var defaultOpts = []Option{
+	WithErrGroup(errgroup.Get()),
+	WithStreamConcurrency(20),
+}
+
+func WithBackup(b service.Backup) Option {
+	return func(s *server) {
+		if b != nil {
+			s.backup = b
+		}
+	}
+}
+
+func WithValdClient(g vald.Client) Option {
+	return func(s *server) {
+		if g != nil {
+			s.gateway = g
+		}
+	}
+}
+
+func WithErrGroup(eg errgroup.Group) Option {
+	return func(s *server) {
+		if eg != nil {
+			s.eg = eg
+		}
+	}
+}
+
+func WithStreamConcurrency(c int) Option {
+	return func(s *server) {
+		if c != 0 {
+			s.streamConcurrency = c
+		}
+	}
+}
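// option.go above wires the backup gateway's gRPC server with the functional-options
// pattern: each With* function returns an Option closure over the unexported server
// struct, and each closure ignores nil or zero values, so applying defaultOpts
// (errgroup.Get() and a stream concurrency of 20) always yields a usable server.
// The constructor sketched below only illustrates how such options are typically
// consumed; the name New, its return type, and the call-site variables are
// assumptions for illustration and are not part of this diff.
package grpc

func New(opts ...Option) *server { // hypothetical constructor, for illustration only
	s := new(server)
	// Apply the package defaults first so caller-supplied options can override them.
	for _, opt := range defaultOpts {
		opt(s)
	}
	for _, opt := range opts {
		opt(s)
	}
	return s
}

// Hypothetical call site (variable names assumed):
//
//	srv := New(
//		WithValdClient(lbClient),   // client the backup gateway uses to reach the LB gateway
//		WithBackup(backupService),  // backup service used by this gateway
//		WithStreamConcurrency(50),  // overrides the default of 20
//	)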
diff --git a/pkg/gateway/backup/handler/grpc/option_test.go b/pkg/gateway/backup/handler/grpc/option_test.go
new file mode 100644
index 0000000000..5c690cf2e3
--- /dev/null
+++ b/pkg/gateway/backup/handler/grpc/option_test.go
@@ -0,0 +1,495 @@
+//
+// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package grpc provides grpc server logic
+package grpc
+
+import (
+	"testing"
+
+	"github.com/vdaas/vald/internal/client/v1/client/vald"
+	"github.com/vdaas/vald/internal/errgroup"
+	"github.com/vdaas/vald/pkg/gateway/backup/service"
+	"go.uber.org/goleak"
+)
+
+func TestWithBackup(t *testing.T) {
+	t.Parallel()
+	// Change interface type to the type of object you are testing
+	type T = interface{}
+	type args struct {
+		b service.Backup
+	}
+	type want struct {
+		obj *T
+		// Uncomment this line if the option returns an error, otherwise delete it
+		// err error
+	}
+	type test struct {
+		name string
+		args args
+		want want
+		// Use the first line if the option returns an error, otherwise use the second line
+		// checkFunc func(want, *T, error) error
+		// checkFunc func(want, *T) error
+		beforeFunc func(args)
+		afterFunc func(args)
+	}
+
+	// Uncomment this block if the option returns an error, otherwise delete it
+	/*
+		defaultCheckFunc := func(w want, obj *T, err error) error {
+			if !errors.Is(err, w.err) {
+				return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err)
+			}
+			if !reflect.DeepEqual(obj, w.obj) {
+				return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj)
+			}
+			return nil
+		}
+	*/
+
+	// Uncomment this block if the option does not return an error, otherwise delete it
+	/*
+		defaultCheckFunc := func(w want, obj *T) error {
+			if !reflect.DeepEqual(obj, w.obj) {
+				return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj)
+			}
+			return nil
+		}
+	*/
+
+	tests := []test{
+		// TODO test cases
+		/*
+			{
+				name: "test_case_1",
+				args: args {
+					b: nil,
+				},
+				want: want {
+					obj: new(T),
+				},
+			},
+		*/
+
+		// TODO test cases
+		/*
+			func() test {
+				return test {
+					name: "test_case_2",
+					args: args {
+						b: nil,
+					},
+					want: want {
+						obj: new(T),
+					},
+				}
+			}(),
+		*/
+	}
+
+	for _, tc := range tests {
+		test := tc
+		t.Run(test.name, func(tt *testing.T) {
+			tt.Parallel()
+			defer goleak.VerifyNone(tt)
+			if test.beforeFunc != nil {
+				test.beforeFunc(test.args)
+			}
+			if test.afterFunc != nil {
+				defer test.afterFunc(test.args)
+			}
+
+			// Uncomment this block if the option returns an error, otherwise delete it
+			/*
+				if test.checkFunc == nil {
+					test.checkFunc = defaultCheckFunc
+				}
+
+				got := WithBackup(test.args.b)
+				obj := new(T)
+				if err := test.checkFunc(test.want, obj, got(obj)); err != nil {
+					tt.Errorf("error = %v", err)
+				}
+			*/
+
+			// Uncomment this block if the option does not return an error, otherwise delete it
+			/*
+				if test.checkFunc == nil {
+					test.checkFunc = defaultCheckFunc
+				}
+				got := WithBackup(test.args.b)
+				obj := new(T)
+				got(obj)
+				if err := test.checkFunc(test.want, obj); err != nil {
+					tt.Errorf("error = %v", err)
+				}
+			*/
+		})
+	}
+}
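// Every table-driven test touched by this diff switches to the same parallel harness:
// the loop re-binds the range variable (test := tc) before calling tt.Parallel(), and
// goleak verifies the subtest's own *testing.T (tt) rather than the parent t.
// The re-binding matters because, under pre-Go-1.22 loop-variable semantics, parallel
// subtests would otherwise all close over the same tc and run against the final table
// entry. The snippet below is a minimal self-contained illustration of that idiom;
// it is an assumed example and not part of this diff.
package example

import (
	"testing"

	"go.uber.org/goleak"
)

func TestParallelTableIdiom(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		in   int
		want int
	}{
		{name: "zero", in: 0, want: 0},
		{name: "one", in: 1, want: 2},
	}
	for _, tc := range tests {
		test := tc // capture the current entry; without this, every parallel subtest would see the last tc
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			defer goleak.VerifyNone(tt) // fail this subtest if it leaks goroutines
			if got := test.in * 2; got != test.want {
				tt.Errorf("got = %d, want = %d", got, test.want)
			}
		})
	}
}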
+
+func TestWithValdClient(t *testing.T) {
+	t.Parallel()
+	// Change interface type to the type of object you are testing
+	type T = interface{}
+	type args struct {
+		g vald.Client
+	}
+	type want struct {
+		obj *T
+		// Uncomment this line if the option returns an error, otherwise delete it
+		// err error
+	}
+	type test struct {
+		name string
+		args args
+		want want
+		// Use the first line if the option returns an error, otherwise use the second line
+		// checkFunc func(want, *T, error) error
+		// checkFunc func(want, *T) error
+		beforeFunc func(args)
+		afterFunc func(args)
+	}
+
+	// Uncomment this block if the option returns an error, otherwise delete it
+	/*
+		defaultCheckFunc := func(w want, obj *T, err error) error {
+			if !errors.Is(err, w.err) {
+				return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err)
+			}
+			if !reflect.DeepEqual(obj, w.obj) {
+				return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj)
+			}
+			return nil
+		}
+	*/
+
+	// Uncomment this block if the option does not return an error, otherwise delete it
+	/*
+		defaultCheckFunc := func(w want, obj *T) error {
+			if !reflect.DeepEqual(obj, w.obj) {
+				return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj)
+			}
+			return nil
+		}
+	*/
+
+	tests := []test{
+		// TODO test cases
+		/*
+			{
+				name: "test_case_1",
+				args: args {
+					g: nil,
+				},
+				want: want {
+					obj: new(T),
+				},
+			},
+		*/
+
+		// TODO test cases
+		/*
+			func() test {
+				return test {
+					name: "test_case_2",
+					args: args {
+						g: nil,
+					},
+					want: want {
+						obj: new(T),
+					},
+				}
+			}(),
+		*/
+	}
+
+	for _, tc := range tests {
+		test := tc
+		t.Run(test.name, func(tt *testing.T) {
+			tt.Parallel()
+			defer goleak.VerifyNone(tt)
+			if test.beforeFunc != nil {
+				test.beforeFunc(test.args)
+			}
+			if test.afterFunc != nil {
+				defer test.afterFunc(test.args)
+			}
+
+			// Uncomment this block if the option returns an error, otherwise delete it
+			/*
+				if test.checkFunc == nil {
+					test.checkFunc = defaultCheckFunc
+				}
+
+				got := WithValdClient(test.args.g)
+				obj := new(T)
+				if err := test.checkFunc(test.want, obj, got(obj)); err != nil {
+					tt.Errorf("error = %v", err)
+				}
+			*/
+
+			// Uncomment this block if the option does not return an error, otherwise delete it
+			/*
+				if test.checkFunc == nil {
+					test.checkFunc = defaultCheckFunc
+				}
+				got := WithValdClient(test.args.g)
+				obj := new(T)
+				got(obj)
+				if err := test.checkFunc(test.want, obj); err != nil {
+					tt.Errorf("error = %v", err)
+				}
+			*/
+		})
+	}
+}
+
+func TestWithErrGroup(t *testing.T) {
+	t.Parallel()
+	// Change interface type to the type of object you are testing
+	type T = interface{}
+	type args struct {
+		eg errgroup.Group
+	}
+	type want struct {
+		obj *T
+		// Uncomment this line if the option returns an error, otherwise delete it
+		// err error
+	}
+	type test struct {
+		name string
+		args args
+		want want
+		// Use the first line if the option returns an error, otherwise use the second line
+		// checkFunc func(want, *T, error) error
+		// checkFunc func(want, *T) error
+		beforeFunc func(args)
+		afterFunc func(args)
+	}
+
+	// Uncomment this block if the option returns an error, otherwise delete it
+	/*
+		defaultCheckFunc := func(w want, obj *T, err error) error {
+			if !errors.Is(err, w.err) {
+				return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err)
+			}
+			if !reflect.DeepEqual(obj, w.obj) {
+				return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj)
+			}
+			return nil
+		}
+	*/
+
+	// Uncomment this block if the option does not return an error, otherwise delete it
+	/*
+		defaultCheckFunc := func(w want, obj *T) error {
+			if !reflect.DeepEqual(obj, w.obj) {
+				return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj)
+			}
+			return nil
+		}
+	*/
+
+	tests := []test{
+		// TODO test cases
+		/*
+			{
+				name: "test_case_1",
+				args: args {
+					eg: nil,
+				},
+				want: want {
+					obj: new(T),
+				},
+			},
+		*/
+
+		// TODO test cases
+		/*
+			func() test {
+				return test {
+					name: "test_case_2",
+					args: args {
+						eg: nil,
+					},
+					want: want {
+						obj: new(T),
+					},
+				}
+			}(),
+		*/
+	}
+
+	for _, tc := range tests {
+		test := tc
+		t.Run(test.name, func(tt *testing.T) {
+			tt.Parallel()
+			defer goleak.VerifyNone(tt)
+			if test.beforeFunc != nil {
+				test.beforeFunc(test.args)
+			}
+			if test.afterFunc != nil {
+				defer test.afterFunc(test.args)
+			}
+
+			// Uncomment this block if the option returns an error, otherwise delete it
+			/*
+				if test.checkFunc == nil {
+					test.checkFunc = defaultCheckFunc
+				}
+
+				got := WithErrGroup(test.args.eg)
+				obj := new(T)
+				if err := test.checkFunc(test.want, obj, got(obj)); err != nil {
+					tt.Errorf("error = %v", err)
+				}
+			*/
+
+			// Uncomment this block if the option does not return an error, otherwise delete it
+			/*
+				if test.checkFunc == nil {
+					test.checkFunc = defaultCheckFunc
+				}
+				got := WithErrGroup(test.args.eg)
+				obj := new(T)
+				got(obj)
+				if err := test.checkFunc(test.want, obj); err != nil {
+					tt.Errorf("error = %v", err)
+				}
+			*/
+		})
+	}
+}
+
+func TestWithStreamConcurrency(t *testing.T) {
+	t.Parallel()
+	// Change interface type to the type of object you are testing
+	type T = interface{}
+	type args struct {
+		c int
+	}
+	type want struct {
+		obj *T
+		// Uncomment this line if the option returns an error, otherwise delete it
+		// err error
+	}
+	type test struct {
+		name string
+		args args
+		want want
+		// Use the first line if the option returns an error, otherwise use the second line
+		// checkFunc func(want, *T, error) error
+		// checkFunc func(want, *T) error
+		beforeFunc func(args)
+		afterFunc func(args)
+	}
+
+	// Uncomment this block if the option returns an error, otherwise delete it
+	/*
+		defaultCheckFunc := func(w want, obj *T, err error) error {
+			if !errors.Is(err, w.err) {
+				return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err)
+			}
+			if !reflect.DeepEqual(obj, w.obj) {
+				return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj)
+			}
+			return nil
+		}
+	*/
+
+	// Uncomment this block if the option does not return an error, otherwise delete it
+	/*
+		defaultCheckFunc := func(w want, obj *T) error {
+			if !reflect.DeepEqual(obj, w.obj) {
+				return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj)
+			}
+			return nil
+		}
+	*/
+
+	tests := []test{
+		// TODO test cases
+		/*
+			{
+				name: "test_case_1",
+				args: args {
+					c: 0,
+				},
+				want: want {
+					obj: new(T),
+				},
+			},
+		*/
+
+		// TODO test cases
+		/*
+			func() test {
+				return test {
+					name: "test_case_2",
+					args: args {
+						c: 0,
+					},
+					want: want {
+						obj: new(T),
+					},
+				}
+			}(),
+		*/
+	}
+
+	for _, tc := range tests {
+		test := tc
+		t.Run(test.name, func(tt *testing.T) {
+			tt.Parallel()
+			defer goleak.VerifyNone(tt)
+			if test.beforeFunc != nil {
+				test.beforeFunc(test.args)
+			}
+			if test.afterFunc != nil {
+				defer test.afterFunc(test.args)
+			}
+
+			// Uncomment this block if the option returns an error, otherwise delete it
+			/*
+				if test.checkFunc == nil {
+					test.checkFunc = defaultCheckFunc
+				}
+
+				got := WithStreamConcurrency(test.args.c)
+				obj := new(T)
+				if err := test.checkFunc(test.want, obj, got(obj)); err != nil {
+					tt.Errorf("error = %v", err)
+				}
+			*/
+
+			// Uncomment this block if the option does not return an error, otherwise delete it
+			/*
+				if test.checkFunc == nil {
+					test.checkFunc = defaultCheckFunc
+				}
+				got := WithStreamConcurrency(test.args.c)
+				obj := new(T)
+				got(obj)
+				if err := test.checkFunc(test.want, obj); err != nil {
+					tt.Errorf("error = %v", err)
+				}
+			*/
+		})
+	}
+}
diff --git a/pkg/gateway/backup/handler/rest/handler.go b/pkg/gateway/backup/handler/rest/handler.go
new file mode 100644
index 0000000000..59032d8e1f
--- /dev/null
+++ b/pkg/gateway/backup/handler/rest/handler.go
@@ -0,0 +1,163 @@
+//
+// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package rest provides rest api logic
+package rest
+
+import (
+	"net/http"
+
+	"github.com/vdaas/vald/apis/grpc/v1/payload"
+	"github.com/vdaas/vald/apis/grpc/v1/vald"
+	"github.com/vdaas/vald/internal/net/http/dump"
+	"github.com/vdaas/vald/internal/net/http/json"
+)
+
+type Handler interface {
+	Index(w http.ResponseWriter, r *http.Request) (int, error)
+	Exists(w http.ResponseWriter, r *http.Request) (int, error)
+	Search(w http.ResponseWriter, r *http.Request) (int, error)
+	SearchByID(w http.ResponseWriter, r *http.Request) (int, error)
+	MultiSearch(w http.ResponseWriter, r *http.Request) (int, error)
+	MultiSearchByID(w http.ResponseWriter, r *http.Request) (int, error)
+	Insert(w http.ResponseWriter, r *http.Request) (int, error)
+	MultiInsert(w http.ResponseWriter, r *http.Request) (int, error)
+	Update(w http.ResponseWriter, r *http.Request) (int, error)
+	MultiUpdate(w http.ResponseWriter, r *http.Request) (int, error)
+	Upsert(w http.ResponseWriter, r *http.Request) (int, error)
+	MultiUpsert(w http.ResponseWriter, r *http.Request) (int, error)
+	Remove(w http.ResponseWriter, r *http.Request) (int, error)
+	MultiRemove(w http.ResponseWriter, r *http.Request) (int, error)
+	GetObject(w http.ResponseWriter, r *http.Request) (int, error)
+}
+
+type handler struct {
+	vald vald.Server
+}
+
+func New(opts ...Option) Handler {
+	h := new(handler)
+
+	for _, opt := range append(defaultOpts, opts...) {
+		opt(h)
+	}
+	return h
+}
+
+func (h *handler) Index(w http.ResponseWriter, r *http.Request) (int, error) {
+	data := make(map[string]interface{})
+	return json.Handler(w, r, &data, func() (interface{}, error) {
+		return dump.Request(nil, data, r)
+	})
+}
+
+func (h *handler) Search(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Search_Request
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.Search(r.Context(), req)
+	})
+}
+
+func (h *handler) SearchByID(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Search_IDRequest
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.SearchByID(r.Context(), req)
+	})
+}
+
+func (h *handler) MultiSearch(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Search_MultiRequest
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.MultiSearch(r.Context(), req)
+	})
+}
+
+func (h *handler) MultiSearchByID(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Search_MultiIDRequest
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.MultiSearchByID(r.Context(), req)
+	})
+}
+
+func (h *handler) Insert(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Insert_Request
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.Insert(r.Context(), req)
+	})
+}
+
+func (h *handler) MultiInsert(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Insert_MultiRequest
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.MultiInsert(r.Context(), req)
+	})
+}
+
+func (h *handler) Update(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Update_Request
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.Update(r.Context(), req)
+	})
+}
+
+func (h *handler) MultiUpdate(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Update_MultiRequest
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.MultiUpdate(r.Context(), req)
+	})
+}
+
+func (h *handler) Upsert(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Upsert_Request
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.Upsert(r.Context(), req)
+	})
+}
+
+func (h *handler) MultiUpsert(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Upsert_MultiRequest
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.MultiUpsert(r.Context(), req)
+	})
+}
+
+func (h *handler) Remove(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Remove_Request
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.Remove(r.Context(), req)
+	})
+}
+
+func (h *handler) MultiRemove(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Remove_MultiRequest
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.MultiRemove(r.Context(), req)
+	})
+}
+
+func (h *handler) GetObject(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Object_ID
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.GetObject(r.Context(), req)
+	})
+}
+
+func (h *handler) Exists(w http.ResponseWriter, r *http.Request) (code int, err error) {
+	var req *payload.Object_ID
+	return json.Handler(w, r, &req, func() (interface{}, error) {
+		return h.vald.Exists(r.Context(), req)
+	})
+}
diff --git a/internal/client/gateway/vald/rest/client_test.go b/pkg/gateway/backup/handler/rest/handler_test.go
similarity index 53%
rename from internal/client/gateway/vald/rest/client_test.go
rename to pkg/gateway/backup/handler/rest/handler_test.go
index b8097caecb..2987f032b3 100644
--- a/internal/client/gateway/vald/rest/client_test.go
+++ b/pkg/gateway/backup/handler/rest/handler_test.go
@@ -14,36 +14,36 @@
 // limitations under the License.
// -// Package rest provides vald REST client functions +// Package rest provides rest api logic package rest import ( - "context" + "net/http" "reflect" "testing" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/apis/grpc/v1/vald" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } type want struct { - want Client + want Handler } type test struct { name string args args want want - checkFunc func(want, Client) error + checkFunc func(want, Handler) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got Client) error { + defaultCheckFunc := func(w want, got Handler) error { if !reflect.DeepEqual(got, w.want) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } @@ -77,9 +77,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -94,38 +96,38 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_Exists(t *testing.T) { +func Test_handler_Index(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - wantResp *client.ObjectID - err error + want int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.ObjectID, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotResp *client.ObjectID, err error) error { + defaultCheckFunc := func(w want, got int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(gotResp, w.wantResp) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotResp, w.wantResp) + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -135,11 +137,11 @@ func Test_gatewayClient_Exists(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -152,11 +154,11 @@ func Test_gatewayClient_Exists(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -165,9 +167,11 @@ func Test_gatewayClient_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -177,29 +181,29 @@ func Test_gatewayClient_Exists(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - gotResp, err := c.Exists(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, gotResp, err); err != nil { + got, err := h.Index(test.args.w, 
test.args.r) + if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_Search(t *testing.T) { +func Test_handler_Search(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.SearchRequest + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - wantResp *client.SearchResponse + wantCode int err error } type test struct { @@ -207,16 +211,16 @@ func Test_gatewayClient_Search(t *testing.T) { args args fields fields want want - checkFunc func(want, *client.SearchResponse, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotResp *client.SearchResponse, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(gotResp, w.wantResp) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotResp, w.wantResp) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -226,11 +230,11 @@ func Test_gatewayClient_Search(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -243,11 +247,11 @@ func Test_gatewayClient_Search(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -256,9 +260,11 @@ func Test_gatewayClient_Search(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -268,29 +274,29 @@ func Test_gatewayClient_Search(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - gotResp, err := c.Search(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, gotResp, err); err != nil { + gotCode, err := h.Search(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_SearchByID(t *testing.T) { +func Test_handler_SearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.SearchIDRequest + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - wantResp *client.SearchResponse + wantCode int err error } type test struct { @@ -298,16 +304,16 @@ func Test_gatewayClient_SearchByID(t *testing.T) { args args fields fields want want - checkFunc func(want, *client.SearchResponse, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotResp *client.SearchResponse, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(gotResp, w.wantResp) { - return 
errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotResp, w.wantResp) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -317,11 +323,11 @@ func Test_gatewayClient_SearchByID(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -334,11 +340,11 @@ func Test_gatewayClient_SearchByID(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -347,9 +353,11 @@ func Test_gatewayClient_SearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -359,133 +367,46 @@ func Test_gatewayClient_SearchByID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - gotResp, err := c.SearchByID(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, gotResp, err); err != nil { + gotCode, err := h.SearchByID(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_StreamSearch(t *testing.T) { +func Test_handler_MultiSearch(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.SearchRequest - f func(*client.SearchResponse, error) + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - ctx: nil, - dataProvider: nil, - f: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - ctx: nil, - dataProvider: nil, - f: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - c := &gatewayClient{ - addr: test.fields.addr, - } - - err := c.StreamSearch(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_gatewayClient_StreamSearchByID(t *testing.T) { - type args struct { - ctx context.Context - dataProvider func() 
*client.SearchIDRequest - f func(*client.SearchResponse, error) - } - type fields struct { - addr string - } - type want struct { - err error - } - type test struct { - name string - args args - fields fields - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -495,12 +416,11 @@ func Test_gatewayClient_StreamSearchByID(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -513,12 +433,11 @@ func Test_gatewayClient_StreamSearchByID(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -527,9 +446,11 @@ func Test_gatewayClient_StreamSearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -539,130 +460,46 @@ func Test_gatewayClient_StreamSearchByID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.StreamSearchByID(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.MultiSearch(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_Insert(t *testing.T) { +func Test_handler_MultiSearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVector + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - ctx: nil, - req: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - ctx: nil, - req: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - c 
:= &gatewayClient{ - addr: test.fields.addr, - } - - err := c.Insert(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_gatewayClient_StreamInsert(t *testing.T) { - type args struct { - ctx context.Context - dataProvider func() *client.ObjectVector - f func(error) - } - type fields struct { - addr string - } - type want struct { - err error - } - type test struct { - name string - args args - fields fields - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -672,12 +509,11 @@ func Test_gatewayClient_StreamInsert(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -690,12 +526,11 @@ func Test_gatewayClient_StreamInsert(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -704,9 +539,11 @@ func Test_gatewayClient_StreamInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -716,129 +553,46 @@ func Test_gatewayClient_StreamInsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.StreamInsert(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.MultiSearchByID(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_MultiInsert(t *testing.T) { +func Test_handler_Insert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVectors + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - ctx: nil, - req: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - ctx: nil, - req: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range 
tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - c := &gatewayClient{ - addr: test.fields.addr, - } - - err := c.MultiInsert(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_gatewayClient_Update(t *testing.T) { - type args struct { - ctx context.Context - req *client.ObjectVector - } - type fields struct { - addr string - } - type want struct { - err error - } - type test struct { - name string - args args - fields fields - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -848,11 +602,11 @@ func Test_gatewayClient_Update(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -865,11 +619,11 @@ func Test_gatewayClient_Update(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -878,9 +632,11 @@ func Test_gatewayClient_Update(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -890,132 +646,46 @@ func Test_gatewayClient_Update(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.Update(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.Insert(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_StreamUpdate(t *testing.T) { +func Test_handler_MultiInsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectVector - f func(error) + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - ctx: nil, - dataProvider: nil, - f: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - 
return test { - name: "test_case_2", - args: args { - ctx: nil, - dataProvider: nil, - f: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - c := &gatewayClient{ - addr: test.fields.addr, - } - - err := c.StreamUpdate(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_gatewayClient_MultiUpdate(t *testing.T) { - type args struct { - ctx context.Context - req *client.ObjectVectors - } - type fields struct { - addr string - } - type want struct { - err error - } - type test struct { - name string - args args - fields fields - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -1025,11 +695,11 @@ func Test_gatewayClient_MultiUpdate(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1042,11 +712,11 @@ func Test_gatewayClient_MultiUpdate(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1055,9 +725,11 @@ func Test_gatewayClient_MultiUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1067,43 +739,47 @@ func Test_gatewayClient_MultiUpdate(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.MultiUpdate(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.MultiInsert(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_Upsert(t *testing.T) { +func Test_handler_Update(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVector + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotCode, w.wantCode) { + 
return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } return nil } tests := []test{ @@ -1112,11 +788,11 @@ func Test_gatewayClient_Upsert(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1129,11 +805,11 @@ func Test_gatewayClient_Upsert(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1142,9 +818,11 @@ func Test_gatewayClient_Upsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1154,43 +832,47 @@ func Test_gatewayClient_Upsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.Upsert(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.Update(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_MultiUpsert(t *testing.T) { +func Test_handler_MultiUpdate(t *testing.T) { + t.Parallel() type args struct { - in0 context.Context - in1 *client.ObjectVectors + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } return nil } tests := []test{ @@ -1199,11 +881,11 @@ func Test_gatewayClient_MultiUpsert(t *testing.T) { { name: "test_case_1", args: args { - in0: nil, - in1: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1216,11 +898,11 @@ func Test_gatewayClient_MultiUpsert(t *testing.T) { return test { name: "test_case_2", args: args { - in0: nil, - in1: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1229,9 +911,11 @@ func Test_gatewayClient_MultiUpsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1241,44 +925,47 @@ func Test_gatewayClient_MultiUpsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.MultiUpsert(test.args.in0, test.args.in1) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := 
h.MultiUpdate(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_StreamUpsert(t *testing.T) { +func Test_handler_Upsert(t *testing.T) { + t.Parallel() type args struct { - in0 context.Context - in1 func() *client.ObjectVector - in2 func(error) + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } return nil } tests := []test{ @@ -1287,12 +974,11 @@ func Test_gatewayClient_StreamUpsert(t *testing.T) { { name: "test_case_1", args: args { - in0: nil, - in1: nil, - in2: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1305,12 +991,11 @@ func Test_gatewayClient_StreamUpsert(t *testing.T) { return test { name: "test_case_2", args: args { - in0: nil, - in1: nil, - in2: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1319,9 +1004,11 @@ func Test_gatewayClient_StreamUpsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1331,43 +1018,47 @@ func Test_gatewayClient_StreamUpsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.StreamUpsert(test.args.in0, test.args.in1, test.args.in2) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.Upsert(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_Remove(t *testing.T) { +func Test_handler_MultiUpsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } return nil } tests := []test{ @@ -1376,11 +1067,11 @@ func Test_gatewayClient_Remove(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - 
addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1393,11 +1084,11 @@ func Test_gatewayClient_Remove(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1406,9 +1097,11 @@ func Test_gatewayClient_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1418,44 +1111,47 @@ func Test_gatewayClient_Remove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.Remove(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.MultiUpsert(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_StreamRemove(t *testing.T) { +func Test_handler_Remove(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectID - f func(error) + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } return nil } tests := []test{ @@ -1464,12 +1160,11 @@ func Test_gatewayClient_StreamRemove(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1482,12 +1177,11 @@ func Test_gatewayClient_StreamRemove(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1496,9 +1190,11 @@ func Test_gatewayClient_StreamRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1508,43 +1204,47 @@ func Test_gatewayClient_StreamRemove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.StreamRemove(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.Remove(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_MultiRemove(t *testing.T) { +func 
Test_handler_MultiRemove(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectIDs + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } return nil } tests := []test{ @@ -1553,11 +1253,11 @@ func Test_gatewayClient_MultiRemove(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1570,11 +1270,11 @@ func Test_gatewayClient_MultiRemove(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1583,9 +1283,11 @@ func Test_gatewayClient_MultiRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1595,29 +1297,29 @@ func Test_gatewayClient_MultiRemove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.MultiRemove(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.MultiRemove(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_GetObject(t *testing.T) { +func Test_handler_GetObject(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - wantResp *client.MetaObject + wantCode int err error } type test struct { @@ -1625,16 +1327,16 @@ func Test_gatewayClient_GetObject(t *testing.T) { args args fields fields want want - checkFunc func(want, *client.MetaObject, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotResp *client.MetaObject, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(gotResp, w.wantResp) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotResp, w.wantResp) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -1644,11 +1346,11 @@ func Test_gatewayClient_GetObject(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, 
}, want: want{}, checkFunc: defaultCheckFunc, @@ -1661,11 +1363,11 @@ func Test_gatewayClient_GetObject(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1674,9 +1376,11 @@ func Test_gatewayClient_GetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1686,44 +1390,47 @@ func Test_gatewayClient_GetObject(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - gotResp, err := c.GetObject(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, gotResp, err); err != nil { + gotCode, err := h.GetObject(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_StreamGetObject(t *testing.T) { +func Test_handler_Exists(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectID - f func(*client.MetaObject, error) + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } return nil } tests := []test{ @@ -1732,12 +1439,11 @@ func Test_gatewayClient_StreamGetObject(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1750,12 +1456,11 @@ func Test_gatewayClient_StreamGetObject(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1764,9 +1469,11 @@ func Test_gatewayClient_StreamGetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1776,15 +1483,14 @@ func Test_gatewayClient_StreamGetObject(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.StreamGetObject(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.Exists(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git 
a/pkg/gateway/backup/handler/rest/option.go b/pkg/gateway/backup/handler/rest/option.go
new file mode 100644
index 0000000000..478a365a77
--- /dev/null
+++ b/pkg/gateway/backup/handler/rest/option.go
@@ -0,0 +1,30 @@
+//
+// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package rest provides rest api logic
+package rest
+
+import "github.com/vdaas/vald/apis/grpc/v1/vald"
+
+type Option func(*handler)
+
+var defaultOpts = []Option{}
+
+func WithVald(v vald.Server) Option {
+	return func(h *handler) {
+		h.vald = v
+	}
+}
diff --git a/pkg/gateway/backup/handler/rest/option_test.go b/pkg/gateway/backup/handler/rest/option_test.go
new file mode 100644
index 0000000000..5efca60aa7
--- /dev/null
+++ b/pkg/gateway/backup/handler/rest/option_test.go
@@ -0,0 +1,142 @@
+//
+// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package rest provides rest api logic
+package rest
+
+import (
+	"testing"
+
+	"github.com/vdaas/vald/apis/grpc/v1/vald"
+	"go.uber.org/goleak"
+)
+
+func TestWithVald(t *testing.T) {
+	t.Parallel()
+	// Change interface type to the type of object you are testing
+	type T = interface{}
+	type args struct {
+		v vald.Server
+	}
+	type want struct {
+		obj *T
+		// Uncomment this line if the option returns an error, otherwise delete it
+		// err error
+	}
+	type test struct {
+		name string
+		args args
+		want want
+		// Use the first line if the option returns an error, otherwise use the second line
+		// checkFunc func(want, *T, error) error
+		// checkFunc func(want, *T) error
+		beforeFunc func(args)
+		afterFunc  func(args)
+	}
+
+	// Uncomment this block if the option returns an error, otherwise delete it
+	/*
+		defaultCheckFunc := func(w want, obj *T, err error) error {
+			if !errors.Is(err, w.err) {
+				return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err)
+			}
+			if !reflect.DeepEqual(obj, w.obj) {
+				return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj)
+			}
+			return nil
+		}
+	*/
+
+	// Uncomment this block if the option does not return an error, otherwise delete it
+	/*
+		defaultCheckFunc := func(w want, obj *T) error {
+			if !reflect.DeepEqual(obj, w.obj) {
+				return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj)
+			}
+			return nil
+		}
+	*/
+
+	tests := []test{
+		// TODO test cases
+		/*
+			{
+				name: "test_case_1",
+				args: args {
+					v: nil,
+				},
+				want: want {
+					obj: new(T),
+				},
+			},
+		*/
+
+		// TODO test cases
+		/*
+			func() test {
+				return test {
+					name: "test_case_2",
+					args: args {
+						v: nil,
+					},
+					want: want {
+						obj: new(T),
+					},
+				}
+			}(),
+		*/
+	}
+
+	for _, tc := range tests {
+		test := tc
+		t.Run(test.name, func(tt *testing.T) {
+			tt.Parallel()
+			defer goleak.VerifyNone(tt)
+			if test.beforeFunc != nil {
+				test.beforeFunc(test.args)
+			}
+			if test.afterFunc != nil {
+				defer test.afterFunc(test.args)
+			}
+
+			// Uncomment this block if the option returns an error, otherwise delete it
+			/*
+				if test.checkFunc == nil {
+					test.checkFunc = defaultCheckFunc
+				}
+
+				got := WithVald(test.args.v)
+				obj := new(T)
+				if err := test.checkFunc(test.want, obj, got(obj)); err != nil {
+					tt.Errorf("error = %v", err)
+				}
+			*/
+
+			// Uncomment this block if the option does not return an error, otherwise delete it
+			/*
+				if test.checkFunc == nil {
+					test.checkFunc = defaultCheckFunc
+				}
+				got := WithVald(test.args.v)
+				obj := new(T)
+				got(obj)
+				if err := test.checkFunc(test.want, obj); err != nil {
+					tt.Errorf("error = %v", err)
+				}
+			*/
+		})
+	}
+}
diff --git a/pkg/gateway/backup/router/option.go b/pkg/gateway/backup/router/option.go
new file mode 100644
index 0000000000..20ee0bac56
--- /dev/null
+++ b/pkg/gateway/backup/router/option.go
@@ -0,0 +1,40 @@
+//
+// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +// Package router provides implementation of Go API for routing http Handler wrapped by rest.Func +package router + +import ( + "github.com/vdaas/vald/pkg/gateway/backup/handler/rest" +) + +type Option func(*router) + +var defaultOpts = []Option{ + WithTimeout("3s"), +} + +func WithHandler(h rest.Handler) Option { + return func(r *router) { + r.handler = h + } +} + +func WithTimeout(timeout string) Option { + return func(r *router) { + r.timeout = timeout + } +} diff --git a/pkg/gateway/backup/router/option_test.go b/pkg/gateway/backup/router/option_test.go new file mode 100644 index 0000000000..a72772af3e --- /dev/null +++ b/pkg/gateway/backup/router/option_test.go @@ -0,0 +1,259 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package router provides implementation of Go API for routing http Handler wrapped by rest.Func +package router + +import ( + "testing" + + "github.com/vdaas/vald/pkg/gateway/backup/handler/rest" + "go.uber.org/goleak" +) + +func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + h rest.Handler + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + h: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + h: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithHandler(test.args.h) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithHandler(test.args.h) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + timeout string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + timeout: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + timeout: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithTimeout(test.args.timeout) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithTimeout(test.args.timeout) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/gateway/backup/router/router.go b/pkg/gateway/backup/router/router.go new file mode 100644 index 0000000000..919432507b --- /dev/null +++ b/pkg/gateway/backup/router/router.go @@ -0,0 +1,167 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package router provides implementation of Go API for routing http Handler wrapped by rest.Func +package router + +import ( + "net/http" + + "github.com/vdaas/vald/internal/net/http/routing" + "github.com/vdaas/vald/pkg/gateway/backup/handler/rest" +) + +type router struct { + handler rest.Handler + timeout string +} + +// New returns REST route&method information from handler interface. +func New(opts ...Option) http.Handler { + r := new(router) + + for _, opt := range append(defaultOpts, opts...) 
{ + opt(r) + } + + h := r.handler + + return routing.New( + routing.WithRoutes([]routing.Route{ + { + "Index", + []string{ + http.MethodGet, + }, + "/", + h.Index, + }, + { + "Search", + []string{ + http.MethodPost, + }, + "/search", + h.Search, + }, + { + "Search By ID", + []string{ + http.MethodGet, + }, + "/search/{id}", + h.SearchByID, + }, + + { + "Multi Search", + []string{ + http.MethodPost, + }, + "/search/multi", + h.MultiSearch, + }, + { + "Multi Search By ID", + []string{ + http.MethodGet, + }, + "/search/multi/{id}", + h.MultiSearchByID, + }, + { + "Insert", + []string{ + http.MethodPost, + }, + "/insert", + h.Insert, + }, + { + "Multiple Insert", + []string{ + http.MethodPost, + }, + "/insert/multi", + h.MultiInsert, + }, + { + "Update", + []string{ + http.MethodPost, + http.MethodPatch, + http.MethodPut, + }, + "/update", + h.Update, + }, + { + "Multiple Update", + []string{ + http.MethodPost, + http.MethodPatch, + http.MethodPut, + }, + "/update/multi", + h.MultiUpdate, + }, + { + "Upsert", + []string{ + http.MethodPost, + http.MethodPatch, + http.MethodPut, + }, + "/upsert", + h.Upsert, + }, + { + "Multiple Upsert", + []string{ + http.MethodPost, + http.MethodPatch, + http.MethodPut, + }, + "/upsert/multi", + h.MultiUpsert, + }, + { + "Remove", + []string{ + http.MethodDelete, + }, + "/delete/{id}", + h.Remove, + }, + { + "Multiple Remove", + []string{ + http.MethodDelete, + http.MethodPost, + }, + "/delete/multi", + h.MultiRemove, + }, + { + "GetObject", + []string{ + http.MethodGet, + }, + "/object/{id}", + h.GetObject, + }, + }...)) +} diff --git a/pkg/gateway/backup/router/router_test.go b/pkg/gateway/backup/router/router_test.go new file mode 100644 index 0000000000..97248787ae --- /dev/null +++ b/pkg/gateway/backup/router/router_test.go @@ -0,0 +1,100 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
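The route table in router.New above binds each REST path and method set to the matching rest.Handler method and returns a plain http.Handler, so it can be mounted on any HTTP server. A hedged usage sketch; rest.New and rest.WithVald are taken from the usecase wiring later in this diff, while the listen address and the nil server value are illustrative only:

package main

import (
	"log"
	"net/http"

	"github.com/vdaas/vald/apis/grpc/v1/vald"
	"github.com/vdaas/vald/pkg/gateway/backup/handler/rest"
	"github.com/vdaas/vald/pkg/gateway/backup/router"
)

// serve mounts the backup gateway's REST routes on a plain net/http server.
// The real binary wires the same handler through internal/servers/starter
// (see usecase/vald.go later in this diff).
func serve(v vald.Server) error {
	h := router.New(
		router.WithTimeout("5s"), // overrides the "3s" default from option.go
		router.WithHandler(rest.New(rest.WithVald(v))),
	)
	return http.ListenAndServe(":8080", h)
}

func main() {
	// A concrete vald.Server comes from the gRPC handler in this PR; nil is
	// used here only so the sketch stands alone.
	if err := serve(nil); err != nil {
		log.Fatal(err)
	}
}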
+// + +// Package router provides implementation of Go API for routing http Handler wrapped by rest.Func +package router + +import ( + "net/http" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type args struct { + opts []Option + } + type want struct { + want http.Handler + } + type test struct { + name string + args args + want want + checkFunc func(want, http.Handler) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got http.Handler) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := New(test.args.opts...) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/pkg/gateway/backup/service/backup.go b/pkg/gateway/backup/service/backup.go new file mode 100644 index 0000000000..d3da03b943 --- /dev/null +++ b/pkg/gateway/backup/service/backup.go @@ -0,0 +1,177 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package service + +import ( + "context" + "reflect" + + "github.com/vdaas/vald/apis/grpc/v1/manager/compressor" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/internal/observability/trace" +) + +type Backup interface { + Start(ctx context.Context) (<-chan error, error) + GetObject(ctx context.Context, uuid string) (*payload.Backup_Vector, error) + GetLocation(ctx context.Context, uuid string) ([]string, error) + Register(ctx context.Context, vec *payload.Backup_Vector) error + RegisterMultiple(ctx context.Context, vecs *payload.Backup_Vectors) error + Remove(ctx context.Context, uuid string) error + RemoveMultiple(ctx context.Context, uuids ...string) error +} + +type backup struct { + addr string + client grpc.Client +} + +const apiName = "vald/gateway-backup" + +func NewBackup(opts ...BackupOption) (bu Backup, err error) { + b := new(backup) + for _, opt := range append(defaultBackupOpts, opts...) 
{ + if err := opt(b); err != nil { + return nil, errors.ErrOptionFailed(err, reflect.ValueOf(opt)) + } + } + + return b, nil +} + +func (b *backup) Start(ctx context.Context) (<-chan error, error) { + return b.client.StartConnectionMonitor(ctx) +} + +func (b *backup) GetObject(ctx context.Context, uuid string) (vec *payload.Backup_Vector, err error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/backup.GetObject/"+uuid) + defer func() { + if span != nil { + span.End() + } + }() + _, err = b.client.Do(ctx, b.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err error) { + vec, err = compressor.NewBackupClient(conn).GetVector(ctx, &payload.Backup_GetVector_Request{ + Uuid: uuid, + }, copts...) + if err != nil { + return nil, err + } + return + }) + return +} + +func (b *backup) GetLocation(ctx context.Context, uuid string) (ipList []string, err error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/backup.GetLocation/"+uuid) + defer func() { + if span != nil { + span.End() + } + }() + _, err = b.client.Do(ctx, b.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err error) { + ips, err := compressor.NewBackupClient(conn).Locations(ctx, &payload.Backup_Locations_Request{ + Uuid: uuid, + }, copts...) + if err != nil { + return nil, err + } + ipList = ips.GetIp() + return + }) + return +} + +func (b *backup) Register(ctx context.Context, vec *payload.Backup_Vector) (err error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/backup.Register/"+vec.GetUuid()) + defer func() { + if span != nil { + span.End() + } + }() + _, err = b.client.Do(ctx, b.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err error) { + _, err = compressor.NewBackupClient(conn).Register(ctx, vec, copts...) + if err != nil { + return nil, err + } + return + }) + return +} + +func (b *backup) RegisterMultiple(ctx context.Context, vecs *payload.Backup_Vectors) (err error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/backup.RegisterMultiple") + defer func() { + if span != nil { + span.End() + } + }() + _, err = b.client.Do(ctx, b.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err error) { + _, err = compressor.NewBackupClient(conn).RegisterMulti(ctx, vecs, copts...) + if err != nil { + return nil, err + } + return + }) + return +} + +func (b *backup) Remove(ctx context.Context, uuid string) (err error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/backup.Remove/"+uuid) + defer func() { + if span != nil { + span.End() + } + }() + _, err = b.client.Do(ctx, b.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err error) { + _, err = compressor.NewBackupClient(conn).Remove(ctx, &payload.Backup_Remove_Request{ + Uuid: uuid, + }, copts...) + if err != nil { + return nil, err + } + return + }) + return +} + +func (b *backup) RemoveMultiple(ctx context.Context, uuids ...string) (err error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/backup.RemoveMultiple") + defer func() { + if span != nil { + span.End() + } + }() + req := new(payload.Backup_Remove_RequestMulti) + req.Uuids = uuids + _, err = b.client.Do(ctx, b.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err error) { + _, err = compressor.NewBackupClient(conn).RemoveMulti(ctx, req, copts...) 
+ if err != nil { + return nil, err + } + return + }) + return +} diff --git a/pkg/gateway/backup/service/backup_test.go b/pkg/gateway/backup/service/backup_test.go new file mode 100644 index 0000000000..4356acb982 --- /dev/null +++ b/pkg/gateway/backup/service/backup_test.go @@ -0,0 +1,765 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package service + +import ( + "context" + "reflect" + "testing" + + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/net/grpc" + "go.uber.org/goleak" +) + +func TestNewBackup(t *testing.T) { + t.Parallel() + type args struct { + opts []BackupOption + } + type want struct { + wantBu Backup + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, Backup, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotBu Backup, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotBu, w.wantBu) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotBu, w.wantBu) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + gotBu, err := NewBackup(test.args.opts...) 
+ if err := test.checkFunc(test.want, gotBu, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_backup_Start(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + addr string + client grpc.Client + } + type want struct { + want <-chan error + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, <-chan error, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got <-chan error, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + b := &backup{ + addr: test.fields.addr, + client: test.fields.client, + } + + got, err := b.Start(test.args.ctx) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_backup_GetObject(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + uuid string + } + type fields struct { + addr string + client grpc.Client + } + type want struct { + wantVec *payload.Backup_Vector + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, *payload.Backup_Vector, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotVec *payload.Backup_Vector, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotVec, w.wantVec) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + uuid: "", + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + uuid: "", + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + b := &backup{ + addr: test.fields.addr, + client: test.fields.client, + } + + gotVec, err := b.GetObject(test.args.ctx, 
test.args.uuid) + if err := test.checkFunc(test.want, gotVec, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_backup_GetLocation(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + uuid string + } + type fields struct { + addr string + client grpc.Client + } + type want struct { + wantIpList []string + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, []string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotIpList []string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotIpList, w.wantIpList) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotIpList, w.wantIpList) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + uuid: "", + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + uuid: "", + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + b := &backup{ + addr: test.fields.addr, + client: test.fields.client, + } + + gotIpList, err := b.GetLocation(test.args.ctx, test.args.uuid) + if err := test.checkFunc(test.want, gotIpList, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_backup_Register(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + vec *payload.Backup_Vector + } + type fields struct { + addr string + client grpc.Client + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + vec: nil, + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + vec: nil, + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + b := &backup{ + addr: test.fields.addr, + client: test.fields.client, + } + + err := b.Register(test.args.ctx, test.args.vec) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + 
}) + } +} + +func Test_backup_RegisterMultiple(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + vecs *payload.Backup_Vectors + } + type fields struct { + addr string + client grpc.Client + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + vecs: nil, + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + vecs: nil, + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + b := &backup{ + addr: test.fields.addr, + client: test.fields.client, + } + + err := b.RegisterMultiple(test.args.ctx, test.args.vecs) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_backup_Remove(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + uuid string + } + type fields struct { + addr string + client grpc.Client + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + uuid: "", + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + uuid: "", + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + b := &backup{ + addr: test.fields.addr, + client: test.fields.client, + } + + err := b.Remove(test.args.ctx, test.args.uuid) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_backup_RemoveMultiple(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + uuids []string + } + type fields struct { + addr string + client grpc.Client + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + 
checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + uuids: nil, + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + uuids: nil, + }, + fields: fields { + addr: "", + client: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + b := &backup{ + addr: test.fields.addr, + client: test.fields.client, + } + + err := b.RemoveMultiple(test.args.ctx, test.args.uuids...) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/pkg/manager/compressor/model/model.go b/pkg/gateway/backup/service/doc.go similarity index 75% rename from pkg/manager/compressor/model/model.go rename to pkg/gateway/backup/service/doc.go index 65c925be7d..c13956cbbe 100644 --- a/pkg/manager/compressor/model/model.go +++ b/pkg/gateway/backup/service/doc.go @@ -14,12 +14,5 @@ // limitations under the License. // -// Package grpc provides grpc server logic -package model - -type MetaVector struct { - UUID string `db:"uuid"` - Vector string `db:"vector"` - Meta string `db:"meta"` - IPs []string `db:"ips"` -} +// Package service manages the main logic of server. +package service diff --git a/pkg/gateway/backup/service/option.go b/pkg/gateway/backup/service/option.go new file mode 100644 index 0000000000..cb930eb267 --- /dev/null +++ b/pkg/gateway/backup/service/option.go @@ -0,0 +1,40 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
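Every RPC wrapper in the backup service above follows one shape: open a trace span named after the call, run exactly one compressor RPC through client.Do against the configured address, and return the extracted payload or error. As a hedged sketch only, that repetition could be captured by a helper like the one below; it would live in the same package, and the compressor.BackupClient name is an assumption based on the usual gRPC code-generation convention rather than something shown in this hunk:

// do is a hypothetical helper capturing the span + client.Do shape shared by
// GetObject, GetLocation, Register, RegisterMultiple, Remove and RemoveMultiple.
func (b *backup) do(ctx context.Context, name string,
	call func(ctx context.Context, bc compressor.BackupClient, copts ...grpc.CallOption) error) error {
	ctx, span := trace.StartSpan(ctx, apiName+"/service/backup."+name)
	defer func() {
		if span != nil {
			span.End()
		}
	}()
	_, err := b.client.Do(ctx, b.addr, func(ctx context.Context,
		conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
		return nil, call(ctx, compressor.NewBackupClient(conn), copts...)
	})
	return err
}

// Remove, for example, would then reduce to:
//
//	func (b *backup) Remove(ctx context.Context, uuid string) error {
//		return b.do(ctx, "Remove/"+uuid, func(ctx context.Context,
//			bc compressor.BackupClient, copts ...grpc.CallOption) error {
//			_, err := bc.Remove(ctx, &payload.Backup_Remove_Request{Uuid: uuid}, copts...)
//			return err
//		})
//	}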
+// + +// Package service +package service + +import "github.com/vdaas/vald/internal/net/grpc" + +type BackupOption func(b *backup) error + +var defaultBackupOpts = []BackupOption{} + +func WithBackupAddr(addr string) BackupOption { + return func(b *backup) error { + b.addr = addr + return nil + } +} + +func WithBackupClient(client grpc.Client) BackupOption { + return func(b *backup) error { + if client != nil { + b.client = client + } + return nil + } +} diff --git a/internal/client/gateway/vald/grpc/option_test.go b/pkg/gateway/backup/service/option_test.go similarity index 83% rename from internal/client/gateway/vald/grpc/option_test.go rename to pkg/gateway/backup/service/option_test.go index 8efe0236ca..366cc18fff 100644 --- a/internal/client/gateway/vald/grpc/option_test.go +++ b/pkg/gateway/backup/service/option_test.go @@ -14,18 +14,19 @@ // limitations under the License. // -// Package grpc provides vald gRPC client functions -package grpc +// Package service +package service import ( "testing" - "github.com/vdaas/vald/internal/config" - + "github.com/vdaas/vald/internal/net/grpc" "go.uber.org/goleak" ) -func TestWithAddr(t *testing.T) { +func TestWithBackupAddr(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { addr string @@ -63,7 +64,7 @@ func TestWithAddr(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithAddr(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -115,22 +118,22 @@ func TestWithAddr(t *testing.T) { test.checkFunc = defaultCheckFunc } - got := WithAddr(test.args.addr) + got := WithBackupAddr(test.args.addr) obj := new(T) if err := test.checkFunc(test.want, obj, got(obj)); err != nil { tt.Errorf("error = %v", err) } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - got := WithAddr(test.args.addr) + got := WithBackupAddr(test.args.addr) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -138,10 +141,12 @@ func TestWithAddr(t *testing.T) { } } -func TestWithGRPCClientConfig(t *testing.T) { +func TestWithBackupClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { - cfg *config.GRPCClient + client grpc.Client } type want struct { obj *T @@ -176,7 +181,7 @@ func TestWithGRPCClientConfig(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -188,7 +193,7 @@ func TestWithGRPCClientConfig(t *testing.T) { { name: "test_case_1", args: args { - cfg: nil, + client: nil, }, want: want { obj: new(T), @@ -202,7 +207,7 @@ func 
TestWithGRPCClientConfig(t *testing.T) { return test { name: "test_case_2", args: args { - cfg: nil, + client: nil, }, want: want { obj: new(T), @@ -212,9 +217,11 @@ func TestWithGRPCClientConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -228,22 +235,22 @@ func TestWithGRPCClientConfig(t *testing.T) { test.checkFunc = defaultCheckFunc } - got := WithGRPCClientConfig(test.args.cfg) + got := WithBackupClient(test.args.client) obj := new(T) if err := test.checkFunc(test.want, obj, got(obj)); err != nil { tt.Errorf("error = %v", err) } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - got := WithGRPCClientConfig(test.args.cfg) + got := WithBackupClient(test.args.client) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/gateway/backup/usecase/vald.go b/pkg/gateway/backup/usecase/vald.go new file mode 100644 index 0000000000..f58378c367 --- /dev/null +++ b/pkg/gateway/backup/usecase/vald.go @@ -0,0 +1,209 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
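WithBackupAddr and WithBackupClient above are the only two knobs the backup service takes; NewBackup applies defaultBackupOpts first and rejects a failing option with errors.ErrOptionFailed. A hedged construction sketch, where the grpc.Client is assumed to be already configured (the usecase package that follows builds it from the Backup.Client section of the config):

import (
	"github.com/vdaas/vald/internal/net/grpc"
	"github.com/vdaas/vald/pkg/gateway/backup/service"
)

// newBackupService shows how the two options are combined; addr points at the
// backup/compressor manager, and a nil client is simply ignored by the option.
func newBackupService(c grpc.Client, addr string) (service.Backup, error) {
	return service.NewBackup(
		service.WithBackupAddr(addr),
		service.WithBackupClient(c),
	)
}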
+// + +package usecase + +import ( + "context" + + "github.com/vdaas/vald/apis/grpc/v1/vald" + client "github.com/vdaas/vald/internal/client/v1/client/vald" + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/internal/net/grpc/metric" + "github.com/vdaas/vald/internal/observability" + "github.com/vdaas/vald/internal/runner" + "github.com/vdaas/vald/internal/safety" + "github.com/vdaas/vald/internal/servers/server" + "github.com/vdaas/vald/internal/servers/starter" + "github.com/vdaas/vald/pkg/gateway/backup/config" + handler "github.com/vdaas/vald/pkg/gateway/backup/handler/grpc" + "github.com/vdaas/vald/pkg/gateway/backup/handler/rest" + "github.com/vdaas/vald/pkg/gateway/backup/router" + "github.com/vdaas/vald/pkg/gateway/backup/service" +) + +type run struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + backup service.Backup +} + +func New(cfg *config.Data) (r runner.Runner, err error) { + eg := errgroup.Get() + + var backup service.Backup + + if addrs := cfg.Backup.Client.Addrs; len(addrs) == 0 { + return nil, errors.ErrInvalidBackupConfig + } + + backupClientOptions := append( + cfg.Backup.Client.Opts(), + grpc.WithErrGroup(eg), + ) + + var obs observability.Observability + if cfg.Observability.Enabled { + obs, err = observability.NewWithConfig(cfg.Observability) + if err != nil { + return nil, err + } + backupClientOptions = append( + backupClientOptions, + grpc.WithDialOptions( + grpc.WithStatsHandler(metric.NewClientHandler()), + ), + ) + } + + backup, err = service.NewBackup( + service.WithBackupAddr(cfg.Backup.Client.Addrs[0]), + service.WithBackupClient( + grpc.New(backupClientOptions...), + ), + ) + if err != nil { + return nil, err + } + + if addrs := cfg.Client.Addrs; len(addrs) == 0 { + return nil, errors.ErrGRPCTargetAddrNotFound + } + + v := handler.New( + handler.WithValdClient(client.New( + client.WithAddr(cfg.Client.Addrs[0]), + client.WithClient(grpc.New(cfg.Client.Opts()...)), + )), + handler.WithBackup(backup), + handler.WithErrGroup(eg), + handler.WithStreamConcurrency(cfg.Server.GetGRPCStreamConcurrency()), + ) + + grpcServerOptions := []server.Option{ + server.WithGRPCRegistFunc(func(srv *grpc.Server) { + vald.RegisterValdServer(srv, v) + }), + server.WithPreStopFunction(func() error { + // TODO notify another gateway and scheduler + return nil + }), + } + + if cfg.Observability.Enabled { + grpcServerOptions = append( + grpcServerOptions, + server.WithGRPCOption( + grpc.StatsHandler(metric.NewServerHandler()), + ), + ) + } + + srv, err := starter.New( + starter.WithConfig(cfg.Server), + starter.WithREST(func(sc *config.Server) []server.Option { + return []server.Option{ + server.WithHTTPHandler( + router.New( + router.WithHandler( + rest.New( + rest.WithVald(v), + ), + ), + ), + ), + } + }), + starter.WithGRPC(func(sc *config.Server) []server.Option { + return grpcServerOptions + }), + // TODO add GraphQL handler + ) + if err != nil { + return nil, err + } + + return &run{ + eg: eg, + cfg: cfg, + server: srv, + observability: obs, + backup: backup, + }, nil +} + +func (r *run) PreStart(ctx context.Context) error { + if r.observability != nil { + return r.observability.PreStart(ctx) + } + return nil +} + +func (r *run) Start(ctx context.Context) (<-chan error, error) { + ech := make(chan error, 6) + var bech, sech, oech <-chan error + var err error + if r.observability != nil { + oech = 
r.observability.Start(ctx) + } + if r.backup != nil { + bech, err = r.backup.Start(ctx) + if err != nil { + close(ech) + return nil, err + } + } + sech = r.server.ListenAndServe(ctx) + r.eg.Go(safety.RecoverFunc(func() (err error) { + defer close(ech) + for { + select { + case <-ctx.Done(): + return ctx.Err() + case err = <-oech: + case err = <-bech: + case err = <-sech: + } + if err != nil { + select { + case <-ctx.Done(): + return ctx.Err() + case ech <- err: + } + } + } + })) + return ech, nil +} + +func (r *run) PreStop(ctx context.Context) error { + return nil +} + +func (r *run) Stop(ctx context.Context) error { + if r.observability != nil { + r.observability.Stop(ctx) + } + return r.server.Shutdown(ctx) +} + +func (r *run) PostStop(ctx context.Context) error { + return nil +} diff --git a/pkg/gateway/backup/usecase/vald_test.go b/pkg/gateway/backup/usecase/vald_test.go new file mode 100644 index 0000000000..2e8db706e9 --- /dev/null +++ b/pkg/gateway/backup/usecase/vald_test.go @@ -0,0 +1,623 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package usecase + +import ( + "context" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/observability" + "github.com/vdaas/vald/internal/runner" + "github.com/vdaas/vald/internal/servers/starter" + "github.com/vdaas/vald/pkg/gateway/backup/config" + "github.com/vdaas/vald/pkg/gateway/backup/service" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type args struct { + cfg *config.Data + } + type want struct { + wantR runner.Runner + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, runner.Runner, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotR runner.Runner, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotR, w.wantR) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotR, w.wantR) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + cfg: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + cfg: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + gotR, err := New(test.args.cfg) + if err := test.checkFunc(test.want, gotR, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + 
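run.Start above fans three error streams (observability, backup client, servers) into one outgoing channel and stops when the context is cancelled. A self-contained sketch of just that fan-in pattern; the names and sample error are illustrative, and the real code additionally wraps the loop in safety.RecoverFunc, runs it on an errgroup, and returns ctx.Err() on shutdown:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// fanIn forwards non-nil errors from the three source channels into one
// channel until the context is cancelled, mirroring the select loop in Start.
func fanIn(ctx context.Context, oech, bech, sech <-chan error) <-chan error {
	ech := make(chan error, 3)
	go func() {
		defer close(ech)
		for {
			var err error
			select {
			case <-ctx.Done():
				return
			case err = <-oech:
			case err = <-bech:
			case err = <-sech:
			}
			if err != nil {
				select {
				case <-ctx.Done():
					return
				case ech <- err:
				}
			}
		}
	}()
	return ech
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	oech := make(chan error, 1)
	bech := make(chan error, 1)
	sech := make(chan error, 1)
	bech <- errors.New("backup connection lost")
	for err := range fanIn(ctx, oech, bech, sech) {
		fmt.Println("received:", err) // received: backup connection lost
	}
}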
+func Test_run_PreStart(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + backup service.Backup + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + backup: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + backup: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + backup: test.fields.backup, + } + + err := r.PreStart(test.args.ctx) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_Start(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + backup service.Backup + } + type want struct { + want <-chan error + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, <-chan error, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got <-chan error, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + backup: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + backup: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: 
test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + backup: test.fields.backup, + } + + got, err := r.Start(test.args.ctx) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_PreStop(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + backup service.Backup + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + backup: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + backup: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + backup: test.fields.backup, + } + + err := r.PreStop(test.args.ctx) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_Stop(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + backup service.Backup + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + backup: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + backup: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if 
test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + backup: test.fields.backup, + } + + err := r.Stop(test.args.ctx) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_PostStop(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + backup service.Backup + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + backup: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + backup: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + backup: test.fields.backup, + } + + err := r.PostStop(test.args.ctx) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/pkg/gateway/internal/location/location.go b/pkg/gateway/internal/location/location.go new file mode 100644 index 0000000000..d520cc71f3 --- /dev/null +++ b/pkg/gateway/internal/location/location.go @@ -0,0 +1,44 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +package location + +import "github.com/vdaas/vald/apis/grpc/v1/payload" + +func ReStructure(uuids []string, locs *payload.Object_Locations) *payload.Object_Locations { + if locs == nil { + return nil + } + lmap := make(map[string]*payload.Object_Location, len(locs.Locations)) + for _, loc := range locs.Locations { + uuid := loc.GetUuid() + _, ok := lmap[uuid] + if !ok { + lmap[uuid] = new(payload.Object_Location) + } + lmap[uuid].Ips = append(lmap[uuid].GetIps(), loc.GetIps()...) 
+ } + locs = &payload.Object_Locations{ + Locations: make([]*payload.Object_Location, 0, len(lmap)), + } + for _, id := range uuids { + loc, ok := lmap[id] + if !ok { + loc = new(payload.Object_Location) + } + locs.Locations = append(locs.Locations, loc) + } + return locs +} diff --git a/pkg/gateway/internal/location/location_test.go b/pkg/gateway/internal/location/location_test.go new file mode 100644 index 0000000000..ae7340fd21 --- /dev/null +++ b/pkg/gateway/internal/location/location_test.go @@ -0,0 +1,101 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +package location + +import ( + "reflect" + "testing" + + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestReStructure(t *testing.T) { + t.Parallel() + type args struct { + uuids []string + locs *payload.Object_Locations + } + type want struct { + want *payload.Object_Locations + } + type test struct { + name string + args args + want want + checkFunc func(want, *payload.Object_Locations) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got *payload.Object_Locations) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + uuids: nil, + locs: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + uuids: nil, + locs: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := ReStructure(test.args.uuids, test.args.locs) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/pkg/gateway/lb/README.md b/pkg/gateway/lb/README.md new file mode 100755 index 0000000000..c3ac3c384a --- /dev/null +++ b/pkg/gateway/lb/README.md @@ -0,0 +1 @@ +# vald LB gateway diff --git a/pkg/gateway/lb/config/config.go b/pkg/gateway/lb/config/config.go new file mode 100644 index 0000000000..5e5c112bed --- /dev/null +++ b/pkg/gateway/lb/config/config.go @@ -0,0 +1,151 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
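ReStructure above merges the IPs of locations that share a UUID and then re-emits the results in the order of the uuids argument, substituting an empty location for any UUID that has no result. A small worked example with made-up values; the Uuid and Ips literal fields are the ones implied by the getters used in the function, and the caller is assumed to import the payload and location packages:

import (
	"github.com/vdaas/vald/apis/grpc/v1/payload"
	"github.com/vdaas/vald/pkg/gateway/internal/location"
)

// exampleReStructure illustrates the merge-and-reorder behaviour.
func exampleReStructure() *payload.Object_Locations {
	locs := &payload.Object_Locations{
		Locations: []*payload.Object_Location{
			{Uuid: "b", Ips: []string{"10.0.0.2"}},
			{Uuid: "a", Ips: []string{"10.0.0.1"}},
			{Uuid: "a", Ips: []string{"10.0.0.3"}},
		},
	}
	// Output order follows the uuids argument, not the input order:
	//   [0] "a" -> Ips {10.0.0.1, 10.0.0.3}  (the two "a" entries are merged)
	//   [1] "b" -> Ips {10.0.0.2}
	//   [2] "c" -> an empty Object_Location ("c" had no result)
	return location.ReStructure([]string{"a", "b", "c"}, locs)
}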
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package setting stores all server application settings +package config + +import ( + "github.com/vdaas/vald/internal/config" +) + +type ( + GlobalConfig = config.GlobalConfig + Server = config.Server +) + +// Config represent a application setting data content (config.yaml). +// In K8s environment, this configuration is stored in K8s ConfigMap. +type Data struct { + config.GlobalConfig `json:",inline" yaml:",inline"` + + // Server represent all server configurations + Server *config.Servers `json:"server_config" yaml:"server_config"` + + // Observability represent observability configurations + Observability *config.Observability `json:"observability" yaml:"observability"` + + // Gateway represent agent gateway service configuration + Gateway *config.LB `json:"gateway" yaml:"gateway"` +} + +func NewConfig(path string) (cfg *Data, err error) { + err = config.Read(path, &cfg) + + if err != nil { + return nil, err + } + + if cfg != nil { + cfg.Bind() + } + + if cfg.Server != nil { + cfg.Server = cfg.Server.Bind() + } + + if cfg.Observability != nil { + cfg.Observability = cfg.Observability.Bind() + } + + if cfg.Gateway != nil { + cfg.Gateway = cfg.Gateway.Bind() + } + + return cfg, nil +} + +// func FakeData() { +// d := Data{ +// Version: "v0.0.1", +// Server: &config.Servers{ +// Servers: []*config.Server{ +// { +// Name: "agent-rest", +// Host: "127.0.0.1", +// Port: 8080, +// Mode: "REST", +// ProbeWaitTime: "3s", +// ShutdownDuration: "5s", +// HandlerTimeout: "5s", +// IdleTimeout: "2s", +// ReadHeaderTimeout: "1s", +// ReadTimeout: "1s", +// WriteTimeout: "1s", +// }, +// { +// Name: "agent-grpc", +// Host: "127.0.0.1", +// Port: 8082, +// Mode: "GRPC", +// }, +// }, +// MetricsServers: []*config.Server{ +// { +// Name: "pprof", +// Host: "127.0.0.1", +// Port: 6060, +// Mode: "REST", +// ProbeWaitTime: "3s", +// ShutdownDuration: "5s", +// HandlerTimeout: "5s", +// IdleTimeout: "2s", +// ReadHeaderTimeout: "1s", +// ReadTimeout: "1s", +// WriteTimeout: "1s", +// }, +// }, +// HealthCheckServers: []*config.Server{ +// { +// Name: "livenesss", +// Host: "127.0.0.1", +// Port: 3000, +// }, +// { +// Name: "readiness", +// Host: "127.0.0.1", +// Port: 3001, +// }, +// }, +// StartUpStrategy: []string{ +// "livenesss", +// "pprof", +// "agent-grpc", +// "agent-rest", +// "readiness", +// }, +// ShutdownStrategy: []string{ +// "readiness", +// "agent-rest", +// "agent-grpc", +// "pprof", +// "livenesss", +// }, +// FullShutdownDuration: "30s", +// TLS: &config.TLS{ +// Enabled: false, +// Cert: "/path/to/cert", +// Key: "/path/to/key", +// CA: "/path/to/ca", +// }, +// }, +// Gateway: &config.Gateway{ +// AgentPort: 8080, +// AgentName: "vald-agent", +// BackoffEnabled: false,, +// }, +// } +// fmt.Println(config.ToRawYaml(d)) +// } diff --git a/pkg/gateway/lb/config/config_test.go b/pkg/gateway/lb/config/config_test.go new file mode 100644 index 0000000000..e90809127a --- /dev/null +++ b/pkg/gateway/lb/config/config_test.go @@ -0,0 +1,103 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, 
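NewConfig reads the YAML file at path into Data and then calls Bind on each populated section. A minimal usage sketch, assuming the configuration file has been mounted from the K8s ConfigMap mentioned above (the literal path and the log calls are illustrative, not the actual runner wiring):

```go
package main

import (
	"github.com/vdaas/vald/internal/log"
	"github.com/vdaas/vald/pkg/gateway/lb/config"
)

func main() {
	// Path is illustrative; in a K8s deployment the ConfigMap is mounted as a YAML file.
	cfg, err := config.NewConfig("/etc/server/config.yaml")
	if err != nil {
		log.Fatal(err)
	}
	// NewConfig has already bound each section, so the nested configurations
	// are ready to be handed to the server/daemon layer.
	if cfg.Gateway == nil {
		log.Warn("gateway section is missing from config.yaml")
	}
	_ = cfg.Server
	_ = cfg.Observability
}
```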
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package setting stores all server application settings +package config + +import ( + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNewConfig(t *testing.T) { + t.Parallel() + type args struct { + path string + } + type want struct { + wantCfg *Data + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, *Data, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCfg *Data, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCfg, w.wantCfg) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCfg, w.wantCfg) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + path: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + path: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + gotCfg, err := NewConfig(test.args.path) + if err := test.checkFunc(test.want, gotCfg, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/pkg/gateway/lb/handler/doc.go b/pkg/gateway/lb/handler/doc.go new file mode 100644 index 0000000000..86b6d1869d --- /dev/null +++ b/pkg/gateway/lb/handler/doc.go @@ -0,0 +1,17 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package handler diff --git a/pkg/gateway/lb/handler/grpc/handler.go b/pkg/gateway/lb/handler/grpc/handler.go new file mode 100644 index 0000000000..d2b56dbf85 --- /dev/null +++ b/pkg/gateway/lb/handler/grpc/handler.go @@ -0,0 +1,994 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//    https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package grpc provides grpc server logic
+package grpc
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/vdaas/vald/apis/grpc/v1/payload"
+	"github.com/vdaas/vald/apis/grpc/v1/vald"
+	"github.com/vdaas/vald/internal/core/algorithm"
+	"github.com/vdaas/vald/internal/errgroup"
+	"github.com/vdaas/vald/internal/errors"
+	"github.com/vdaas/vald/internal/info"
+	"github.com/vdaas/vald/internal/log"
+	"github.com/vdaas/vald/internal/net/grpc"
+	"github.com/vdaas/vald/internal/net/grpc/status"
+	"github.com/vdaas/vald/internal/observability/trace"
+	"github.com/vdaas/vald/internal/safety"
+	"github.com/vdaas/vald/pkg/gateway/internal/location"
+	"github.com/vdaas/vald/pkg/gateway/lb/service"
+)
+
+type server struct {
+	eg                errgroup.Group
+	gateway           service.Gateway
+	timeout           time.Duration
+	replica           int
+	streamConcurrency int
+}
+
+const apiName = "vald/gateway-lb"
+
+func New(opts ...Option) vald.Server {
+	s := new(server)
+
+	for _, opt := range append(defaultOpts, opts...) {
+		opt(s)
+	}
+	return s
+}
+
+func (s *server) Exists(ctx context.Context, meta *payload.Object_ID) (id *payload.Object_ID, err error) {
+	ctx, span := trace.StartSpan(ctx, apiName+".Exists")
+	defer func() {
+		if span != nil {
+			span.End()
+		}
+	}()
+	var cancel context.CancelFunc
+	ctx, cancel = context.WithCancel(ctx)
+	var once sync.Once
+	err = s.gateway.BroadCast(ctx, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) error {
+		ctx, span := trace.StartSpan(ctx, apiName+".Exists/"+target)
+		defer func() {
+			if span != nil {
+				span.End()
+			}
+		}()
+		oid, err := vc.Exists(ctx, meta, copts...)
+		if err != nil {
+			if span != nil {
+				span.SetStatus(trace.StatusCodeNotFound(err.Error()))
+			}
+			return nil
+		}
+		if oid != nil && oid.Id != "" {
+			once.Do(func() {
+				id = &payload.Object_ID{
+					Id: oid.Id,
+				}
+				cancel()
+			})
+		}
+		return nil
+	})
+	if err != nil || id == nil || id.Id == "" {
+		err = errors.ErrObjectNotFound(err, meta.GetId())
+		if span != nil {
+			span.SetStatus(trace.StatusCodeNotFound(err.Error()))
+		}
+		return nil, status.WrapWithNotFound(fmt.Sprintf("Exists API meta %s's uuid not found", meta.GetId()), err, meta.GetId(), info.Get())
+	}
+	return id, nil
+}
+
+func (s *server) Search(ctx context.Context, req *payload.Search_Request) (res *payload.Search_Response, err error) {
+	ctx, span := trace.StartSpan(ctx, apiName+".Search")
+	defer func() {
+		if span != nil {
+			span.End()
+		}
+	}()
+	vl := len(req.GetVector())
+	if vl < algorithm.MinimumVectorDimensionSize {
+		err = errors.ErrInvalidDimensionSize(vl, 0)
+		if span != nil {
+			span.SetStatus(trace.StatusCodeInvalidArgument(err.Error()))
+		}
+		return nil, status.WrapWithInvalidArgument("Search API invalid vector argument", err, req, info.Get())
+	}
+	res, err = s.search(ctx, req.GetConfig(),
+		func(ctx context.Context, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) {
+			return vc.Search(ctx, req, copts...)
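New applies defaultOpts first and the caller-supplied options afterwards, so explicit options always win; the concrete Option constructors live elsewhere in this package and are not shown here. The standalone sketch below only illustrates that functional-options pattern, with hypothetical field and option names:

```go
package main

import "fmt"

// server, Option, and WithReplica below are purely illustrative names.
type server struct {
	timeout string
	replica int
}

type Option func(*server)

var defaultOpts = []Option{
	func(s *server) { s.timeout = "3s"; s.replica = 1 },
}

func WithReplica(n int) Option {
	return func(s *server) {
		if n > 0 {
			s.replica = n
		}
	}
}

func New(opts ...Option) *server {
	s := new(server)
	// Defaults first, caller options last, so the caller always wins.
	for _, opt := range append(defaultOpts, opts...) {
		opt(s)
	}
	return s
}

func main() {
	fmt.Printf("%+v\n", New(WithReplica(3))) // &{timeout:3s replica:3}
}
```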
+ }) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal("Search API failed to process search request", err, req, info.Get()) + } + return res, nil +} + +func (s *server) SearchByID(ctx context.Context, req *payload.Search_IDRequest) ( + res *payload.Search_Response, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".SearchByID") + defer func() { + if span != nil { + span.End() + } + }() + if len(req.GetId()) == 0 { + err = errors.ErrInvalidMetaDataConfig + if span != nil { + span.SetStatus(trace.StatusCodeInvalidArgument(err.Error())) + } + return nil, status.WrapWithInvalidArgument("SearchByID API invalid uuid", err, req, info.Get()) + } + vec, err := s.GetObject(ctx, &payload.Object_ID{ + Id: req.GetId(), + }) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + return nil, status.WrapWithNotFound(fmt.Sprintf("SearchByID API uuid %s's object not found", req.GetId()), err, info.Get()) + } + vl := len(vec.GetVector()) + if vl < algorithm.MinimumVectorDimensionSize { + err = errors.ErrInvalidDimensionSize(vl, 0) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInvalidArgument("SearchByID API invalid vector length fetched", err, req, info.Get()) + } + res, err = s.search(ctx, req.GetConfig(), + func(ctx context.Context, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { + return vc.Search(ctx, &payload.Search_Request{ + Vector: vec.GetVector(), + Config: req.GetConfig(), + }, copts...) + }) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal("SearchByID API failed to process search request", err, req, info.Get()) + } + return res, nil +} + +func (s *server) search(ctx context.Context, cfg *payload.Search_Config, + f func(ctx context.Context, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error)) ( + res *payload.Search_Response, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".search") + defer func() { + if span != nil { + span.End() + } + }() + + num := int(cfg.GetNum()) + res = new(payload.Search_Response) + res.Results = make([]*payload.Object_Distance, 0, s.gateway.GetAgentCount(ctx)*num) + dch := make(chan *payload.Object_Distance, cap(res.GetResults())/2) + eg, ectx := errgroup.New(ctx) + var cancel context.CancelFunc + var timeout time.Duration + if to := cfg.GetTimeout(); to != 0 { + timeout = time.Duration(to) + } else { + timeout = s.timeout + } + + var maxDist uint32 + atomic.StoreUint32(&maxDist, math.Float32bits(math.MaxFloat32)) + ectx, cancel = context.WithTimeout(ectx, timeout) + eg.Go(safety.RecoverFunc(func() error { + defer cancel() + visited := new(sync.Map) + return s.gateway.BroadCast(ectx, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) error { + ctx, span := trace.StartSpan(ctx, apiName+".search/"+target) + defer func() { + if span != nil { + span.End() + } + }() + r, err := f(ctx, vc, copts...) 
+ if err != nil { + log.Debug(err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil + } + if r == nil || len(r.GetResults()) == 0 { + err = errors.ErrIndexNotFound + log.Debug(err) + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + return nil + } + for _, dist := range r.GetResults() { + if dist == nil { + continue + } + if dist.GetDistance() >= math.Float32frombits(atomic.LoadUint32(&maxDist)) { + return nil + } + if _, already := visited.LoadOrStore(dist.GetId(), struct{}{}); !already { + select { + case <-ectx.Done(): + return nil + case dch <- dist: + } + } + } + return nil + }) + })) + for { + select { + case <-ectx.Done(): + err = eg.Wait() + if err != nil { + log.Error(err) + } + close(dch) + if num != 0 && len(res.GetResults()) > num { + res.Results = res.Results[:num] + } + return res, nil + case dist := <-dch: + rl := len(res.GetResults()) // result length + if rl >= num && dist.GetDistance() >= math.Float32frombits(atomic.LoadUint32(&maxDist)) { + continue + } + switch rl { + case 0: + res.Results = append(res.Results, dist) + case 1: + if res.GetResults()[0].GetDistance() <= dist.GetDistance() { + res.Results = append(res.GetResults(), dist) + } else { + res.Results = []*payload.Object_Distance{dist, res.GetResults()[0]} + } + default: + pos := rl + for idx := rl; idx >= 1; idx-- { + if res.GetResults()[idx-1].GetDistance() <= dist.GetDistance() { + pos = idx - 1 + break + } + } + switch { + case pos == rl: + res.Results = append([]*payload.Object_Distance{dist}, res.Results...) + case pos == rl-1: + res.Results = append(res.GetResults(), dist) + case pos >= 0: + res.Results = append(res.GetResults()[:pos+1], res.GetResults()[pos:]...) + res.Results[pos+1] = dist + } + } + rl = len(res.GetResults()) + if rl > num && num != 0 { + res.Results = res.GetResults()[:num] + rl = len(res.GetResults()) + } + if distEnd := res.GetResults()[rl-1].GetDistance(); rl >= num && + distEnd < math.Float32frombits(atomic.LoadUint32(&maxDist)) { + atomic.StoreUint32(&maxDist, math.Float32bits(distEnd)) + } + } + } +} + +func (s *server) StreamSearch(stream vald.Search_StreamSearchServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamSearch") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Search_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Search(ctx, data.(*payload.Search_Request)) + }) +} + +func (s *server) StreamSearchByID(stream vald.Search_StreamSearchByIDServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamSearchByID") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Search_IDRequest) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.SearchByID(ctx, data.(*payload.Search_IDRequest)) + }) +} + +func (s *server) MultiSearch(ctx context.Context, reqs *payload.Search_MultiRequest) (res *payload.Search_Responses, errs error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiSearch") + defer func() { + if span != nil { + span.End() + } + }() + + res = &payload.Search_Responses{ + Responses: make([]*payload.Search_Response, len(reqs.Requests)), + } + var wg sync.WaitGroup + var mu sync.Mutex + for i, req := range reqs.Requests { + idx, query := i, req + 
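The receive loop above keeps res.Results ordered by distance as results arrive from the agents, truncates the slice to the requested num, and then lowers maxDist so later results and slower agents can be pruned early. A compact standalone sketch of that insert-into-sorted-slice-and-cap step, using plain float64 distances and a hypothetical insertSorted helper instead of payload.Object_Distance:

```go
package main

import "fmt"

// insertSorted inserts d into the ascending slice res, keeps at most k
// elements, and returns the updated slice plus the current pruning threshold
// (the distance of the last kept element).
func insertSorted(res []float64, d float64, k int) ([]float64, float64) {
	// Walk from the tail towards the head until an element <= d is found.
	pos := len(res)
	for i := len(res); i >= 1; i-- {
		if res[i-1] <= d {
			pos = i
			break
		}
		pos = i - 1
	}
	// Grow by one and shift the tail right to make room at pos.
	res = append(res, 0)
	copy(res[pos+1:], res[pos:])
	res[pos] = d
	// Cap at k results.
	if len(res) > k {
		res = res[:k]
	}
	return res, res[len(res)-1]
}

func main() {
	var res []float64
	threshold := 0.0
	for _, d := range []float64{0.9, 0.2, 0.5, 0.7, 0.1} {
		res, threshold = insertSorted(res, d, 3)
	}
	fmt.Println(res, threshold) // [0.1 0.2 0.5] 0.5
}
```

In the handler itself the threshold additionally gates the producers: once the result slice is full, any incoming distance at or beyond maxDist is dropped without being inserted.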
wg.Add(1) + s.eg.Go(func() error { + defer wg.Done() + r, err := s.Search(ctx, query) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + mu.Lock() + errs = errors.Wrap(errs, status.WrapWithNotFound(fmt.Sprintf("MultiSearch API vector %v's search request result not found", query.GetVector()), err, info.Get()).Error()) + mu.Unlock() + return nil + } + res.Responses[idx] = r + return nil + }) + } + wg.Wait() + return res, errs +} + +func (s *server) MultiSearchByID(ctx context.Context, reqs *payload.Search_MultiIDRequest) (res *payload.Search_Responses, errs error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiSearchByID") + defer func() { + if span != nil { + span.End() + } + }() + + res = &payload.Search_Responses{ + Responses: make([]*payload.Search_Response, len(reqs.Requests)), + } + var wg sync.WaitGroup + var mu sync.Mutex + for i, req := range reqs.Requests { + idx, query := i, req + wg.Add(1) + s.eg.Go(func() error { + defer wg.Done() + r, err := s.SearchByID(ctx, query) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + mu.Lock() + errs = errors.Wrap(errs, status.WrapWithNotFound(fmt.Sprintf("MultiSearchByID API uuid %v's search by id request result not found", query.GetId()), err, info.Get()).Error()) + mu.Unlock() + return nil + } + res.Responses[idx] = r + return nil + }) + } + wg.Wait() + return res, errs +} + +func (s *server) Insert(ctx context.Context, req *payload.Insert_Request) (ce *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Insert") + defer func() { + if span != nil { + span.End() + } + }() + vec := req.GetVector().GetVector() + uuid := req.GetVector().GetId() + vl := len(vec) + if vl < algorithm.MinimumVectorDimensionSize { + err = errors.ErrInvalidDimensionSize(vl, 0) + if span != nil { + span.SetStatus(trace.StatusCodeInvalidArgument(err.Error())) + } + return nil, status.WrapWithInvalidArgument("Search API invalid vector argument", err, req, info.Get()) + } + if !req.GetConfig().GetSkipStrictExistCheck() { + id, err := s.Exists(ctx, &payload.Object_ID{ + Id: uuid, + }) + if err == nil && id != nil && len(id.GetId()) != 0 { + err = errors.ErrMetaDataAlreadyExists(uuid) + log.Error(err) + if span != nil { + span.SetStatus(trace.StatusCodeAlreadyExists(err.Error())) + } + return nil, status.WrapWithAlreadyExists( + fmt.Sprintf("Insert API ID = %v already exists", uuid), err, info.Get()) + } + } + + mu := new(sync.Mutex) + ce = &payload.Object_Location{ + Uuid: uuid, + Ips: make([]string, 0, s.replica), + } + err = s.gateway.DoMulti(ctx, s.replica, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) (err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Insert/"+target) + defer func() { + if span != nil { + span.End() + } + }() + loc, err := vc.Insert(ctx, req, copts...) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + if err == errors.ErrRPCCallFailed(target, context.Canceled) { + return nil + } + return err + } + mu.Lock() + ce.Ips = append(ce.GetIps(), loc.GetIps()...) 
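MultiSearch and MultiSearchByID fan the individual queries out on the shared errgroup and gather both the per-query results and the per-query errors under a mutex, so one failing query does not abort its siblings. A stripped-down sketch of that collect-everything pattern using only the standard library (the fake failure is illustrative):

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

func main() {
	queries := []string{"q1", "q2", "q3"}
	results := make([]string, len(queries))
	var (
		wg   sync.WaitGroup
		mu   sync.Mutex
		errs []string
	)
	for i, q := range queries {
		idx, query := i, q // capture loop variables for the goroutine
		wg.Add(1)
		go func() {
			defer wg.Done()
			if query == "q2" { // pretend this one fails
				mu.Lock()
				errs = append(errs, fmt.Sprintf("%s: not found", query))
				mu.Unlock()
				return
			}
			results[idx] = strings.ToUpper(query)
		}()
	}
	wg.Wait()
	fmt.Println(results, errs) // [Q1  Q3] [q2: not found]
}
```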
+ ce.Name = loc.GetName() + mu.Unlock() + return nil + }) + if err != nil { + err = errors.Wrapf(err, "Insert API (do multiple) failed to Insert uuid = %s\t info = %#v", uuid, info.Get()) + log.Error(err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Insert API failed to Execute DoMulti error = %s", err.Error()), err, info.Get()) + } + log.Debugf("Insert API insert succeeded to %#v", ce) + return ce, nil +} + +func (s *server) StreamInsert(stream vald.Insert_StreamInsertServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamInsert") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Insert_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Insert(ctx, data.(*payload.Insert_Request)) + }) +} + +func (s *server) MultiInsert(ctx context.Context, reqs *payload.Insert_MultiRequest) (locs *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiInsert") + defer func() { + if span != nil { + span.End() + } + }() + vecs := reqs.GetRequests() + ids := make([]string, 0, len(vecs)) + for _, vec := range vecs { + uuid := vec.GetVector().GetId() + if !vec.GetConfig().GetSkipStrictExistCheck() { + id, err := s.Exists(ctx, &payload.Object_ID{ + Id: uuid, + }) + if err == nil && id != nil && len(id.GetId()) != 0 { + err = errors.ErrMetaDataAlreadyExists(uuid) + log.Error(err) + if span != nil { + span.SetStatus(trace.StatusCodeAlreadyExists(err.Error())) + } + return nil, status.WrapWithAlreadyExists( + fmt.Sprintf("MultiInsert API ID = %v already exists", uuid), err, info.Get()) + } + } + ids = append(ids, uuid) + } + + mu := new(sync.Mutex) + locs = &payload.Object_Locations{ + Locations: make([]*payload.Object_Location, 0, s.replica), + } + err = s.gateway.DoMulti(ctx, s.replica, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) (err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiInsert/"+target) + defer func() { + if span != nil { + span.End() + } + }() + loc, err := vc.MultiInsert(ctx, reqs, copts...) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return err + } + mu.Lock() + locs.Locations = append(locs.Locations, loc.Locations...) 
+		mu.Unlock()
+		return nil
+	})
+	if err != nil {
+		if span != nil {
+			span.SetStatus(trace.StatusCodeInternal(err.Error()))
+		}
+		return nil, status.WrapWithInternal(fmt.Sprintf("MultiInsert API failed request %#v", vecs), err, info.Get())
+	}
+	return location.ReStructure(ids, locs), nil
+}
+
+func (s *server) Update(ctx context.Context, req *payload.Update_Request) (res *payload.Object_Location, err error) {
+	ctx, span := trace.StartSpan(ctx, apiName+".Update")
+	defer func() {
+		if span != nil {
+			span.End()
+		}
+	}()
+
+	res, err = s.Remove(ctx, &payload.Remove_Request{
+		Id: &payload.Object_ID{
+			Id: req.GetVector().GetId(),
+		},
+		Config: &payload.Remove_Config{
+			SkipStrictExistCheck: req.GetConfig().GetSkipStrictExistCheck(),
+		},
+	})
+	if err != nil {
+		if span != nil {
+			span.SetStatus(trace.StatusCodeAlreadyExists(err.Error()))
+		}
+		return nil, err
+	}
+
+	res, err = s.Insert(ctx, &payload.Insert_Request{
+		Vector: req.GetVector(),
+		Config: &payload.Insert_Config{
+			SkipStrictExistCheck: true,
+			Filters:              req.GetConfig().Filters,
+		},
+	})
+	if err != nil {
+		if span != nil {
+			span.SetStatus(trace.StatusCodeInternal(err.Error()))
+		}
+		return nil, status.WrapWithInternal(fmt.Sprintf("Update API failed to insert updated data after removing existing data %#v", req), err, info.Get())
+	}
+	return res, nil
+}
+
+func (s *server) StreamUpdate(stream vald.Update_StreamUpdateServer) error {
+	ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamUpdate")
+	defer func() {
+		if span != nil {
+			span.End()
+		}
+	}()
+	return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency,
+		func() interface{} { return new(payload.Update_Request) },
+		func(ctx context.Context, data interface{}) (interface{}, error) {
+			return s.Update(ctx, data.(*payload.Update_Request))
+		})
+}
+
+func (s *server) MultiUpdate(ctx context.Context, reqs *payload.Update_MultiRequest) (res *payload.Object_Locations, err error) {
+	ctx, span := trace.StartSpan(ctx, apiName+".MultiUpdate")
+	defer func() {
+		if span != nil {
+			span.End()
+		}
+	}()
+	vecs := reqs.GetRequests()
+	ids := make([]string, 0, len(vecs))
+	ireqs := make([]*payload.Insert_Request, 0, len(vecs))
+	rreqs := make([]*payload.Remove_Request, 0, len(vecs))
+	for _, vec := range vecs {
+		ids = append(ids, vec.GetVector().GetId())
+		ireqs = append(ireqs, &payload.Insert_Request{
+			Vector: vec.GetVector(),
+			Config: &payload.Insert_Config{
+				SkipStrictExistCheck: true,
+				Filters:              vec.GetConfig().GetFilters(),
+			},
+		})
+		rreqs = append(rreqs, &payload.Remove_Request{
+			Id: &payload.Object_ID{
+				Id: vec.GetVector().GetId(),
+			},
+			Config: &payload.Remove_Config{
+				SkipStrictExistCheck: vec.GetConfig().GetSkipStrictExistCheck(),
+			},
+		})
+	}
+	locs, err := s.MultiRemove(ctx, &payload.Remove_MultiRequest{
+		Requests: rreqs,
+	})
+	if err != nil {
+		if span != nil {
+			span.SetStatus(trace.StatusCodeInternal(err.Error()))
+		}
+		return nil, status.WrapWithInternal(fmt.Sprintf("MultiUpdate API failed Remove request %#v", ids), err, info.Get())
+	}
+	log.Debugf("uuids %v were removed from %v for MultiUpdate, executing MultiInsert next, see details %#v", ids, locs.GetLocations(), locs)
+	locs, err = s.MultiInsert(ctx, &payload.Insert_MultiRequest{
+		Requests: ireqs,
+	})
+	if err != nil {
+		if span != nil {
+			span.SetStatus(trace.StatusCodeInternal(err.Error()))
+		}
+		return nil, status.WrapWithInternal(fmt.Sprintf("MultiUpdate API failed Insert request %#v", vecs), err, info.Get())
+	}
+	return locs, nil
+}
+
+func (s *server) Upsert(ctx context.Context,
req *payload.Upsert_Request) (loc *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Upsert") + defer func() { + if span != nil { + span.End() + } + }() + + vec := req.GetVector() + uuid := vec.GetId() + filters := req.GetConfig().GetFilters() + _, err = s.Exists(ctx, &payload.Object_ID{ + Id: uuid, + }) + if err != nil { + log.Debugf("Upsert API metadata exists check to Agent error:\t%s", err.Error()) + loc, err = s.Insert(ctx, &payload.Insert_Request{ + Vector: vec, + Config: &payload.Insert_Config{ + SkipStrictExistCheck: true, + Filters: filters, + }, + }) + } else { + loc, err = s.Update(ctx, &payload.Update_Request{ + Vector: vec, + Config: &payload.Update_Config{ + SkipStrictExistCheck: true, + Filters: filters, + }, + }) + } + + if err != nil { + log.Debugf("Upsert API failed to process request uuid:\t%s\terror:\t%s", uuid, err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Upsert API failed to process request %s", uuid), err, info.Get()) + } + return loc, nil +} + +func (s *server) StreamUpsert(stream vald.Upsert_StreamUpsertServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamUpsert") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Upsert_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Upsert(ctx, data.(*payload.Upsert_Request)) + }) +} + +func (s *server) MultiUpsert(ctx context.Context, reqs *payload.Upsert_MultiRequest) (locs *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiUpsert") + defer func() { + if span != nil { + span.End() + } + }() + + insertReqs := make([]*payload.Insert_Request, 0, len(reqs.GetRequests())) + updateReqs := make([]*payload.Update_Request, 0, len(reqs.GetRequests())) + + ids := make([]string, 0, len(reqs.GetRequests())) + for _, req := range reqs.GetRequests() { + vec := req.GetVector() + uuid := vec.GetId() + ids = append(ids, uuid) + _, err = s.Exists(ctx, &payload.Object_ID{ + Id: uuid, + }) + filters := req.GetConfig().GetFilters() + if err != nil { + insertReqs = append(insertReqs, &payload.Insert_Request{ + Vector: vec, + Config: &payload.Insert_Config{ + SkipStrictExistCheck: true, + Filters: filters, + }, + }) + } else { + updateReqs = append(updateReqs, &payload.Update_Request{ + Vector: vec, + Config: &payload.Update_Config{ + SkipStrictExistCheck: true, + Filters: filters, + }, + }) + } + } + + insertLocs := make([]*payload.Object_Location, 0, len(insertReqs)) + updateLocs := make([]*payload.Object_Location, 0, len(updateReqs)) + + eg, ectx := errgroup.New(ctx) + eg.Go(safety.RecoverFunc(func() error { + if len(updateReqs) <= 0 { + return nil + } + + ectx, span := trace.StartSpan(ectx, apiName+".MultiUpsert/Go-MultiUpdate") + defer func() { + if span != nil { + span.End() + } + }() + var err error + loc, err := s.MultiUpdate(ectx, &payload.Update_MultiRequest{ + Requests: updateReqs, + }) + if err == nil { + updateLocs = loc.GetLocations() + } + return err + })) + eg.Go(safety.RecoverFunc(func() error { + if len(insertReqs) <= 0 { + return nil + } + + ectx, span := trace.StartSpan(ectx, apiName+".MultiUpsert/Go-MultiInsert") + defer func() { + if span != nil { + span.End() + } + }() + var err error + loc, err := s.MultiInsert(ectx, &payload.Insert_MultiRequest{ + Requests: insertReqs, + 
}) + if err == nil { + insertLocs = loc.GetLocations() + } + return err + })) + + err = eg.Wait() + if err != nil { + log.Debugf("MultiUpsert API failed to process request uuids:\t%s\terror:\t%s", ids, err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiUpsert API failed to process request %v", ids), err, info.Get()) + } + + return location.ReStructure(ids, &payload.Object_Locations{ + Locations: append(insertLocs, updateLocs...), + }), nil +} + +func (s *server) Remove(ctx context.Context, req *payload.Remove_Request) (locs *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Remove") + defer func() { + if span != nil { + span.End() + } + }() + + id := req.GetId() + if !req.GetConfig().GetSkipStrictExistCheck() { + sid, err := s.Exists(ctx, id) + if err != nil || sid == nil || len(sid.GetId()) == 0 { + err = errors.ErrObjectNotFound(err, id.GetId()) + log.Error(err) + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + return nil, status.WrapWithNotFound( + fmt.Sprintf("Remove API ID = %v not found", id), err, info.Get()) + } + } + var mu sync.Mutex + locs = &payload.Object_Location{ + Uuid: id.GetId(), + } + err = s.gateway.BroadCast(ctx, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) (err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Remove/"+target) + defer func() { + if span != nil { + span.End() + } + }() + loc, err := vc.Remove(ctx, req, copts...) + if err != nil { + log.Debug(err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil + } + mu.Lock() + locs.Ips = append(locs.Ips, loc.GetIps()...) + locs.Name = loc.GetName() + mu.Unlock() + return nil + }) + if err != nil { + log.Debugf("Remove API failed to remove uuid:\t%s\terror:\t%s", id.GetId(), err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Remove API failed request uuid %s", id.GetId()), err, info.Get()) + } + return locs, nil +} + +func (s *server) StreamRemove(stream vald.Remove_StreamRemoveServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamRemove") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Remove_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Remove(ctx, data.(*payload.Remove_Request)) + }) +} + +func (s *server) MultiRemove(ctx context.Context, reqs *payload.Remove_MultiRequest) (locs *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiRemove") + defer func() { + if span != nil { + span.End() + } + }() + ids := make([]string, 0, len(reqs.GetRequests())) + for _, req := range reqs.GetRequests() { + id := req.GetId() + ids = append(ids, id.GetId()) + if !req.GetConfig().GetSkipStrictExistCheck() { + sid, err := s.Exists(ctx, id) + if err != nil || sid == nil || len(sid.GetId()) == 0 { + err = errors.ErrObjectNotFound(err, id.GetId()) + log.Error(err) + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + return nil, status.WrapWithNotFound( + fmt.Sprintf("MultiRemove API ID = %v not found", id.GetId()), err, info.Get()) + } + } + } + var mu sync.Mutex + locs = &payload.Object_Locations{ + Locations: make([]*payload.Object_Location, 0, 
len(reqs.GetRequests())),
+	}
+	err = s.gateway.BroadCast(ctx, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) error {
+		ctx, span := trace.StartSpan(ctx, apiName+".MultiRemove/"+target)
+		defer func() {
+			if span != nil {
+				span.End()
+			}
+		}()
+		loc, err := vc.MultiRemove(ctx, reqs, copts...)
+		if err != nil {
+			log.Debug(err)
+			if span != nil {
+				span.SetStatus(trace.StatusCodeInternal(err.Error()))
+			}
+			return nil
+		}
+		mu.Lock()
+		locs.Locations = append(locs.Locations, loc.Locations...)
+		mu.Unlock()
+		return nil
+	})
+	if err != nil {
+		if span != nil {
+			span.SetStatus(trace.StatusCodeInternal(err.Error()))
+		}
+		return nil, status.WrapWithInternal(fmt.Sprintf("MultiRemove API failed to request uuids %v", ids), err, info.Get())
+	}
+	return location.ReStructure(ids, locs), nil
+}
+
+func (s *server) GetObject(ctx context.Context, id *payload.Object_ID) (vec *payload.Object_Vector, err error) {
+	ctx, span := trace.StartSpan(ctx, apiName+".GetObject")
+	defer func() {
+		if span != nil {
+			span.End()
+		}
+	}()
+	var cancel context.CancelFunc
+	ctx, cancel = context.WithCancel(ctx)
+	var once sync.Once
+	err = s.gateway.BroadCast(ctx, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) error {
+		ctx, span := trace.StartSpan(ctx, apiName+".GetObject/"+target)
+		defer func() {
+			if span != nil {
+				span.End()
+			}
+		}()
+		ovec, err := vc.GetObject(ctx, id, copts...)
+		if err != nil {
+			if span != nil {
+				span.SetStatus(trace.StatusCodeNotFound(err.Error()))
+			}
+			return nil
+		}
+		if ovec != nil && ovec.GetId() != "" && ovec.GetVector() != nil {
+			once.Do(func() {
+				vec = ovec
+				cancel()
+			})
+		}
+		return nil
+	})
+	if err != nil || vec == nil || vec.GetId() == "" || vec.GetVector() == nil {
+		err = errors.ErrObjectNotFound(err, id.GetId())
+		if span != nil {
+			span.SetStatus(trace.StatusCodeNotFound(err.Error()))
+		}
+		return nil, status.WrapWithNotFound(fmt.Sprintf("GetObject API uuid %s's object not found", id.GetId()), err, info.Get())
+	}
+	return vec, nil
+}
+
+func (s *server) StreamGetObject(stream vald.Object_StreamGetObjectServer) error {
+	ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamGetObject")
+	defer func() {
+		if span != nil {
+			span.End()
+		}
+	}()
+	return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency,
+		func() interface{} { return new(payload.Object_ID) },
+		func(ctx context.Context, data interface{}) (interface{}, error) {
+			return s.GetObject(ctx, data.(*payload.Object_ID))
+		})
+}
diff --git a/internal/client/gateway/vald/grpc/client_test.go b/pkg/gateway/lb/handler/grpc/handler_test.go
similarity index 50%
rename from internal/client/gateway/vald/grpc/client_test.go
rename to pkg/gateway/lb/handler/grpc/handler_test.go
index 5369ad364d..db395b33c7 100644
--- a/internal/client/gateway/vald/grpc/client_test.go
+++ b/pkg/gateway/lb/handler/grpc/handler_test.go
@@ -14,44 +14,41 @@ // limitations under the License.
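Exists and GetObject both use a first-responder broadcast: every per-agent callback swallows its own error, the first non-empty answer is recorded through a sync.Once, and the shared context is cancelled so the remaining agents stop early. A self-contained sketch of that pattern, with a hypothetical firstAnswer helper and an in-memory lookup table standing in for the agent calls:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// firstAnswer queries all targets concurrently and returns the first non-empty
// answer, cancelling the remaining lookups as soon as one target responds.
func firstAnswer(ctx context.Context, targets []string, lookup func(ctx context.Context, target string) string) string {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	var (
		once   sync.Once
		wg     sync.WaitGroup
		answer string
	)
	for _, target := range targets {
		target := target
		wg.Add(1)
		go func() {
			defer wg.Done()
			if v := lookup(ctx, target); v != "" {
				once.Do(func() {
					answer = v
					cancel() // stop the other in-flight lookups
				})
			}
		}()
	}
	wg.Wait()
	return answer
}

func main() {
	table := map[string]string{"agent-2": "vec-42"}
	got := firstAnswer(context.Background(), []string{"agent-1", "agent-2", "agent-3"},
		func(_ context.Context, target string) string { return table[target] },
	)
	fmt.Println(got) // vec-42
}
```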
// -// Package grpc provides vald gRPC client functions +// Package grpc provides grpc server logic package grpc import ( "context" "reflect" "testing" + "time" - "github.com/vdaas/vald/internal/client" - "github.com/vdaas/vald/internal/config" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" - igrpc "github.com/vdaas/vald/internal/net/grpc" - "google.golang.org/grpc" - + "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/pkg/gateway/lb/service" "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context opts []Option } type want struct { - want Client - err error + want vald.Server } type test struct { name string args args want want - checkFunc func(want, Client, error) error + checkFunc func(want, vald.Server) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got Client, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) - } + defaultCheckFunc := func(w want, got vald.Server) error { if !reflect.DeepEqual(got, w.want) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } @@ -63,7 +60,6 @@ func TestNew(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, opts: nil, }, want: want{}, @@ -77,7 +73,6 @@ func TestNew(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, opts: nil, }, want: want{}, @@ -87,9 +82,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -100,44 +97,46 @@ func TestNew(t *testing.T) { test.checkFunc = defaultCheckFunc } - got, err := New(test.args.ctx, test.args.opts...) - if err := test.checkFunc(test.want, got, err); err != nil { + got := New(test.args.opts...) 
+ if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_Exists(t *testing.T) { +func Test_server_Exists(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + ctx context.Context + meta *payload.Object_ID } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - want *client.ObjectID - err error + wantId *payload.Object_ID + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.ObjectID, error) error + checkFunc func(want, *payload.Object_ID, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.ObjectID, err error) error { + defaultCheckFunc := func(w want, gotId *payload.Object_ID, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotId, w.wantId) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotId, w.wantId) } return nil } @@ -148,12 +147,14 @@ func Test_gatewayClient_Exists(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + meta: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -167,12 +168,14 @@ func Test_gatewayClient_Exists(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + meta: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -181,9 +184,11 @@ func Test_gatewayClient_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -193,50 +198,54 @@ func Test_gatewayClient_Exists(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - got, err := c.Exists(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotId, err := s.Exists(test.args.ctx, test.args.meta) + if err := test.checkFunc(test.want, gotId, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_Search(t *testing.T) { +func Test_server_Search(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.SearchRequest + req *payload.Search_Request } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - want *client.SearchResponse - err error + wantRes *payload.Search_Response + err error } type test struct { name string args args 
fields fields want want - checkFunc func(want, *client.SearchResponse, error) error + checkFunc func(want, *payload.Search_Response, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.SearchResponse, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) } return nil } @@ -250,9 +259,11 @@ func Test_gatewayClient_Search(t *testing.T) { req: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -269,9 +280,11 @@ func Test_gatewayClient_Search(t *testing.T) { req: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -280,9 +293,11 @@ func Test_gatewayClient_Search(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -292,50 +307,54 @@ func Test_gatewayClient_Search(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - got, err := c.Search(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotRes, err := s.Search(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_SearchByID(t *testing.T) { +func Test_server_SearchByID(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.SearchIDRequest + req *payload.Search_IDRequest } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - want *client.SearchResponse - err error + wantRes *payload.Search_Response + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.SearchResponse, error) error + checkFunc func(want, *payload.Search_Response, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.SearchResponse, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) } return nil } 
@@ -349,9 +368,11 @@ func Test_gatewayClient_SearchByID(t *testing.T) { req: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -368,9 +389,11 @@ func Test_gatewayClient_SearchByID(t *testing.T) { req: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -379,9 +402,11 @@ func Test_gatewayClient_SearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -391,48 +416,56 @@ func Test_gatewayClient_SearchByID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - got, err := c.SearchByID(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotRes, err := s.SearchByID(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_StreamSearch(t *testing.T) { +func Test_server_search(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.SearchRequest - f func(*client.SearchResponse, error) + ctx context.Context + cfg *payload.Search_Config + f func(ctx context.Context, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - err error + wantRes *payload.Search_Response + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Search_Response, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -442,13 +475,15 @@ func Test_gatewayClient_StreamSearch(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, + cfg: nil, f: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -462,13 +497,15 @@ func Test_gatewayClient_StreamSearch(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, + cfg: nil, f: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -477,9 
+514,11 @@ func Test_gatewayClient_StreamSearch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -489,31 +528,33 @@ func Test_gatewayClient_StreamSearch(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamSearch(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.search(test.args.ctx, test.args.cfg, test.args.f) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_StreamSearchByID(t *testing.T) { +func Test_server_StreamSearch(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.SearchIDRequest - f func(*client.SearchResponse, error) + stream vald.Search_StreamSearchServer } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { err error @@ -539,14 +580,14 @@ func Test_gatewayClient_StreamSearchByID(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -559,14 +600,14 @@ func Test_gatewayClient_StreamSearchByID(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -575,9 +616,11 @@ func Test_gatewayClient_StreamSearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -587,30 +630,33 @@ func Test_gatewayClient_StreamSearchByID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamSearchByID(test.args.ctx, test.args.dataProvider, test.args.f) + err := s.StreamSearch(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_Insert(t *testing.T) { +func Test_server_StreamSearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVector + stream vald.Search_StreamSearchByIDServer } type fields struct { - addr string - cfg *config.GRPCClient 
- Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { err error @@ -636,13 +682,14 @@ func Test_gatewayClient_Insert(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -655,13 +702,14 @@ func Test_gatewayClient_Insert(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -670,9 +718,11 @@ func Test_gatewayClient_Insert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -682,48 +732,55 @@ func Test_gatewayClient_Insert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.Insert(test.args.ctx, test.args.req) + err := s.StreamSearchByID(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_StreamInsert(t *testing.T) { +func Test_server_MultiSearch(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectVector - f func(error) + ctx context.Context + reqs *payload.Search_MultiRequest } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - err error + wantRes *payload.Search_Responses + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Search_Responses, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Responses, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -733,13 +790,14 @@ func Test_gatewayClient_StreamInsert(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -753,13 +811,14 @@ func Test_gatewayClient_StreamInsert(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + 
replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -768,9 +827,11 @@ func Test_gatewayClient_StreamInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -780,47 +841,55 @@ func Test_gatewayClient_StreamInsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamInsert(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.MultiSearch(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_MultiInsert(t *testing.T) { +func Test_server_MultiSearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVectors + ctx context.Context + reqs *payload.Search_MultiIDRequest } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - err error + wantRes *payload.Search_Responses + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Search_Responses, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Responses, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -830,12 +899,14 @@ func Test_gatewayClient_MultiInsert(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -849,12 +920,14 @@ func Test_gatewayClient_MultiInsert(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -863,9 +936,11 @@ func Test_gatewayClient_MultiInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -875,47 +950,55 @@ func Test_gatewayClient_MultiInsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: 
test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.MultiInsert(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.MultiSearchByID(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_Update(t *testing.T) { +func Test_server_Insert(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.ObjectVector + req *payload.Insert_Request } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - err error + wantCe *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCe *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotCe, w.wantCe) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCe, w.wantCe) + } return nil } tests := []test{ @@ -928,9 +1011,11 @@ func Test_gatewayClient_Update(t *testing.T) { req: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -947,9 +1032,11 @@ func Test_gatewayClient_Update(t *testing.T) { req: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -958,9 +1045,11 @@ func Test_gatewayClient_Update(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -970,31 +1059,33 @@ func Test_gatewayClient_Update(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.Update(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotCe, err := s.Insert(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotCe, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_StreamUpdate(t *testing.T) { +func Test_server_StreamInsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectVector - f func(error) + stream vald.Insert_StreamInsertServer } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { err error @@ -1020,14 +1111,14 @@ func 
Test_gatewayClient_StreamUpdate(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1040,14 +1131,14 @@ func Test_gatewayClient_StreamUpdate(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1056,9 +1147,11 @@ func Test_gatewayClient_StreamUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1068,47 +1161,55 @@ func Test_gatewayClient_StreamUpdate(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamUpdate(test.args.ctx, test.args.dataProvider, test.args.f) + err := s.StreamInsert(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_MultiUpdate(t *testing.T) { +func Test_server_MultiInsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVectors + ctx context.Context + reqs *payload.Insert_MultiRequest } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - err error + wantLocs *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotLocs *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotLocs, w.wantLocs) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLocs, w.wantLocs) + } return nil } tests := []test{ @@ -1118,12 +1219,14 @@ func Test_gatewayClient_MultiUpdate(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1137,12 +1240,14 @@ func Test_gatewayClient_MultiUpdate(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1151,9 +1256,11 @@ func Test_gatewayClient_MultiUpdate(t *testing.T) { */ } - 
for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1163,47 +1270,55 @@ func Test_gatewayClient_MultiUpdate(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.MultiUpdate(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotLocs, err := s.MultiInsert(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotLocs, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_Upsert(t *testing.T) { +func Test_server_Update(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.ObjectVector + req *payload.Update_Request } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - err error + wantRes *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1216,9 +1331,11 @@ func Test_gatewayClient_Upsert(t *testing.T) { req: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1235,9 +1352,11 @@ func Test_gatewayClient_Upsert(t *testing.T) { req: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1246,9 +1365,11 @@ func Test_gatewayClient_Upsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1258,30 +1379,33 @@ func Test_gatewayClient_Upsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.Upsert(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.Update(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotRes, err); err != nil { 
tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_MultiUpsert(t *testing.T) { +func Test_server_StreamUpdate(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVectors + stream vald.Update_StreamUpdateServer } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { err error @@ -1307,13 +1431,14 @@ func Test_gatewayClient_MultiUpsert(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1326,13 +1451,14 @@ func Test_gatewayClient_MultiUpsert(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1341,9 +1467,11 @@ func Test_gatewayClient_MultiUpsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1353,48 +1481,55 @@ func Test_gatewayClient_MultiUpsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.MultiUpsert(test.args.ctx, test.args.req) + err := s.StreamUpdate(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_StreamUpsert(t *testing.T) { +func Test_server_MultiUpdate(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectVector - f func(error) + ctx context.Context + reqs *payload.Update_MultiRequest } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - err error + wantRes *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1404,13 +1539,14 @@ func Test_gatewayClient_StreamUpsert(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + 
replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1424,13 +1560,14 @@ func Test_gatewayClient_StreamUpsert(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1439,9 +1576,11 @@ func Test_gatewayClient_StreamUpsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1451,47 +1590,55 @@ func Test_gatewayClient_StreamUpsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamUpsert(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.MultiUpdate(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_Remove(t *testing.T) { +func Test_server_Upsert(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.ObjectID + req *payload.Upsert_Request } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - err error + wantLoc *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotLoc *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotLoc, w.wantLoc) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoc, w.wantLoc) + } return nil } tests := []test{ @@ -1504,9 +1651,11 @@ func Test_gatewayClient_Remove(t *testing.T) { req: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1523,9 +1672,11 @@ func Test_gatewayClient_Remove(t *testing.T) { req: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1534,9 +1685,11 @@ func Test_gatewayClient_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1546,31 +1699,33 @@ func Test_gatewayClient_Remove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = 
defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.Remove(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotLoc, err := s.Upsert(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotLoc, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_StreamRemove(t *testing.T) { +func Test_server_StreamUpsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectID - f func(error) + stream vald.Upsert_StreamUpsertServer } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { err error @@ -1596,14 +1751,14 @@ func Test_gatewayClient_StreamRemove(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1616,14 +1771,14 @@ func Test_gatewayClient_StreamRemove(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1632,9 +1787,11 @@ func Test_gatewayClient_StreamRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1644,47 +1801,55 @@ func Test_gatewayClient_StreamRemove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamRemove(test.args.ctx, test.args.dataProvider, test.args.f) + err := s.StreamUpsert(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_MultiRemove(t *testing.T) { +func Test_server_MultiUpsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectIDs + ctx context.Context + reqs *payload.Upsert_MultiRequest } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - err error + wantLocs *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w 
want, gotLocs *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotLocs, w.wantLocs) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLocs, w.wantLocs) + } return nil } tests := []test{ @@ -1694,12 +1859,14 @@ func Test_gatewayClient_MultiRemove(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1713,12 +1880,14 @@ func Test_gatewayClient_MultiRemove(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1727,9 +1896,11 @@ func Test_gatewayClient_MultiRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1739,50 +1910,54 @@ func Test_gatewayClient_MultiRemove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.MultiRemove(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotLocs, err := s.MultiUpsert(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotLocs, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_GetObject(t *testing.T) { +func Test_server_Remove(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.ObjectID + req *payload.Remove_Request } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - want *client.MetaObject - err error + wantLocs *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.MetaObject, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.MetaObject, err error) error { + defaultCheckFunc := func(w want, gotLocs *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotLocs, w.wantLocs) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLocs, w.wantLocs) } return nil } @@ -1796,9 +1971,11 @@ func Test_gatewayClient_GetObject(t *testing.T) { req: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: 
defaultCheckFunc, @@ -1815,9 +1992,11 @@ func Test_gatewayClient_GetObject(t *testing.T) { req: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1826,9 +2005,11 @@ func Test_gatewayClient_GetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1838,31 +2019,33 @@ func Test_gatewayClient_GetObject(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - got, err := c.GetObject(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotLocs, err := s.Remove(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotLocs, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_gatewayClient_StreamGetObject(t *testing.T) { +func Test_server_StreamRemove(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectID - f func(*client.MetaObject, error) + stream vald.Remove_StreamRemoveServer } type fields struct { - addr string - cfg *config.GRPCClient - Client igrpc.Client + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { err error @@ -1888,14 +2071,14 @@ func Test_gatewayClient_StreamGetObject(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1908,14 +2091,14 @@ func Test_gatewayClient_StreamGetObject(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - cfg: nil, - Client: nil, + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1924,9 +2107,11 @@ func Test_gatewayClient_StreamGetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1936,42 +2121,55 @@ func Test_gatewayClient_StreamGetObject(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &gatewayClient{ - addr: test.fields.addr, - cfg: test.fields.cfg, - Client: test.fields.Client, + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamGetObject(test.args.ctx, test.args.dataProvider, test.args.f) + err := s.StreamRemove(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_streamSearch(t 
*testing.T) { +func Test_server_MultiRemove(t *testing.T) { + t.Parallel() type args struct { - st grpc.ClientStream - dataProvider func() interface{} - f func(*client.SearchResponse, error) + ctx context.Context + reqs *payload.Remove_MultiRequest + } + type fields struct { + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { - err error + wantLocs *payload.Object_Locations + err error } type test struct { name string args args + fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotLocs *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotLocs, w.wantLocs) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLocs, w.wantLocs) + } return nil } tests := []test{ @@ -1980,9 +2178,15 @@ func Test_streamSearch(t *testing.T) { { name: "test_case_1", args: args { - st: nil, - dataProvider: nil, - f: nil, + ctx: nil, + reqs: nil, + }, + fields: fields { + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1995,9 +2199,15 @@ func Test_streamSearch(t *testing.T) { return test { name: "test_case_2", args: args { - st: nil, - dataProvider: nil, - f: nil, + ctx: nil, + reqs: nil, + }, + fields: fields { + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -2006,9 +2216,11 @@ func Test_streamSearch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2018,21 +2230,142 @@ func Test_streamSearch(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, + } - err := streamSearch(test.args.st, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotLocs, err := s.MultiRemove(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotLocs, err); err != nil { tt.Errorf("error = %v", err) } + }) + } +} + +func Test_server_GetObject(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + id *payload.Object_ID + } + type fields struct { + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int + } + type want struct { + wantVec *payload.Object_Vector + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, *payload.Object_Vector, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotVec *payload.Object_Vector, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotVec, w.wantVec) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) + } + return nil + } + tests := 
[]test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + id: nil, + }, + fields: fields { + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + id: nil, + }, + fields: fields { + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, + } + gotVec, err := s.GetObject(test.args.ctx, test.args.id) + if err := test.checkFunc(test.want, gotVec, err); err != nil { + tt.Errorf("error = %v", err) + } }) } } -func Test_stream(t *testing.T) { +func Test_server_StreamGetObject(t *testing.T) { + t.Parallel() type args struct { - st grpc.ClientStream - dataProvider func() interface{} - f func(error) + stream vald.Object_StreamGetObjectServer + } + type fields struct { + eg errgroup.Group + gateway service.Gateway + timeout time.Duration + replica int + streamConcurrency int } type want struct { err error @@ -2040,6 +2373,7 @@ func Test_stream(t *testing.T) { type test struct { name string args args + fields fields want want checkFunc func(want, error) error beforeFunc func(args) @@ -2057,9 +2391,14 @@ func Test_stream(t *testing.T) { { name: "test_case_1", args: args { - st: nil, - dataProvider: nil, - f: nil, + stream: nil, + }, + fields: fields { + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -2072,9 +2411,14 @@ func Test_stream(t *testing.T) { return test { name: "test_case_2", args: args { - st: nil, - dataProvider: nil, - f: nil, + stream: nil, + }, + fields: fields { + eg: nil, + gateway: nil, + timeout: nil, + replica: 0, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -2083,9 +2427,11 @@ func Test_stream(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2095,12 +2441,18 @@ func Test_stream(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } + s := &server{ + eg: test.fields.eg, + gateway: test.fields.gateway, + timeout: test.fields.timeout, + replica: test.fields.replica, + streamConcurrency: test.fields.streamConcurrency, + } - err := stream(test.args.st, test.args.dataProvider, test.args.f) + err := s.StreamGetObject(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/gateway/lb/handler/grpc/option.go b/pkg/gateway/lb/handler/grpc/option.go new file mode 100644 index 0000000000..cfb8daf222 --- /dev/null +++ b/pkg/gateway/lb/handler/grpc/option.go @@ -0,0 +1,77 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) 
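Before the new option.go file below, a short aside on the change that repeats through every test hunk above: the loop now copies the loop variable (test := tc) before t.Run, marks each subtest parallel, and defers goleak.VerifyNone against the subtest's own *testing.T rather than the parent's. The sketch below isolates just that pattern; it is not part of the diff, and the test name and body are illustrative, assuming only the testing and go.uber.org/goleak packages these files already import.

package grpc_test

import (
	"testing"

	"go.uber.org/goleak"
)

func TestParallelPattern(t *testing.T) {
	t.Parallel()
	tests := []struct{ name string }{
		{name: "test_case_1"},
		{name: "test_case_2"},
	}
	for _, tc := range tests {
		// Re-declare the loop variable so each parallel subtest closes over its
		// own copy; otherwise every subtest would observe whatever value the
		// loop variable holds once the parent goroutine races ahead.
		test := tc
		t.Run(test.name, func(tt *testing.T) {
			// Run subtests concurrently and check for leaked goroutines on the
			// subtest's *testing.T, so a leak is reported against the case that
			// actually produced it.
			tt.Parallel()
			defer goleak.VerifyNone(tt)
			_ = test.name // the real test body goes here
		})
	}
}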
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package grpc provides grpc server logic +package grpc + +import ( + "time" + + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/internal/timeutil" + "github.com/vdaas/vald/pkg/gateway/lb/service" +) + +type Option func(*server) + +var defaultOpts = []Option{ + WithErrGroup(errgroup.Get()), + WithReplicationCount(3), + WithStreamConcurrency(20), + WithTimeout("5s"), +} + +func WithGateway(g service.Gateway) Option { + return func(s *server) { + if g != nil { + s.gateway = g + } + } +} + +func WithErrGroup(eg errgroup.Group) Option { + return func(s *server) { + if eg != nil { + s.eg = eg + } + } +} + +func WithTimeout(dur string) Option { + return func(s *server) { + d, err := timeutil.Parse(dur) + if err != nil { + d = time.Second * 10 + } + s.timeout = d + } +} + +func WithReplicationCount(rep int) Option { + return func(s *server) { + if rep > 1 { + s.replica = rep + } + } +} + +func WithStreamConcurrency(c int) Option { + return func(s *server) { + if c != 0 { + s.streamConcurrency = c + } + } +} diff --git a/pkg/gateway/lb/handler/grpc/option_test.go b/pkg/gateway/lb/handler/grpc/option_test.go new file mode 100644 index 0000000000..90ea85ff69 --- /dev/null +++ b/pkg/gateway/lb/handler/grpc/option_test.go @@ -0,0 +1,611 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package grpc provides grpc server logic +package grpc + +import ( + "testing" + + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/pkg/gateway/lb/service" + "go.uber.org/goleak" +) + +func TestWithGateway(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + g service.Gateway + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + g: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + g: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithGateway(test.args.g) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithGateway(test.args.g) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + eg errgroup.Group + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + eg: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + eg: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithErrGroup(test.args.eg) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithErrGroup(test.args.eg) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + dur string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + dur: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + dur: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithTimeout(test.args.dur) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithTimeout(test.args.dur) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithReplicationCount(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + rep int + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + rep: 0, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + rep: 0, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithReplicationCount(test.args.rep) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithReplicationCount(test.args.rep) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithStreamConcurrency(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + c int + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + c: 0, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + c: 0, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithStreamConcurrency(test.args.c) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithStreamConcurrency(test.args.c) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/gateway/lb/handler/rest/handler.go b/pkg/gateway/lb/handler/rest/handler.go new file mode 100644 index 0000000000..59032d8e1f --- /dev/null +++ b/pkg/gateway/lb/handler/rest/handler.go @@ -0,0 +1,163 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
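Before the REST handler below, a usage sketch of the functional options defined in option.go above. Each option guards its argument and leaves the current value untouched when the argument is invalid; WithTimeout is the exception, replacing the value with a hard-coded 10s fallback whenever the duration string fails to parse. The constructor in this sketch is hypothetical (the package's real constructor lives in a file outside this diff); it only mirrors the defaults-then-caller ordering visible in the REST handler's New further below.

// newServerForExample is illustrative only; it shows how defaultOpts interact
// with caller-supplied options when applied in that order.
func newServerForExample(opts ...Option) *server {
	s := new(server)
	for _, opt := range append(defaultOpts, opts...) {
		opt(s)
	}
	return s
}

var _ = newServerForExample(
	WithGateway(nil),              // nil fails the guard, so the field keeps its zero value
	WithTimeout("not-a-duration"), // parse failure replaces the 5s default with the 10s fallback
	WithReplicationCount(1),       // values <= 1 are ignored, so the default of 3 survives
	WithStreamConcurrency(8),      // non-zero, so it replaces the default of 20
)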
+// + +// Package rest provides rest api logic +package rest + +import ( + "net/http" + + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/net/http/dump" + "github.com/vdaas/vald/internal/net/http/json" +) + +type Handler interface { + Index(w http.ResponseWriter, r *http.Request) (int, error) + Exists(w http.ResponseWriter, r *http.Request) (int, error) + Search(w http.ResponseWriter, r *http.Request) (int, error) + SearchByID(w http.ResponseWriter, r *http.Request) (int, error) + MultiSearch(w http.ResponseWriter, r *http.Request) (int, error) + MultiSearchByID(w http.ResponseWriter, r *http.Request) (int, error) + Insert(w http.ResponseWriter, r *http.Request) (int, error) + MultiInsert(w http.ResponseWriter, r *http.Request) (int, error) + Update(w http.ResponseWriter, r *http.Request) (int, error) + MultiUpdate(w http.ResponseWriter, r *http.Request) (int, error) + Upsert(w http.ResponseWriter, r *http.Request) (int, error) + MultiUpsert(w http.ResponseWriter, r *http.Request) (int, error) + Remove(w http.ResponseWriter, r *http.Request) (int, error) + MultiRemove(w http.ResponseWriter, r *http.Request) (int, error) + GetObject(w http.ResponseWriter, r *http.Request) (int, error) +} + +type handler struct { + vald vald.Server +} + +func New(opts ...Option) Handler { + h := new(handler) + + for _, opt := range append(defaultOpts, opts...) { + opt(h) + } + return h +} + +func (h *handler) Index(w http.ResponseWriter, r *http.Request) (int, error) { + data := make(map[string]interface{}) + return json.Handler(w, r, &data, func() (interface{}, error) { + return dump.Request(nil, data, r) + }) +} + +func (h *handler) Search(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Search_Request + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.Search(r.Context(), req) + }) +} + +func (h *handler) SearchByID(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Search_IDRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.SearchByID(r.Context(), req) + }) +} + +func (h *handler) MultiSearch(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Search_MultiRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.MultiSearch(r.Context(), req) + }) +} + +func (h *handler) MultiSearchByID(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Search_MultiIDRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.MultiSearchByID(r.Context(), req) + }) +} + +func (h *handler) Insert(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Insert_Request + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.Insert(r.Context(), req) + }) +} + +func (h *handler) MultiInsert(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Insert_MultiRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.MultiInsert(r.Context(), req) + }) +} + +func (h *handler) Update(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Update_Request + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.Update(r.Context(), req) + }) +} + +func (h *handler) MultiUpdate(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req 
*payload.Update_MultiRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.MultiUpdate(r.Context(), req) + }) +} + +func (h *handler) Upsert(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Upsert_Request + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.Upsert(r.Context(), req) + }) +} + +func (h *handler) MultiUpsert(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Upsert_MultiRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.MultiUpsert(r.Context(), req) + }) +} + +func (h *handler) Remove(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Remove_Request + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.Remove(r.Context(), req) + }) +} + +func (h *handler) MultiRemove(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Remove_MultiRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.MultiRemove(r.Context(), req) + }) +} + +func (h *handler) GetObject(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Object_ID + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.GetObject(r.Context(), req) + }) +} + +func (h *handler) Exists(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Object_ID + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.Exists(r.Context(), req) + }) +} diff --git a/internal/client/agent/rest/client_test.go b/pkg/gateway/lb/handler/rest/handler_test.go similarity index 50% rename from internal/client/agent/rest/client_test.go rename to pkg/gateway/lb/handler/rest/handler_test.go index 822806b6ff..2987f032b3 100644 --- a/internal/client/agent/rest/client_test.go +++ b/pkg/gateway/lb/handler/rest/handler_test.go @@ -14,37 +14,36 @@ // limitations under the License. 
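Before the handler tests: every method in handler.go above reduces to the same three lines, declare a typed request, hand its address to json.Handler, and delegate to the matching vald.Server method under r.Context(). The standalone sketch below reproduces that shape with only the standard library; decodeAndServe and searchRequest are illustrative stand-ins, not vald's internal json.Handler or payload types.

package example

import (
	"encoding/json"
	"net/http"
)

// decodeAndServe mirrors the handler shape above: decode the JSON body into
// req, run the supplied logic, and write the result back as JSON, returning a
// status code and error like the Handler interface methods do.
func decodeAndServe(w http.ResponseWriter, r *http.Request, req interface{},
	logic func() (interface{}, error)) (int, error) {
	if err := json.NewDecoder(r.Body).Decode(req); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return http.StatusBadRequest, err
	}
	res, err := logic()
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return http.StatusInternalServerError, err
	}
	w.Header().Set("Content-Type", "application/json")
	return http.StatusOK, json.NewEncoder(w).Encode(res)
}

// searchRequest stands in for payload.Search_Request.
type searchRequest struct {
	Vector []float32 `json:"vector"`
}

func searchHandler(w http.ResponseWriter, r *http.Request) (int, error) {
	var req searchRequest
	return decodeAndServe(w, r, &req, func() (interface{}, error) {
		// the real handler would call h.vald.Search(r.Context(), req) here
		return map[string]int{"dimension": len(req.Vector)}, nil
	})
}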
// -// Package rest provides agent ngt REST client functions +// Package rest provides rest api logic package rest import ( - "context" + "net/http" "reflect" "testing" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/apis/grpc/v1/vald" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context opts []Option } type want struct { - want Client + want Handler } type test struct { name string args args want want - checkFunc func(want, Client) error + checkFunc func(want, Handler) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got Client) error { + defaultCheckFunc := func(w want, got Handler) error { if !reflect.DeepEqual(got, w.want) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } @@ -56,7 +55,6 @@ func TestNew(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, opts: nil, }, want: want{}, @@ -70,7 +68,6 @@ func TestNew(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, opts: nil, }, want: want{}, @@ -80,9 +77,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -93,42 +92,42 @@ func TestNew(t *testing.T) { test.checkFunc = defaultCheckFunc } - got := New(test.args.ctx, test.args.opts...) + got := New(test.args.opts...) if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_Exists(t *testing.T) { +func Test_handler_Index(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - wantRes *client.ObjectID - err error + want int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.ObjectID, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *client.ObjectID, err error) error { + defaultCheckFunc := func(w want, got int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(gotRes, w.wantRes) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -138,11 +137,11 @@ func Test_agentClient_Exists(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -155,11 +154,11 @@ func Test_agentClient_Exists(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -168,9 +167,11 @@ func Test_agentClient_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ 
-180,46 +181,46 @@ func Test_agentClient_Exists(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - gotRes, err := c.Exists(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, gotRes, err); err != nil { + got, err := h.Index(test.args.w, test.args.r) + if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_Search(t *testing.T) { +func Test_handler_Search(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.SearchRequest + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - wantRes *client.SearchResponse - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.SearchResponse, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *client.SearchResponse, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(gotRes, w.wantRes) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -229,11 +230,11 @@ func Test_agentClient_Search(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -246,11 +247,11 @@ func Test_agentClient_Search(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -259,9 +260,11 @@ func Test_agentClient_Search(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -271,46 +274,46 @@ func Test_agentClient_Search(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - gotRes, err := c.Search(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, gotRes, err); err != nil { + gotCode, err := h.Search(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_SearchByID(t *testing.T) { +func Test_handler_SearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.SearchIDRequest + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - wantRes *client.SearchResponse - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.SearchResponse, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes 
*client.SearchResponse, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(gotRes, w.wantRes) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -320,11 +323,11 @@ func Test_agentClient_SearchByID(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -337,11 +340,11 @@ func Test_agentClient_SearchByID(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -350,9 +353,11 @@ func Test_agentClient_SearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -362,133 +367,46 @@ func Test_agentClient_SearchByID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - gotRes, err := c.SearchByID(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, gotRes, err); err != nil { + gotCode, err := h.SearchByID(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_StreamSearch(t *testing.T) { +func Test_handler_MultiSearch(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.SearchRequest - f func(*client.SearchResponse, error) + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - ctx: nil, - dataProvider: nil, - f: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - ctx: nil, - dataProvider: nil, - f: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - c := &agentClient{ - addr: test.fields.addr, - } - - err := c.StreamSearch(test.args.ctx, test.args.dataProvider, 
test.args.f) - if err := test.checkFunc(test.want, err); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_agentClient_StreamSearchByID(t *testing.T) { - type args struct { - ctx context.Context - dataProvider func() *client.SearchIDRequest - f func(*client.SearchResponse, error) - } - type fields struct { - addr string - } - type want struct { - err error - } - type test struct { - name string - args args - fields fields - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -498,12 +416,11 @@ func Test_agentClient_StreamSearchByID(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -516,12 +433,11 @@ func Test_agentClient_StreamSearchByID(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -530,9 +446,11 @@ func Test_agentClient_StreamSearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -542,130 +460,46 @@ func Test_agentClient_StreamSearchByID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.StreamSearchByID(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.MultiSearch(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_Insert(t *testing.T) { +func Test_handler_MultiSearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVector + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - ctx: nil, - req: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - ctx: nil, - req: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - 
defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - c := &agentClient{ - addr: test.fields.addr, - } - - err := c.Insert(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_agentClient_StreamInsert(t *testing.T) { - type args struct { - ctx context.Context - dataProvider func() *client.ObjectVector - f func(error) - } - type fields struct { - addr string - } - type want struct { - err error - } - type test struct { - name string - args args - fields fields - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -675,12 +509,11 @@ func Test_agentClient_StreamInsert(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -693,12 +526,11 @@ func Test_agentClient_StreamInsert(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -707,9 +539,11 @@ func Test_agentClient_StreamInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -719,129 +553,46 @@ func Test_agentClient_StreamInsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.StreamInsert(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.MultiSearchByID(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_MultiInsert(t *testing.T) { +func Test_handler_Insert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - objectVectors *client.ObjectVectors + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - ctx: nil, - objectVectors: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test 
cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - ctx: nil, - objectVectors: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - c := &agentClient{ - addr: test.fields.addr, - } - - err := c.MultiInsert(test.args.ctx, test.args.objectVectors) - if err := test.checkFunc(test.want, err); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_agentClient_Update(t *testing.T) { - type args struct { - ctx context.Context - req *client.ObjectVector - } - type fields struct { - addr string - } - type want struct { - err error - } - type test struct { - name string - args args - fields fields - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -851,11 +602,11 @@ func Test_agentClient_Update(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -868,11 +619,11 @@ func Test_agentClient_Update(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -881,9 +632,11 @@ func Test_agentClient_Update(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -893,132 +646,46 @@ func Test_agentClient_Update(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.Update(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.Insert(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_StreamUpdate(t *testing.T) { +func Test_handler_MultiInsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectVector - f func(error) + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - return nil - } - tests := []test{ - // TODO test 
cases - /* - { - name: "test_case_1", - args: args { - ctx: nil, - dataProvider: nil, - f: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - ctx: nil, - dataProvider: nil, - f: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - c := &agentClient{ - addr: test.fields.addr, - } - - err := c.StreamUpdate(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_agentClient_MultiUpdate(t *testing.T) { - type args struct { - ctx context.Context - objectVectors *client.ObjectVectors - } - type fields struct { - addr string - } - type want struct { - err error - } - type test struct { - name string - args args - fields fields - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -1028,11 +695,11 @@ func Test_agentClient_MultiUpdate(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - objectVectors: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1045,11 +712,11 @@ func Test_agentClient_MultiUpdate(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - objectVectors: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1058,9 +725,11 @@ func Test_agentClient_MultiUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1070,130 +739,46 @@ func Test_agentClient_MultiUpdate(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.MultiUpdate(test.args.ctx, test.args.objectVectors) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.MultiInsert(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_Remove(t *testing.T) { +func Test_handler_Update(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - 
defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - ctx: nil, - req: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - ctx: nil, - req: nil, - }, - fields: fields { - addr: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - c := &agentClient{ - addr: test.fields.addr, - } - - err := c.Remove(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_agentClient_StreamRemove(t *testing.T) { - type args struct { - ctx context.Context - dataProvider func() *client.ObjectID - f func(error) - } - type fields struct { - addr string - } - type want struct { - err error - } - type test struct { - name string - args args - fields fields - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -1203,12 +788,11 @@ func Test_agentClient_StreamRemove(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1221,12 +805,11 @@ func Test_agentClient_StreamRemove(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1235,9 +818,11 @@ func Test_agentClient_StreamRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1247,43 +832,47 @@ func Test_agentClient_StreamRemove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.StreamRemove(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.Update(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_MultiRemove(t *testing.T) { +func Test_handler_MultiUpdate(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectIDs + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald 
vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } return nil } tests := []test{ @@ -1292,11 +881,11 @@ func Test_agentClient_MultiRemove(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1309,11 +898,11 @@ func Test_agentClient_MultiRemove(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1322,9 +911,11 @@ func Test_agentClient_MultiRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1334,46 +925,46 @@ func Test_agentClient_MultiRemove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.MultiRemove(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.MultiUpdate(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_GetObject(t *testing.T) { +func Test_handler_Upsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - wantRes *client.ObjectVector - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.ObjectVector, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *client.ObjectVector, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(gotRes, w.wantRes) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -1383,11 +974,11 @@ func Test_agentClient_GetObject(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1400,11 +991,11 @@ func Test_agentClient_GetObject(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, 
checkFunc: defaultCheckFunc, @@ -1413,9 +1004,11 @@ func Test_agentClient_GetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1425,44 +1018,47 @@ func Test_agentClient_GetObject(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - gotRes, err := c.GetObject(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, gotRes, err); err != nil { + gotCode, err := h.Upsert(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_StreamGetObject(t *testing.T) { +func Test_handler_MultiUpsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectID - f func(*client.ObjectVector, error) + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } return nil } tests := []test{ @@ -1471,12 +1067,11 @@ func Test_agentClient_StreamGetObject(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1489,12 +1084,11 @@ func Test_agentClient_StreamGetObject(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1503,9 +1097,11 @@ func Test_agentClient_StreamGetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1515,43 +1111,47 @@ func Test_agentClient_StreamGetObject(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.StreamGetObject(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.MultiUpsert(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_CreateIndex(t *testing.T) { +func Test_handler_Remove(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ControlCreateIndexRequest + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - 
err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } return nil } tests := []test{ @@ -1560,11 +1160,11 @@ func Test_agentClient_CreateIndex(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1577,11 +1177,11 @@ func Test_agentClient_CreateIndex(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1590,9 +1190,11 @@ func Test_agentClient_CreateIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1602,42 +1204,47 @@ func Test_agentClient_CreateIndex(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.CreateIndex(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.Remove(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_SaveIndex(t *testing.T) { +func Test_handler_MultiRemove(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } return nil } tests := []test{ @@ -1646,10 +1253,11 @@ func Test_agentClient_SaveIndex(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1662,10 +1270,11 @@ func Test_agentClient_SaveIndex(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1674,9 +1283,11 @@ func Test_agentClient_SaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer 
goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1686,43 +1297,47 @@ func Test_agentClient_SaveIndex(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.SaveIndex(test.args.ctx) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.MultiRemove(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_CreateAndSaveIndex(t *testing.T) { +func Test_handler_GetObject(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ControlCreateIndexRequest + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } return nil } tests := []test{ @@ -1731,11 +1346,11 @@ func Test_agentClient_CreateAndSaveIndex(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1748,11 +1363,11 @@ func Test_agentClient_CreateAndSaveIndex(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1761,9 +1376,11 @@ func Test_agentClient_CreateAndSaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1773,45 +1390,46 @@ func Test_agentClient_CreateAndSaveIndex(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - err := c.CreateAndSaveIndex(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotCode, err := h.GetObject(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_agentClient_IndexInfo(t *testing.T) { +func Test_handler_Exists(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context + w http.ResponseWriter + r *http.Request } type fields struct { - addr string + vald vald.Server } type want struct { - wantRes *client.InfoIndex - err error + wantCode int + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.InfoIndex, error) error + checkFunc func(want, int, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *client.InfoIndex, err error) error { + defaultCheckFunc := func(w want, gotCode int, err error) error { if 
!errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(gotRes, w.wantRes) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) } return nil } @@ -1821,10 +1439,11 @@ func Test_agentClient_IndexInfo(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1837,10 +1456,11 @@ func Test_agentClient_IndexInfo(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, + w: nil, + r: nil, }, fields: fields { - addr: "", + vald: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1849,9 +1469,11 @@ func Test_agentClient_IndexInfo(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1861,15 +1483,14 @@ func Test_agentClient_IndexInfo(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &agentClient{ - addr: test.fields.addr, + h := &handler{ + vald: test.fields.vald, } - gotRes, err := c.IndexInfo(test.args.ctx) - if err := test.checkFunc(test.want, gotRes, err); err != nil { + gotCode, err := h.Exists(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/gateway/lb/handler/rest/option.go b/pkg/gateway/lb/handler/rest/option.go new file mode 100644 index 0000000000..478a365a77 --- /dev/null +++ b/pkg/gateway/lb/handler/rest/option.go @@ -0,0 +1,30 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package rest provides rest api logic +package rest + +import "github.com/vdaas/vald/apis/grpc/v1/vald" + +type Option func(*handler) + +var defaultOpts = []Option{} + +func WithVald(v vald.Server) Option { + return func(h *handler) { + h.vald = v + } +} diff --git a/pkg/gateway/lb/handler/rest/option_test.go b/pkg/gateway/lb/handler/rest/option_test.go new file mode 100644 index 0000000000..5efca60aa7 --- /dev/null +++ b/pkg/gateway/lb/handler/rest/option_test.go @@ -0,0 +1,142 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package rest provides rest api logic +package rest + +import ( + "testing" + + "github.com/vdaas/vald/apis/grpc/v1/vald" + "go.uber.org/goleak" +) + +func TestWithVald(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + v vald.Server + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + v: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + v: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithVald(test.args.v) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithVald(test.args.v) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/gateway/lb/router/option.go b/pkg/gateway/lb/router/option.go new file mode 100644 index 0000000000..0235c3e922 --- /dev/null +++ b/pkg/gateway/lb/router/option.go @@ -0,0 +1,40 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
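The generated TestWithVald above ships with its cases commented out as TODOs. As a hedged illustration of how the template is meant to be filled in (an assumption, not part of the patch), one case could alias T to handler, uncomment the no-error checkFunc block, and let defaultCheckFunc verify that the option stores its argument:

// Hypothetical test case for TestWithVald, assuming `type T = handler` and the
// non-error defaultCheckFunc variant; it is not provided by the patch itself.
{
	name: "set vald server field",
	args: args{
		v: nil, // a real case would pass a non-nil vald.Server test double (assumed to exist elsewhere)
	},
	want: want{
		obj: &T{vald: nil}, // defaultCheckFunc compares the object configured by WithVald against this
	},
},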
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package router provides implementation of Go API for routing http Handler wrapped by rest.Func +package router + +import ( + "github.com/vdaas/vald/pkg/gateway/lb/handler/rest" +) + +type Option func(*router) + +var defaultOpts = []Option{ + WithTimeout("3s"), +} + +func WithHandler(h rest.Handler) Option { + return func(r *router) { + r.handler = h + } +} + +func WithTimeout(timeout string) Option { + return func(r *router) { + r.timeout = timeout + } +} diff --git a/pkg/gateway/lb/router/option_test.go b/pkg/gateway/lb/router/option_test.go new file mode 100644 index 0000000000..738ef8343e --- /dev/null +++ b/pkg/gateway/lb/router/option_test.go @@ -0,0 +1,259 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package router provides implementation of Go API for routing http Handler wrapped by rest.Func +package router + +import ( + "testing" + + "github.com/vdaas/vald/pkg/gateway/lb/handler/rest" + "go.uber.org/goleak" +) + +func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + h rest.Handler + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + h: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + h: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithHandler(test.args.h) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithHandler(test.args.h) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + timeout string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + timeout: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + timeout: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithTimeout(test.args.timeout) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithTimeout(test.args.timeout) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/gateway/lb/router/router.go b/pkg/gateway/lb/router/router.go new file mode 100644 index 0000000000..15a38766fc --- /dev/null +++ b/pkg/gateway/lb/router/router.go @@ -0,0 +1,167 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package router provides implementation of Go API for routing http Handler wrapped by rest.Func +package router + +import ( + "net/http" + + "github.com/vdaas/vald/internal/net/http/routing" + "github.com/vdaas/vald/pkg/gateway/lb/handler/rest" +) + +type router struct { + handler rest.Handler + timeout string +} + +// New returns REST route&method information from handler interface. +func New(opts ...Option) http.Handler { + r := new(router) + + for _, opt := range append(defaultOpts, opts...) 
{ + opt(r) + } + + h := r.handler + + return routing.New( + routing.WithRoutes([]routing.Route{ + { + "Index", + []string{ + http.MethodGet, + }, + "/", + h.Index, + }, + { + "Search", + []string{ + http.MethodPost, + }, + "/search", + h.Search, + }, + { + "Search By ID", + []string{ + http.MethodGet, + }, + "/search/{id}", + h.SearchByID, + }, + + { + "Multi Search", + []string{ + http.MethodPost, + }, + "/search/multi", + h.MultiSearch, + }, + { + "Multi Search By ID", + []string{ + http.MethodGet, + }, + "/search/multi/{id}", + h.MultiSearchByID, + }, + { + "Insert", + []string{ + http.MethodPost, + }, + "/insert", + h.Insert, + }, + { + "Multiple Insert", + []string{ + http.MethodPost, + }, + "/insert/multi", + h.MultiInsert, + }, + { + "Update", + []string{ + http.MethodPost, + http.MethodPatch, + http.MethodPut, + }, + "/update", + h.Update, + }, + { + "Multiple Update", + []string{ + http.MethodPost, + http.MethodPatch, + http.MethodPut, + }, + "/update/multi", + h.MultiUpdate, + }, + { + "Upsert", + []string{ + http.MethodPost, + http.MethodPatch, + http.MethodPut, + }, + "/upsert", + h.Upsert, + }, + { + "Multiple Upsert", + []string{ + http.MethodPost, + http.MethodPatch, + http.MethodPut, + }, + "/upsert/multi", + h.MultiUpsert, + }, + { + "Remove", + []string{ + http.MethodDelete, + }, + "/delete/{id}", + h.Remove, + }, + { + "Multiple Remove", + []string{ + http.MethodDelete, + http.MethodPost, + }, + "/delete/multi", + h.MultiRemove, + }, + { + "GetObject", + []string{ + http.MethodGet, + }, + "/object/{id}", + h.GetObject, + }, + }...)) +} diff --git a/pkg/gateway/lb/router/router_test.go b/pkg/gateway/lb/router/router_test.go new file mode 100644 index 0000000000..97248787ae --- /dev/null +++ b/pkg/gateway/lb/router/router_test.go @@ -0,0 +1,100 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// Package router provides implementation of Go API for routing http Handler wrapped by rest.Func +package router + +import ( + "net/http" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type args struct { + opts []Option + } + type want struct { + want http.Handler + } + type test struct { + name string + args args + want want + checkFunc func(want, http.Handler) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got http.Handler) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := New(test.args.opts...) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/pkg/gateway/lb/service/doc.go b/pkg/gateway/lb/service/doc.go new file mode 100644 index 0000000000..c13956cbbe --- /dev/null +++ b/pkg/gateway/lb/service/doc.go @@ -0,0 +1,18 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package service manages the main logic of server. +package service diff --git a/pkg/gateway/lb/service/gateway.go b/pkg/gateway/lb/service/gateway.go new file mode 100644 index 0000000000..add793532e --- /dev/null +++ b/pkg/gateway/lb/service/gateway.go @@ -0,0 +1,138 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// Package service +package service + +import ( + "context" + "reflect" + "sync/atomic" + + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/client/v1/client/discoverer" + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/internal/observability/trace" +) + +type Gateway interface { + Start(ctx context.Context) (<-chan error, error) + GetAgentCount(ctx context.Context) int + Do(ctx context.Context, + f func(ctx context.Context, tgt string, ac vald.Client, copts ...grpc.CallOption) error) error + DoMulti(ctx context.Context, num int, + f func(ctx context.Context, tgt string, ac vald.Client, copts ...grpc.CallOption) error) error + BroadCast(ctx context.Context, + f func(ctx context.Context, tgt string, ac vald.Client, copts ...grpc.CallOption) error) error +} + +type gateway struct { + client discoverer.Client + eg errgroup.Group +} + +func NewGateway(opts ...Option) (gw Gateway, err error) { + g := new(gateway) + for _, opt := range append(defaultGWOpts, opts...) { + if err := opt(g); err != nil { + return nil, errors.ErrOptionFailed(err, reflect.ValueOf(opt)) + } + } + return g, nil +} + +func (g *gateway) Start(ctx context.Context) (<-chan error, error) { + return g.client.Start(ctx) +} + +func (g *gateway) BroadCast(ctx context.Context, + f func(ctx context.Context, target string, ac vald.Client, copts ...grpc.CallOption) error) (err error) { + return g.client.GetClient().RangeConcurrent(ctx, -1, func(ctx context.Context, + addr string, conn *grpc.ClientConn, copts ...grpc.CallOption) (err error) { + ctx, span := trace.StartSpan(ctx, "vald/gateway-lb/service/Gateway.BroadCast") + defer func() { + if span != nil { + span.End() + } + }() + select { + case <-ctx.Done(): + return nil + default: + err = f(ctx, addr, vald.NewValdClient(conn), copts...) + if err != nil { + log.Debug(addr, err) + return err + } + } + return nil + }) +} + +func (g *gateway) Do(ctx context.Context, + f func(ctx context.Context, target string, ac vald.Client, copts ...grpc.CallOption) error) (err error) { + addr := g.client.GetAddrs(ctx)[0] + _, err = g.client.GetClient().Do(ctx, addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + ctx, span := trace.StartSpan(ctx, "vald/gateway-lb/service/Gateway.Do") + defer func() { + if span != nil { + span.End() + } + }() + return nil, f(ctx, addr, vald.NewValdClient(conn), copts...) + }) + return err +} + +func (g *gateway) DoMulti(ctx context.Context, num int, + f func(ctx context.Context, target string, ac vald.Client, copts ...grpc.CallOption) error) (err error) { + var cur uint32 = 0 + limit := uint32(num) + addrs := g.client.GetAddrs(ctx) + log.Debug("DoMulti", addrs) + err = g.client.GetClient().OrderedRange(ctx, addrs, func(ictx context.Context, + addr string, + conn *grpc.ClientConn, + copts ...grpc.CallOption) (err error) { + ictx, span := trace.StartSpan(ictx, "vald/gateway-lb/service/Gateway.DoMulti") + defer func() { + if span != nil { + span.End() + } + }() + if atomic.LoadUint32(&cur) < limit { + err = f(ictx, addr, vald.NewValdClient(conn), copts...) 
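+			// only successful calls count toward num: an error from f is logged and
+			// returned without touching cur, while each success increments cur via
+			// atomic.AddUint32 below, so f stops being called once num targets have succeeded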
+ if err != nil { + log.Debug(addr, err) + return err + } + atomic.AddUint32(&cur, 1) + } + return nil + }) + if err != nil && cur < limit { + return err + } + return nil +} + +func (g *gateway) GetAgentCount(ctx context.Context) int { + return len(g.client.GetAddrs(ctx)) +} diff --git a/pkg/manager/index/handler/grpc/checklist_test.go b/pkg/gateway/lb/service/gateway_test.go similarity index 54% rename from pkg/manager/index/handler/grpc/checklist_test.go rename to pkg/gateway/lb/service/gateway_test.go index f1b4798237..e796843365 100644 --- a/pkg/manager/index/handler/grpc/checklist_test.go +++ b/pkg/gateway/lb/service/gateway_test.go @@ -14,43 +14,45 @@ // limitations under the License. // -package grpc +// Package service +package service import ( + "context" "reflect" - "sync" - "sync/atomic" "testing" - "unsafe" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/client/v1/client/discoverer" + "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/net/grpc" + "go.uber.org/goleak" ) -func Test_checkList_Exists(t *testing.T) { +func TestNewGateway(t *testing.T) { + t.Parallel() type args struct { - key string - } - type fields struct { - mu sync.Mutex - read atomic.Value - dirty map[string]*entryCheckList - misses int + opts []Option } type want struct { - want bool + wantGw Gateway + err error } type test struct { name string args args - fields fields want want - checkFunc func(want, bool) error + checkFunc func(want, Gateway, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got bool) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + defaultCheckFunc := func(w want, gotGw Gateway, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotGw, w.wantGw) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotGw, w.wantGw) } return nil } @@ -60,13 +62,7 @@ func Test_checkList_Exists(t *testing.T) { { name: "test_case_1", args: args { - key: "", - }, - fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + opts: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -79,13 +75,7 @@ func Test_checkList_Exists(t *testing.T) { return test { name: "test_case_2", args: args { - key: "", - }, - fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + opts: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -94,8 +84,11 @@ func Test_checkList_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -105,44 +98,44 @@ func Test_checkList_Exists(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &checkList{ - mu: test.fields.mu, - read: test.fields.read, - dirty: test.fields.dirty, - misses: test.fields.misses, - } - got := m.Exists(test.args.key) - if err := test.checkFunc(test.want, got); err != nil { + gotGw, err := NewGateway(test.args.opts...) 
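+			// NewGateway only returns an error when one of the supplied Options fails;
+			// the default option set (WithErrGroup(errgroup.Get())) never does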
+ if err := test.checkFunc(test.want, gotGw, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_checkList_Check(t *testing.T) { +func Test_gateway_Start(t *testing.T) { + t.Parallel() type args struct { - key string + ctx context.Context } type fields struct { - mu sync.Mutex - read atomic.Value - dirty map[string]*entryCheckList - misses int + client discoverer.Client + eg errgroup.Group } type want struct { + want <-chan error + err error } type test struct { name string args args fields fields want want - checkFunc func(want) error + checkFunc func(want, <-chan error, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want) error { + defaultCheckFunc := func(w want, got <-chan error, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } return nil } tests := []test{ @@ -151,13 +144,11 @@ func Test_checkList_Check(t *testing.T) { { name: "test_case_1", args: args { - key: "", + ctx: nil, }, fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + client: nil, + eg: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -170,13 +161,11 @@ func Test_checkList_Check(t *testing.T) { return test { name: "test_case_2", args: args { - key: "", + ctx: nil, }, fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + client: nil, + eg: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -185,8 +174,11 @@ func Test_checkList_Check(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -196,43 +188,44 @@ func Test_checkList_Check(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &checkList{ - mu: test.fields.mu, - read: test.fields.read, - dirty: test.fields.dirty, - misses: test.fields.misses, + g := &gateway{ + client: test.fields.client, + eg: test.fields.eg, } - m.Check(test.args.key) - if err := test.checkFunc(test.want); err != nil { + got, err := g.Start(test.args.ctx) + if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } }) } } -func Test_entryCheckList_tryStore(t *testing.T) { +func Test_gateway_BroadCast(t *testing.T) { + t.Parallel() type args struct { - i *struct{} + ctx context.Context + f func(ctx context.Context, target string, ac vald.Client, copts ...grpc.CallOption) error } type fields struct { - p unsafe.Pointer + client discoverer.Client + eg errgroup.Group } type want struct { - want bool + err error } type test struct { name string args args fields fields want want - checkFunc func(want, bool) error + checkFunc func(want, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got bool) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } return nil } @@ -242,10 +235,12 @@ func Test_entryCheckList_tryStore(t *testing.T) { { name: "test_case_1", args: args { - i: struct{}{}, + ctx: nil, + f: nil, }, fields: fields { - p: nil, + client: nil, + eg: nil, }, want: 
want{}, checkFunc: defaultCheckFunc, @@ -258,10 +253,12 @@ func Test_entryCheckList_tryStore(t *testing.T) { return test { name: "test_case_2", args: args { - i: struct{}{}, + ctx: nil, + f: nil, }, fields: fields { - p: nil, + client: nil, + eg: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -270,8 +267,11 @@ func Test_entryCheckList_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -281,37 +281,44 @@ func Test_entryCheckList_tryStore(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - e := &entryCheckList{ - p: test.fields.p, + g := &gateway{ + client: test.fields.client, + eg: test.fields.eg, } - got := e.tryStore(test.args.i) - if err := test.checkFunc(test.want, got); err != nil { + err := g.BroadCast(test.args.ctx, test.args.f) + if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_entryCheckList_unexpungeLocked(t *testing.T) { +func Test_gateway_Do(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + f func(ctx context.Context, target string, ac vald.Client, copts ...grpc.CallOption) error + } type fields struct { - p unsafe.Pointer + client discoverer.Client + eg errgroup.Group } type want struct { - wantWasExpunged bool + err error } type test struct { name string + args args fields fields want want - checkFunc func(want, bool) error - beforeFunc func() - afterFunc func() + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) } - defaultCheckFunc := func(w want, gotWasExpunged bool) error { - if !reflect.DeepEqual(gotWasExpunged, w.wantWasExpunged) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotWasExpunged, w.wantWasExpunged) + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } return nil } @@ -320,83 +327,13 @@ func Test_entryCheckList_unexpungeLocked(t *testing.T) { /* { name: "test_case_1", - fields: fields { - p: nil, + args: args { + ctx: nil, + f: nil, }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - fields: fields { - p: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - e := &entryCheckList{ - p: test.fields.p, - } - - gotWasExpunged := e.unexpungeLocked() - if err := test.checkFunc(test.want, gotWasExpunged); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_checkList_missLocked(t *testing.T) { - type fields struct { - mu sync.Mutex - read atomic.Value - dirty map[string]*entryCheckList - misses int - } - type want struct { - } - type test struct { - name string - fields fields - want want - checkFunc func(want) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want) error { - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + client: nil, + eg: nil, }, want: want{}, 
checkFunc: defaultCheckFunc, @@ -408,11 +345,13 @@ func Test_checkList_missLocked(t *testing.T) { func() test { return test { name: "test_case_2", + args: args { + ctx: nil, + f: nil, + }, fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + client: nil, + eg: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -421,50 +360,60 @@ func Test_checkList_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { - test.beforeFunc() + test.beforeFunc(test.args) } if test.afterFunc != nil { - defer test.afterFunc() + defer test.afterFunc(test.args) } if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &checkList{ - mu: test.fields.mu, - read: test.fields.read, - dirty: test.fields.dirty, - misses: test.fields.misses, + g := &gateway{ + client: test.fields.client, + eg: test.fields.eg, } - m.missLocked() - if err := test.checkFunc(test.want); err != nil { + err := g.Do(test.args.ctx, test.args.f) + if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } }) } } -func Test_checkList_dirtyLocked(t *testing.T) { +func Test_gateway_DoMulti(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + num int + f func(ctx context.Context, target string, ac vald.Client, copts ...grpc.CallOption) error + } type fields struct { - mu sync.Mutex - read atomic.Value - dirty map[string]*entryCheckList - misses int + client discoverer.Client + eg errgroup.Group } type want struct { + err error } type test struct { name string + args args fields fields want want - checkFunc func(want) error - beforeFunc func() - afterFunc func() + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) } - defaultCheckFunc := func(w want) error { + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } return nil } tests := []test{ @@ -472,11 +421,14 @@ func Test_checkList_dirtyLocked(t *testing.T) { /* { name: "test_case_1", + args: args { + ctx: nil, + num: 0, + f: nil, + }, fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + client: nil, + eg: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -488,11 +440,14 @@ func Test_checkList_dirtyLocked(t *testing.T) { func() test { return test { name: "test_case_2", + args: args { + ctx: nil, + num: 0, + f: nil, + }, fields: fields { - mu: sync.Mutex{}, - read: nil, - dirty: nil, - misses: 0, + client: nil, + eg: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -501,50 +456,57 @@ func Test_checkList_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { - test.beforeFunc() + test.beforeFunc(test.args) } if test.afterFunc != nil { - defer test.afterFunc() + defer test.afterFunc(test.args) } if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &checkList{ - mu: test.fields.mu, - read: test.fields.read, - dirty: test.fields.dirty, - misses: test.fields.misses, + g := &gateway{ + client: test.fields.client, + eg: test.fields.eg, } - m.dirtyLocked() - if err := test.checkFunc(test.want); err != nil { + err := g.DoMulti(test.args.ctx, test.args.num, test.args.f) + if err := test.checkFunc(test.want, 
err); err != nil { tt.Errorf("error = %v", err) } }) } } -func Test_entryCheckList_tryExpungeLocked(t *testing.T) { +func Test_gateway_GetAgentCount(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } type fields struct { - p unsafe.Pointer + client discoverer.Client + eg errgroup.Group } type want struct { - wantIsExpunged bool + want int } type test struct { name string + args args fields fields want want - checkFunc func(want, bool) error - beforeFunc func() - afterFunc func() + checkFunc func(want, int) error + beforeFunc func(args) + afterFunc func(args) } - defaultCheckFunc := func(w want, gotIsExpunged bool) error { - if !reflect.DeepEqual(gotIsExpunged, w.wantIsExpunged) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotIsExpunged, w.wantIsExpunged) + defaultCheckFunc := func(w want, got int) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -553,8 +515,12 @@ func Test_entryCheckList_tryExpungeLocked(t *testing.T) { /* { name: "test_case_1", + args: args { + ctx: nil, + }, fields: fields { - p: nil, + client: nil, + eg: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -566,8 +532,12 @@ func Test_entryCheckList_tryExpungeLocked(t *testing.T) { func() test { return test { name: "test_case_2", + args: args { + ctx: nil, + }, fields: fields { - p: nil, + client: nil, + eg: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -576,26 +546,29 @@ func Test_entryCheckList_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { - test.beforeFunc() + test.beforeFunc(test.args) } if test.afterFunc != nil { - defer test.afterFunc() + defer test.afterFunc(test.args) } if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - e := &entryCheckList{ - p: test.fields.p, + g := &gateway{ + client: test.fields.client, + eg: test.fields.eg, } - gotIsExpunged := e.tryExpungeLocked() - if err := test.checkFunc(test.want, gotIsExpunged); err != nil { + got := g.GetAgentCount(test.args.ctx) + if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/gateway/lb/service/option.go b/pkg/gateway/lb/service/option.go new file mode 100644 index 0000000000..0ffba26391 --- /dev/null +++ b/pkg/gateway/lb/service/option.go @@ -0,0 +1,47 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// Package service represents gateway's service logic +package service + +import ( + "github.com/vdaas/vald/internal/client/v1/client/discoverer" + "github.com/vdaas/vald/internal/errgroup" +) + +type Option func(g *gateway) error + +var defaultGWOpts = []Option{ + WithErrGroup(errgroup.Get()), +} + +func WithDiscoverer(c discoverer.Client) Option { + return func(g *gateway) error { + if c != nil { + g.client = c + } + return nil + } +} + +func WithErrGroup(eg errgroup.Group) Option { + return func(g *gateway) error { + if eg != nil { + g.eg = eg + } + return nil + } +} diff --git a/pkg/gateway/lb/service/option_test.go b/pkg/gateway/lb/service/option_test.go new file mode 100644 index 0000000000..4f13f4314d --- /dev/null +++ b/pkg/gateway/lb/service/option_test.go @@ -0,0 +1,260 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package service represents gateway's service logic +package service + +import ( + "testing" + + "github.com/vdaas/vald/internal/client/v1/client/discoverer" + "github.com/vdaas/vald/internal/errgroup" + "go.uber.org/goleak" +) + +func TestWithDiscoverer(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + c discoverer.Client + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + c: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + c: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithDiscoverer(test.args.c) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithDiscoverer(test.args.c) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + eg errgroup.Group + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + eg: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + eg: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithErrGroup(test.args.eg) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithErrGroup(test.args.eg) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/gateway/lb/usecase/vald.go b/pkg/gateway/lb/usecase/vald.go new file mode 100644 index 0000000000..dbe5b91d0e --- /dev/null +++ b/pkg/gateway/lb/usecase/vald.go @@ -0,0 +1,212 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package usecase + +import ( + "context" + + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/client/v1/client/discoverer" + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/internal/net/grpc/metric" + "github.com/vdaas/vald/internal/observability" + "github.com/vdaas/vald/internal/runner" + "github.com/vdaas/vald/internal/safety" + "github.com/vdaas/vald/internal/servers/server" + "github.com/vdaas/vald/internal/servers/starter" + "github.com/vdaas/vald/pkg/gateway/lb/config" + handler "github.com/vdaas/vald/pkg/gateway/lb/handler/grpc" + "github.com/vdaas/vald/pkg/gateway/lb/handler/rest" + "github.com/vdaas/vald/pkg/gateway/lb/router" + "github.com/vdaas/vald/pkg/gateway/lb/service" +) + +type run struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + gateway service.Gateway +} + +func New(cfg *config.Data) (r runner.Runner, err error) { + eg := errgroup.Get() + + var gateway service.Gateway + + discovererClientOptions := append( + cfg.Gateway.Discoverer.Client.Opts(), + grpc.WithErrGroup(eg), + ) + var obs observability.Observability + if cfg.Observability.Enabled { + obs, err = observability.NewWithConfig(cfg.Observability) + if err != nil { + return nil, err + } + discovererClientOptions = append( + discovererClientOptions, + grpc.WithDialOptions( + grpc.WithStatsHandler(metric.NewClientHandler()), + ), + ) + } + + client, err := discoverer.New( + discoverer.WithAutoConnect(true), + discoverer.WithName(cfg.Gateway.AgentName), + discoverer.WithNamespace(cfg.Gateway.AgentNamespace), + discoverer.WithPort(cfg.Gateway.AgentPort), + discoverer.WithServiceDNSARecord(cfg.Gateway.AgentDNS), + discoverer.WithDiscovererClient(grpc.New(discovererClientOptions...)), + discoverer.WithDiscovererHostPort( + cfg.Gateway.Discoverer.Host, + cfg.Gateway.Discoverer.Port, + ), + discoverer.WithDiscoverDuration(cfg.Gateway.Discoverer.Duration), + discoverer.WithOptions(cfg.Gateway.Discoverer.AgentClient.Opts()...), + discoverer.WithNodeName(cfg.Gateway.NodeName), + ) + if err != nil { + return nil, err + } + gateway, err = service.NewGateway( + service.WithErrGroup(eg), + service.WithDiscoverer(client), + ) + if err != nil { + return nil, err + } + + v := handler.New( + handler.WithGateway(gateway), + handler.WithErrGroup(eg), + handler.WithReplicationCount(cfg.Gateway.IndexReplica), + handler.WithStreamConcurrency(cfg.Server.GetGRPCStreamConcurrency()), + ) + + grpcServerOptions := []server.Option{ + server.WithGRPCRegistFunc(func(srv *grpc.Server) { + vald.RegisterValdServer(srv, v) + }), + server.WithPreStopFunction(func() error { + // TODO notify another gateway and scheduler + return nil + }), + } + + if cfg.Observability.Enabled { + grpcServerOptions = append( + grpcServerOptions, + server.WithGRPCOption( + grpc.StatsHandler(metric.NewServerHandler()), + ), + ) + } + + srv, err := starter.New( + starter.WithConfig(cfg.Server), + starter.WithREST(func(sc *config.Server) []server.Option { + return []server.Option{ + server.WithHTTPHandler( + router.New( + router.WithHandler( + rest.New( + rest.WithVald(v), + ), + ), + ), + ), + } + }), + starter.WithGRPC(func(sc *config.Server) []server.Option { + return grpcServerOptions + }), + // TODO add GraphQL handler + ) + if err != nil { + return nil, err + } + + return &run{ + eg: eg, + cfg: cfg, + server: srv, + observability: obs, + gateway: gateway, + }, nil +} + +func (r *run) PreStart(ctx 
context.Context) error { + if r.observability != nil { + return r.observability.PreStart(ctx) + } + return nil +} + +func (r *run) Start(ctx context.Context) (<-chan error, error) { + ech := make(chan error, 6) + var gech, sech, oech <-chan error + var err error + if r.observability != nil { + oech = r.observability.Start(ctx) + } + if r.gateway != nil { + gech, err = r.gateway.Start(ctx) + if err != nil { + close(ech) + return nil, err + } + } + sech = r.server.ListenAndServe(ctx) + r.eg.Go(safety.RecoverFunc(func() (err error) { + defer close(ech) + for { + select { + case <-ctx.Done(): + return ctx.Err() + case err = <-oech: + case err = <-gech: + case err = <-sech: + } + if err != nil { + select { + case <-ctx.Done(): + return ctx.Err() + case ech <- err: + } + } + } + })) + return ech, nil +} + +func (r *run) PreStop(ctx context.Context) error { + return nil +} + +func (r *run) Stop(ctx context.Context) error { + if r.observability != nil { + r.observability.Stop(ctx) + } + return r.server.Shutdown(ctx) +} + +func (r *run) PostStop(ctx context.Context) error { + return nil +} diff --git a/pkg/gateway/lb/usecase/vald_test.go b/pkg/gateway/lb/usecase/vald_test.go new file mode 100644 index 0000000000..33babf1fbf --- /dev/null +++ b/pkg/gateway/lb/usecase/vald_test.go @@ -0,0 +1,623 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package usecase + +import ( + "context" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/observability" + "github.com/vdaas/vald/internal/runner" + "github.com/vdaas/vald/internal/servers/starter" + "github.com/vdaas/vald/pkg/gateway/lb/config" + "github.com/vdaas/vald/pkg/gateway/lb/service" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type args struct { + cfg *config.Data + } + type want struct { + wantR runner.Runner + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, runner.Runner, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotR runner.Runner, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotR, w.wantR) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotR, w.wantR) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + cfg: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + cfg: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + gotR, err := New(test.args.cfg) + if err := test.checkFunc(test.want, gotR, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_PreStart(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + gateway service.Gateway + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + gateway: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + gateway: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + gateway: 
test.fields.gateway, + } + + err := r.PreStart(test.args.ctx) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_Start(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + gateway service.Gateway + } + type want struct { + want <-chan error + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, <-chan error, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got <-chan error, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + gateway: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + gateway: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + gateway: test.fields.gateway, + } + + got, err := r.Start(test.args.ctx) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_PreStop(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + gateway service.Gateway + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + gateway: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + gateway: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) 
+ } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + gateway: test.fields.gateway, + } + + err := r.PreStop(test.args.ctx) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_Stop(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + gateway service.Gateway + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + gateway: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + gateway: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + gateway: test.fields.gateway, + } + + err := r.Stop(test.args.ctx) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_PostStop(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + gateway service.Gateway + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + gateway: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + gateway: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + 
tt.Parallel()
+			defer goleak.VerifyNone(tt)
+			if test.beforeFunc != nil {
+				test.beforeFunc(test.args)
+			}
+			if test.afterFunc != nil {
+				defer test.afterFunc(test.args)
+			}
+			if test.checkFunc == nil {
+				test.checkFunc = defaultCheckFunc
+			}
+			r := &run{
+				eg:            test.fields.eg,
+				cfg:           test.fields.cfg,
+				server:        test.fields.server,
+				observability: test.fields.observability,
+				gateway:       test.fields.gateway,
+			}
+
+			err := r.PostStop(test.args.ctx)
+			if err := test.checkFunc(test.want, err); err != nil {
+				tt.Errorf("error = %v", err)
+			}
+		})
+	}
+}
diff --git a/pkg/gateway/meta/README.md b/pkg/gateway/meta/README.md
new file mode 100644
index 0000000000..a6b21f5176
--- /dev/null
+++ b/pkg/gateway/meta/README.md
@@ -0,0 +1 @@
+# vald meta gateway
diff --git a/pkg/gateway/meta/config/config.go b/pkg/gateway/meta/config/config.go
new file mode 100644
index 0000000000..0b03f29c40
--- /dev/null
+++ b/pkg/gateway/meta/config/config.go
@@ -0,0 +1,158 @@
+//
+// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package config stores all server application settings
+package config
+
+import (
+	"github.com/vdaas/vald/internal/config"
+)
+
+type (
+	GlobalConfig = config.GlobalConfig
+	Server       = config.Server
+)
+
+// Data represents the application setting data content (config.yaml).
+// In K8s environment, this configuration is stored in K8s ConfigMap.
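+//
+// Data embeds config.GlobalConfig and groups the server, observability,
+// gRPC client and meta sections that NewConfig below reads and binds.
+//
+// As a rough sketch (the four section keys come from the yaml tags below;
+// the version key of the embedded GlobalConfig is assumed here and all
+// values are illustrative only), the corresponding config.yaml top level
+// would look like:
+//
+//	version: v0.0.0
+//	server_config: {}
+//	observability: {}
+//	client: {}
+//	meta: {}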
+type Data struct { + config.GlobalConfig `json:",inline" yaml:",inline"` + + // Server represent all server configurations + Server *config.Servers `json:"server_config" yaml:"server_config"` + + // Observability represent observability configurations + Observability *config.Observability `json:"observability" yaml:"observability"` + + // Client represent gateway client configuration + Client *config.GRPCClient `json:"client" yaml:"client"` + + // Meta represent meta gateway service configuration + Meta *config.Meta `json:"meta" yaml:"meta"` +} + +func NewConfig(path string) (cfg *Data, err error) { + err = config.Read(path, &cfg) + + if err != nil { + return nil, err + } + + if cfg != nil { + cfg.Bind() + } + + if cfg.Server != nil { + cfg.Server = cfg.Server.Bind() + } + + if cfg.Observability != nil { + cfg.Observability = cfg.Observability.Bind() + } + + if cfg.Meta != nil { + cfg.Meta = cfg.Meta.Bind() + } + + if cfg.Client != nil { + cfg.Client = cfg.Client.Bind() + } + + return cfg, nil +} + +// func FakeData() { +// d := Data{ +// Version: "v0.0.1", +// Server: &config.Servers{ +// Servers: []*config.Server{ +// { +// Name: "agent-rest", +// Host: "127.0.0.1", +// Port: 8080, +// Mode: "REST", +// ProbeWaitTime: "3s", +// ShutdownDuration: "5s", +// HandlerTimeout: "5s", +// IdleTimeout: "2s", +// ReadHeaderTimeout: "1s", +// ReadTimeout: "1s", +// WriteTimeout: "1s", +// }, +// { +// Name: "agent-grpc", +// Host: "127.0.0.1", +// Port: 8082, +// Mode: "GRPC", +// }, +// }, +// MetricsServers: []*config.Server{ +// { +// Name: "pprof", +// Host: "127.0.0.1", +// Port: 6060, +// Mode: "REST", +// ProbeWaitTime: "3s", +// ShutdownDuration: "5s", +// HandlerTimeout: "5s", +// IdleTimeout: "2s", +// ReadHeaderTimeout: "1s", +// ReadTimeout: "1s", +// WriteTimeout: "1s", +// }, +// }, +// HealthCheckServers: []*config.Server{ +// { +// Name: "livenesss", +// Host: "127.0.0.1", +// Port: 3000, +// }, +// { +// Name: "readiness", +// Host: "127.0.0.1", +// Port: 3001, +// }, +// }, +// StartUpStrategy: []string{ +// "livenesss", +// "pprof", +// "agent-grpc", +// "agent-rest", +// "readiness", +// }, +// ShutdownStrategy: []string{ +// "readiness", +// "agent-rest", +// "agent-grpc", +// "pprof", +// "livenesss", +// }, +// FullShutdownDuration: "30s", +// TLS: &config.TLS{ +// Enabled: false, +// Cert: "/path/to/cert", +// Key: "/path/to/key", +// CA: "/path/to/ca", +// }, +// }, +// Gateway: &config.Gateway{ +// AgentPort: 8080, +// AgentName: "vald-agent", +// BackoffEnabled: false,, +// }, +// } +// fmt.Println(config.ToRawYaml(d)) +// } diff --git a/pkg/gateway/meta/config/config_test.go b/pkg/gateway/meta/config/config_test.go new file mode 100644 index 0000000000..e90809127a --- /dev/null +++ b/pkg/gateway/meta/config/config_test.go @@ -0,0 +1,103 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// Package setting stores all server application settings +package config + +import ( + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNewConfig(t *testing.T) { + t.Parallel() + type args struct { + path string + } + type want struct { + wantCfg *Data + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, *Data, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCfg *Data, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCfg, w.wantCfg) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCfg, w.wantCfg) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + path: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + path: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + gotCfg, err := NewConfig(test.args.path) + if err := test.checkFunc(test.want, gotCfg, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/pkg/gateway/meta/handler/doc.go b/pkg/gateway/meta/handler/doc.go new file mode 100644 index 0000000000..86b6d1869d --- /dev/null +++ b/pkg/gateway/meta/handler/doc.go @@ -0,0 +1,17 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package handler diff --git a/pkg/gateway/meta/handler/grpc/handler.go b/pkg/gateway/meta/handler/grpc/handler.go new file mode 100644 index 0000000000..c76bb2e190 --- /dev/null +++ b/pkg/gateway/meta/handler/grpc/handler.go @@ -0,0 +1,772 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// Package grpc provides grpc server logic +package grpc + +import ( + "context" + "fmt" + "sync" + + "github.com/kpango/fuid" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + client "github.com/vdaas/vald/internal/client/v1/client/vald" + "github.com/vdaas/vald/internal/core/algorithm" + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/info" + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/internal/net/grpc/status" + "github.com/vdaas/vald/internal/observability/trace" + "github.com/vdaas/vald/internal/safety" + "github.com/vdaas/vald/pkg/gateway/internal/location" + "github.com/vdaas/vald/pkg/gateway/meta/service" +) + +type server struct { + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int +} + +const apiName = "vald/gateway-meta" + +func New(opts ...Option) vald.Server { + s := new(server) + + for _, opt := range append(defaultOpts, opts...) { + opt(s) + } + + return s +} + +func (s *server) Exists(ctx context.Context, meta *payload.Object_ID) (*payload.Object_ID, error) { + ctx, span := trace.StartSpan(ctx, apiName+".Exists") + defer func() { + if span != nil { + span.End() + } + }() + uuid, err := s.metadata.GetUUID(ctx, meta.GetId()) + if err != nil { + log.Debugf("Exists API failed to get uuid:\t%s\terror:\t%s", meta.GetId(), err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + return nil, status.WrapWithNotFound(fmt.Sprintf("Exists API meta %s's uuid not found", meta.GetId()), err, meta.GetId(), info.Get()) + } + return &payload.Object_ID{ + Id: uuid, + }, nil +} + +func (s *server) Search(ctx context.Context, req *payload.Search_Request) (res *payload.Search_Response, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Search") + defer func() { + if span != nil { + span.End() + } + }() + vl := len(req.GetVector()) + if vl < algorithm.MinimumVectorDimensionSize { + err = errors.ErrInvalidDimensionSize(vl, 0) + if span != nil { + span.SetStatus(trace.StatusCodeInvalidArgument(err.Error())) + } + return nil, status.WrapWithInvalidArgument("Search API invalid vector argument", err, req, info.Get()) + } + res, err = s.search(ctx, func(ctx context.Context, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { + return vc.Search(ctx, req, copts...) 
+	})
+	if err != nil {
+		if span != nil {
+			span.SetStatus(trace.StatusCodeInternal(err.Error()))
+		}
+		return nil, status.WrapWithInternal("Search API failed to process search request", err, req, info.Get())
+	}
+	return res, nil
+}
+
+func (s *server) SearchByID(ctx context.Context, req *payload.Search_IDRequest) (
+	res *payload.Search_Response, err error) {
+	ctx, span := trace.StartSpan(ctx, apiName+".SearchByID")
+	defer func() {
+		if span != nil {
+			span.End()
+		}
+	}()
+	meta := req.GetId()
+	req.Id, err = s.metadata.GetUUID(ctx, meta)
+	if err != nil {
+		log.Debugf("SearchByID API failed to get uuid:\t%s\terror:\t%s", meta, err.Error())
+		req.Id = meta
+		if span != nil {
+			span.SetStatus(trace.StatusCodeNotFound(err.Error()))
+		}
+		return nil, status.WrapWithNotFound(fmt.Sprintf("SearchByID API meta %s's uuid not found", meta), err, req, info.Get())
+	}
+	res, err = s.search(ctx, func(ctx context.Context, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) {
+		return vc.SearchByID(ctx, req, copts...)
+	})
+	if err != nil {
+		if span != nil {
+			span.SetStatus(trace.StatusCodeNotFound(err.Error()))
+		}
+		return nil, status.WrapWithNotFound("SearchByID API failed to process search request", err, req, info.Get())
+	}
+	return res, nil
+}
+
+func (s *server) search(ctx context.Context,
+	f func(ctx context.Context, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error)) (
+	res *payload.Search_Response, err error) {
+	ctx, span := trace.StartSpan(ctx, apiName+".search")
+	defer func() {
+		if span != nil {
+			span.End()
+		}
+	}()
+	res, err = f(ctx, s.gateway, s.copts...)
+	if err != nil {
+		return nil, err
+	}
+	uuids := make([]string, 0, len(res.Results))
+	for _, r := range res.Results {
+		uuids = append(uuids, r.GetId())
+	}
+	if s.metadata != nil {
+		var metas []string
+		metas, err = s.metadata.GetMetas(ctx, uuids...)
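+		// Map the backend's internal UUIDs in the search results back to the
+		// user-facing meta IDs resolved via GetMetas: entries that come back as
+		// empty strings keep their UUID, and any GetMetas error is returned
+		// together with the (partially) rewritten response below.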
+ for i, k := range metas { + if len(k) != 0 { + res.Results[i].Id = k + } + } + } + return res, err +} + +func (s *server) StreamSearch(stream vald.Search_StreamSearchServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamSearch") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Search_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Search(ctx, data.(*payload.Search_Request)) + }) +} + +func (s *server) StreamSearchByID(stream vald.Search_StreamSearchByIDServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamSearchByID") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Search_IDRequest) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.SearchByID(ctx, data.(*payload.Search_IDRequest)) + }) +} + +func (s *server) MultiSearch(ctx context.Context, reqs *payload.Search_MultiRequest) (res *payload.Search_Responses, errs error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiSearch") + defer func() { + if span != nil { + span.End() + } + }() + + res = &payload.Search_Responses{ + Responses: make([]*payload.Search_Response, len(reqs.Requests)), + } + var wg sync.WaitGroup + var mu sync.Mutex + for i, req := range reqs.Requests { + idx, query := i, req + wg.Add(1) + s.eg.Go(func() error { + defer wg.Done() + r, err := s.Search(ctx, query) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + mu.Lock() + errs = errors.Wrap(errs, status.WrapWithNotFound(fmt.Sprintf("MultiSearch API vector %v's search request result not found", query.GetVector()), err, info.Get()).Error()) + mu.Unlock() + return nil + } + res.Responses[idx] = r + return nil + }) + } + wg.Wait() + return res, errs +} + +func (s *server) MultiSearchByID(ctx context.Context, reqs *payload.Search_MultiIDRequest) (res *payload.Search_Responses, errs error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiSearchByID") + defer func() { + if span != nil { + span.End() + } + }() + + res = &payload.Search_Responses{ + Responses: make([]*payload.Search_Response, len(reqs.Requests)), + } + var wg sync.WaitGroup + var mu sync.Mutex + for i, req := range reqs.Requests { + idx, query := i, req + wg.Add(1) + s.eg.Go(func() error { + defer wg.Done() + r, err := s.SearchByID(ctx, query) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + mu.Lock() + errs = errors.Wrap(errs, status.WrapWithNotFound(fmt.Sprintf("MultiSearchByID API uuid %v's search by id request result not found", query.GetId()), err, info.Get()).Error()) + mu.Unlock() + return nil + } + res.Responses[idx] = r + return nil + }) + } + wg.Wait() + return res, errs +} + +func (s *server) Insert(ctx context.Context, req *payload.Insert_Request) (loc *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Insert") + defer func() { + if span != nil { + span.End() + } + }() + vec := req.GetVector() + meta := vec.GetId() + vl := len(vec.GetVector()) + if vl < algorithm.MinimumVectorDimensionSize { + err = errors.ErrInvalidDimensionSize(vl, 0) + if span != nil { + span.SetStatus(trace.StatusCodeInvalidArgument(err.Error())) + } + return nil, status.WrapWithInvalidArgument("Insert API invalid vector 
argument", err, req, info.Get()) + } + if !req.GetConfig().GetSkipStrictExistCheck() { + exists, err := s.metadata.Exists(ctx, meta) + if err != nil { + log.Debug(err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal( + fmt.Sprintf("Insert API meta %s couldn't check meta already exists or not", meta), err, info.Get()) + } + if exists { + err = errors.Wrap(err, errors.ErrMetaDataAlreadyExists(meta).Error()) + if span != nil { + span.SetStatus(trace.StatusCodeAlreadyExists(err.Error())) + } + return nil, status.WrapWithAlreadyExists(fmt.Sprintf("Insert API meta %s already exists", meta), err, info.Get()) + } + req.Config.SkipStrictExistCheck = true + } + uuid := fuid.String() + req.Vector.Id = uuid + loc, err = s.gateway.Insert(ctx, req, s.copts...) + if err != nil { + err = errors.Wrapf(err, "Insert API (do multiple) failed to Insert uuid = %s\tmeta = %s\t info = %#v", uuid, meta, info.Get()) + log.Debug(err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Insert API failed to Execute DoMulti error = %s", err.Error()), err, info.Get()) + } + err = s.metadata.SetUUIDandMeta(ctx, uuid, meta) + if err != nil { + _, rerr := s.gateway.Remove(ctx, &payload.Remove_Request{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) + if rerr != nil { + err = errors.Wrap(err, rerr.Error()) + } + err = errors.Wrapf(err, "Insert API (meta.SetUUIDandMeta) failed to Register Metadata Vectors = %#v\t info = %#v", req, info.Get()) + log.Debug(err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Insert API meta %s & uuid %s couldn't store", meta, uuid), err, info.Get()) + } + return loc, nil +} + +func (s *server) StreamInsert(stream vald.Insert_StreamInsertServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamInsert") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Insert_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Insert(ctx, data.(*payload.Insert_Request)) + }) +} + +func (s *server) MultiInsert(ctx context.Context, reqs *payload.Insert_MultiRequest) (res *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiInsert") + defer func() { + if span != nil { + span.End() + } + }() + vecs := reqs.GetRequests() + metaMap := make(map[string]string, len(vecs)) + metas := make([]string, 0, len(vecs)) + for i, req := range vecs { + vec := req.GetVector() + uuid := fuid.String() + meta := vec.GetId() + metaMap[uuid] = meta + metas = append(metas, meta) + reqs.Requests[i].Vector.Id = uuid + if !req.GetConfig().GetSkipStrictExistCheck() { + exists, err := s.metadata.Exists(ctx, meta) + if err != nil { + log.Debug(err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal( + fmt.Sprintf("MultiInsert API couldn't check metadata exists or not metas = %v", metas), err, info.Get()) + } + if exists { + if span != nil { + span.SetStatus(trace.StatusCodeAlreadyExists(err.Error())) + } + return nil, status.WrapWithAlreadyExists( + fmt.Sprintf("MultiInsert API failed metadata already exists meta = %s", meta), err, info.Get()) + } + reqs.Requests[i].Config.SkipStrictExistCheck = true + } + } + + res, err = 
s.gateway.MultiInsert(ctx, reqs, s.copts...) + if err != nil { + err = errors.Wrapf(err, "MultiInsert API failed to Insert info = %#v", info.Get()) + log.Debug(err) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiInsert API failed to Insert error = %s", err.Error()), err, info.Get()) + } + + err = s.metadata.SetUUIDandMetas(ctx, metaMap) + if err != nil { + removeList := make([]*payload.Remove_Request, 0, len(reqs.GetRequests())) + for _, req := range reqs.GetRequests() { + removeList = append(removeList, &payload.Remove_Request{ + Id: &payload.Object_ID{ + Id: req.GetVector().GetId(), + }, + }) + } + _, rerr := s.gateway.MultiRemove(ctx, &payload.Remove_MultiRequest{ + Requests: removeList, + }, s.copts...) + if rerr != nil { + err = errors.Wrap(err, rerr.Error()) + } + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiInsert API failed SetUUIDandMetas %#v", metaMap), err, info.Get()) + } + return res, nil +} + +func (s *server) Update(ctx context.Context, req *payload.Update_Request) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Update") + defer func() { + if span != nil { + span.End() + } + }() + meta := req.GetVector().GetId() + uuid, err := s.metadata.GetUUID(ctx, meta) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + return nil, status.WrapWithNotFound(fmt.Sprintf("Update API failed GetUUID meta = %s", meta), err, info.Get()) + } + req.Vector.Id = uuid + res, err = s.gateway.Update(ctx, req, s.copts...) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Update API failed to insert data for update %#v", req), err, info.Get()) + } + return res, nil +} + +func (s *server) StreamUpdate(stream vald.Update_StreamUpdateServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamUpdate") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Update_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Update(ctx, data.(*payload.Update_Request)) + }) +} + +func (s *server) MultiUpdate(ctx context.Context, reqs *payload.Update_MultiRequest) (res *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiUpdate") + defer func() { + if span != nil { + span.End() + } + }() + ids := make([]string, 0, len(reqs.GetRequests())) + for _, req := range reqs.GetRequests() { + ids = append(ids, req.GetVector().GetId()) + } + metas, err := s.metadata.GetUUIDs(ctx, ids...) 
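+	// All user-facing IDs are resolved to internal UUIDs in a single GetUUIDs
+	// call before the update is forwarded; if that lookup fails, the whole
+	// MultiUpdate request is rejected below rather than partially applied.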
+ if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiUpdate API failed MultiUpdate request %#v", ids), err, info.Get()) + } + + for i, meta := range metas { + reqs.Requests[i].Vector.Id = meta + } + + res, err = s.gateway.MultiUpdate(ctx, reqs) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiUpdate API failed MultiUpdate request %#v", ids), err, info.Get()) + } + return res, nil +} + +func (s *server) Upsert(ctx context.Context, req *payload.Upsert_Request) (loc *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Upsert") + defer func() { + if span != nil { + span.End() + } + }() + + vec := req.GetVector() + meta := vec.GetId() + filters := req.GetConfig().GetFilters() + exists, err := s.metadata.Exists(ctx, meta) + if err != nil { + log.Debugf("Upsert API metadata exists check error:\t%s", err.Error()) + } + if !exists { + loc, err = s.Insert(ctx, &payload.Insert_Request{ + Vector: vec, + Config: &payload.Insert_Config{ + SkipStrictExistCheck: true, + Filters: filters, + }, + }) + } else { + loc, err = s.Update(ctx, &payload.Update_Request{ + Vector: vec, + Config: &payload.Update_Config{ + SkipStrictExistCheck: true, + Filters: filters, + }, + }) + } + if err != nil { + log.Debugf("Upsert API failed to process request uuid:\t%s\terror:\t%s", meta, err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Upsert API failed to process request %s", meta), err, info.Get()) + } + return loc, nil +} + +func (s *server) StreamUpsert(stream vald.Upsert_StreamUpsertServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamUpsert") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Upsert_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Upsert(ctx, data.(*payload.Upsert_Request)) + }) +} + +func (s *server) MultiUpsert(ctx context.Context, reqs *payload.Upsert_MultiRequest) (locs *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiUpsert") + defer func() { + if span != nil { + span.End() + } + }() + + insertReqs := make([]*payload.Insert_Request, 0, len(reqs.GetRequests())) + updateReqs := make([]*payload.Update_Request, 0, len(reqs.GetRequests())) + + ids := make([]string, 0, len(reqs.GetRequests())) + for _, req := range reqs.GetRequests() { + vec := req.GetVector() + uuid := vec.GetId() + ids = append(ids, uuid) + _, err = s.Exists(ctx, &payload.Object_ID{ + Id: uuid, + }) + filters := req.GetConfig().GetFilters() + if err != nil { + insertReqs = append(insertReqs, &payload.Insert_Request{ + Vector: vec, + Config: &payload.Insert_Config{ + SkipStrictExistCheck: true, + Filters: filters, + }, + }) + } else { + updateReqs = append(updateReqs, &payload.Update_Request{ + Vector: vec, + Config: &payload.Update_Config{ + SkipStrictExistCheck: true, + Filters: filters, + }, + }) + } + } + + insertLocs := make([]*payload.Object_Location, 0, len(insertReqs)) + updateLocs := make([]*payload.Object_Location, 0, len(updateReqs)) + + eg, ectx := errgroup.New(ctx) + if len(updateReqs) <= 0 { + eg.Go(safety.RecoverFunc(func() error { + ectx, span := trace.StartSpan(ectx, 
apiName+".MultiUpsert/Go-MultiUpdate") + defer func() { + if span != nil { + span.End() + } + }() + var err error + loc, err := s.MultiUpdate(ectx, &payload.Update_MultiRequest{ + Requests: updateReqs, + }) + if err == nil { + updateLocs = loc.GetLocations() + } + return err + })) + } + if len(insertReqs) <= 0 { + eg.Go(safety.RecoverFunc(func() error { + ectx, span := trace.StartSpan(ectx, apiName+".MultiUpsert/Go-MultiInsert") + defer func() { + if span != nil { + span.End() + } + }() + var err error + loc, err := s.MultiInsert(ectx, &payload.Insert_MultiRequest{ + Requests: insertReqs, + }) + if err == nil { + insertLocs = loc.GetLocations() + } + return err + })) + } + err = eg.Wait() + if err != nil { + log.Debugf("MultiUpsert API failed to process request uuids:\t%s\terror:\t%s", ids, err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiUpsert API failed to process request %v", ids), err, info.Get()) + } + + return location.ReStructure(ids, &payload.Object_Locations{ + Locations: append(insertLocs, updateLocs...), + }), nil +} + +func (s *server) Remove(ctx context.Context, req *payload.Remove_Request) (loc *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".Remove") + defer func() { + if span != nil { + span.End() + } + }() + meta := req.GetId().GetId() + uuid, err := s.metadata.GetUUID(ctx, meta) + if err != nil { + log.Debugf("Remove API failed to get uuid:\t%s\terror:\t%s", meta, err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + return nil, status.WrapWithNotFound(fmt.Sprintf("Remove API meta %s's uuid not found", meta), err, info.Get()) + } + + req.Id.Id = uuid + loc, err = s.gateway.Remove(ctx, req, s.copts...) + if err != nil { + log.Debugf("Remove API failed to process request uuid:\t%s\terror:\t%s", meta, err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Remove API failed request uuid %s", uuid), err, info.Get()) + } + _, err = s.metadata.DeleteMeta(ctx, uuid) + if err != nil { + log.Debugf("Remove API failed to remove metadata:\t%s\terror:\t%s", meta, err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("Remove API failed Delete metadata uuid = %s", uuid), err, info.Get()) + } + return loc, nil +} + +func (s *server) StreamRemove(stream vald.Remove_StreamRemoveServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamRemove") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Remove_Request) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.Remove(ctx, data.(*payload.Remove_Request)) + }) +} + +func (s *server) MultiRemove(ctx context.Context, reqs *payload.Remove_MultiRequest) (locs *payload.Object_Locations, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".MultiRemove") + defer func() { + if span != nil { + span.End() + } + }() + ids := make([]string, 0, len(reqs.GetRequests())) + for _, req := range reqs.GetRequests() { + ids = append(ids, req.GetId().GetId()) + } + uuids, err := s.metadata.GetUUIDs(ctx, ids...) 
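+	// The user-facing IDs are resolved to internal UUIDs first; the backend
+	// removal is issued before DeleteMetas below, so a failed removal leaves
+	// the UUID/meta mappings in place.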
+ if err != nil { + log.Debugf("MultiRemove API failed to process request uuids:\t%v\terror:\t%s", ids, err.Error()) + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + return nil, status.WrapWithNotFound(fmt.Sprintf("MultiRemove API meta datas %v's uuid not found", ids), err, info.Get()) + } + for i, id := range uuids { + reqs.Requests[i].Id.Id = id + } + locs, err = s.gateway.MultiRemove(ctx, reqs, s.copts...) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiRemove API failed to request uuids %v metas %v ", uuids, ids), err, info.Get()) + } + _, err = s.metadata.DeleteMetas(ctx, uuids...) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeInternal(err.Error())) + } + return nil, status.WrapWithInternal(fmt.Sprintf("MultiRemove API failed to DeleteMetas uuids %v ", uuids), err, info.Get()) + } + return locs, nil +} + +func (s *server) GetObject(ctx context.Context, id *payload.Object_ID) (vec *payload.Object_Vector, err error) { + ctx, span := trace.StartSpan(ctx, apiName+".GetObject") + defer func() { + if span != nil { + span.End() + } + }() + meta := id.GetId() + uuid, err := s.metadata.GetUUID(ctx, meta) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + return nil, status.WrapWithNotFound(fmt.Sprintf("GetObject API meta %s's uuid not found", meta), err, info.Get()) + } + id.Id = uuid + vec, err = s.gateway.GetObject(ctx, id, s.copts...) + if err != nil { + if span != nil { + span.SetStatus(trace.StatusCodeNotFound(err.Error())) + } + return nil, status.WrapWithNotFound(fmt.Sprintf("GetObject API meta %s uuid %s Object not found", meta, uuid), err, info.Get()) + } + vec.Id = meta + return vec, nil +} + +func (s *server) StreamGetObject(stream vald.Object_StreamGetObjectServer) error { + ctx, span := trace.StartSpan(stream.Context(), apiName+".StreamGetObject") + defer func() { + if span != nil { + span.End() + } + }() + return grpc.BidirectionalStream(ctx, stream, s.streamConcurrency, + func() interface{} { return new(payload.Object_ID) }, + func(ctx context.Context, data interface{}) (interface{}, error) { + return s.GetObject(ctx, data.(*payload.Object_ID)) + }) +} diff --git a/hack/benchmark/internal/client/ngtd/grpc/client_test.go b/pkg/gateway/meta/handler/grpc/handler_test.go similarity index 52% rename from hack/benchmark/internal/client/ngtd/grpc/client_test.go rename to pkg/gateway/meta/handler/grpc/handler_test.go index da40edb4e2..356739ba38 100644 --- a/hack/benchmark/internal/client/ngtd/grpc/client_test.go +++ b/pkg/gateway/meta/handler/grpc/handler_test.go @@ -14,7 +14,7 @@ // limitations under the License. 
// -// Package grpc provides grpc client functions +// Package grpc provides grpc server logic package grpc import ( @@ -22,35 +22,33 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/internal/client" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + client "github.com/vdaas/vald/internal/client/v1/client/vald" + "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" - proto "github.com/yahoojapan/ngtd/proto" - + "github.com/vdaas/vald/pkg/gateway/meta/service" "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context opts []Option } type want struct { - want Client - err error + want vald.Server } type test struct { name string args args want want - checkFunc func(want, Client, error) error + checkFunc func(want, vald.Server) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got Client, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) - } + defaultCheckFunc := func(w want, got vald.Server) error { if !reflect.DeepEqual(got, w.want) { return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } @@ -62,7 +60,6 @@ func TestNew(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, opts: nil, }, want: want{}, @@ -76,7 +73,6 @@ func TestNew(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, opts: nil, }, want: want{}, @@ -86,9 +82,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -99,27 +97,29 @@ func TestNew(t *testing.T) { test.checkFunc = defaultCheckFunc } - got, err := New(test.args.ctx, test.args.opts...) - if err := test.checkFunc(test.want, got, err); err != nil { + got := New(test.args.opts...) 
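+			// New cannot fail in this handler, so checkFunc only compares the
+			// constructed vald.Server against the expected value.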
+ if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_Exists(t *testing.T) { +func Test_server_Exists(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectID + ctx context.Context + meta *payload.Object_ID } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - want *client.ObjectID + want *payload.Object_ID err error } type test struct { @@ -127,11 +127,11 @@ func Test_ngtdClient_Exists(t *testing.T) { args args fields fields want want - checkFunc func(want, *client.ObjectID, error) error + checkFunc func(want, *payload.Object_ID, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.ObjectID, err error) error { + defaultCheckFunc := func(w want, got *payload.Object_ID, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -147,12 +147,14 @@ func Test_ngtdClient_Exists(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + meta: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -166,12 +168,14 @@ func Test_ngtdClient_Exists(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + meta: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -180,9 +184,11 @@ func Test_ngtdClient_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -192,50 +198,54 @@ func Test_ngtdClient_Exists(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - got, err := c.Exists(test.args.ctx, test.args.req) + got, err := s.Exists(test.args.ctx, test.args.meta) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_Search(t *testing.T) { +func Test_server_Search(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.SearchRequest + req *payload.Search_Request } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - want *client.SearchResponse - err error + wantRes *payload.Search_Response + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.SearchResponse, error) error + checkFunc func(want, *payload.Search_Response, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.SearchResponse, err error) error { + defaultCheckFunc := 
func(w want, gotRes *payload.Search_Response, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) } return nil } @@ -249,9 +259,11 @@ func Test_ngtdClient_Search(t *testing.T) { req: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -268,9 +280,11 @@ func Test_ngtdClient_Search(t *testing.T) { req: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -279,9 +293,11 @@ func Test_ngtdClient_Search(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -291,50 +307,54 @@ func Test_ngtdClient_Search(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - got, err := c.Search(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotRes, err := s.Search(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_SearchByID(t *testing.T) { +func Test_server_SearchByID(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.SearchIDRequest + req *payload.Search_IDRequest } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - want *client.SearchResponse - err error + wantRes *payload.Search_Response + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.SearchResponse, error) error + checkFunc func(want, *payload.Search_Response, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.SearchResponse, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) } return nil } @@ -348,9 +368,11 @@ func Test_ngtdClient_SearchByID(t *testing.T) { req: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -367,9 +389,11 @@ func 
Test_ngtdClient_SearchByID(t *testing.T) { req: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -378,9 +402,11 @@ func Test_ngtdClient_SearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -390,48 +416,55 @@ func Test_ngtdClient_SearchByID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - got, err := c.SearchByID(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotRes, err := s.SearchByID(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_StreamSearch(t *testing.T) { +func Test_server_search(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.SearchRequest - f func(*client.SearchResponse, error) + ctx context.Context + f func(ctx context.Context, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantRes *payload.Search_Response + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Search_Response, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -441,13 +474,14 @@ func Test_ngtdClient_StreamSearch(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, f: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -461,13 +495,14 @@ func Test_ngtdClient_StreamSearch(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, f: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -476,9 +511,11 @@ func Test_ngtdClient_StreamSearch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -488,31 +525,33 @@ func 
Test_ngtdClient_StreamSearch(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamSearch(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.search(test.args.ctx, test.args.f) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_StreamSearchByID(t *testing.T) { +func Test_server_StreamSearch(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.SearchIDRequest - f func(*client.SearchResponse, error) + stream vald.Search_StreamSearchServer } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { err error @@ -538,14 +577,14 @@ func Test_ngtdClient_StreamSearchByID(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -558,14 +597,14 @@ func Test_ngtdClient_StreamSearchByID(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -574,9 +613,11 @@ func Test_ngtdClient_StreamSearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -586,30 +627,33 @@ func Test_ngtdClient_StreamSearchByID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamSearchByID(test.args.ctx, test.args.dataProvider, test.args.f) + err := s.StreamSearch(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_Insert(t *testing.T) { +func Test_server_StreamSearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVector + stream vald.Search_StreamSearchByIDServer } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { err error @@ -635,13 +679,14 @@ func Test_ngtdClient_Insert(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: 
nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -654,13 +699,14 @@ func Test_ngtdClient_Insert(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -669,9 +715,11 @@ func Test_ngtdClient_Insert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -681,48 +729,55 @@ func Test_ngtdClient_Insert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.Insert(test.args.ctx, test.args.req) + err := s.StreamSearchByID(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_StreamInsert(t *testing.T) { +func Test_server_MultiSearch(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectVector - f func(error) + ctx context.Context + reqs *payload.Search_MultiRequest } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantRes *payload.Search_Responses + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Search_Responses, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Responses, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -732,13 +787,14 @@ func Test_ngtdClient_StreamInsert(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -752,13 +808,14 @@ func Test_ngtdClient_StreamInsert(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - dataProvider: nil, - f: nil, + reqs: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -767,9 +824,11 @@ func Test_ngtdClient_StreamInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
test.beforeFunc(test.args) } @@ -779,47 +838,55 @@ func Test_ngtdClient_StreamInsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamInsert(test.args.ctx, test.args.dataProvider, test.args.f) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.MultiSearch(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_MultiInsert(t *testing.T) { +func Test_server_MultiSearchByID(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVectors + ctx context.Context + reqs *payload.Search_MultiIDRequest } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantRes *payload.Search_Responses + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Search_Responses, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Search_Responses, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -829,12 +896,14 @@ func Test_ngtdClient_MultiInsert(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -848,12 +917,14 @@ func Test_ngtdClient_MultiInsert(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -862,9 +933,11 @@ func Test_ngtdClient_MultiInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -874,47 +947,55 @@ func Test_ngtdClient_MultiInsert(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.MultiInsert(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.MultiSearchByID(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = 
%v", err) } - }) } } -func Test_ngtdClient_Update(t *testing.T) { +func Test_server_Insert(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.ObjectVector + req *payload.Insert_Request } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantLoc *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotLoc *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotLoc, w.wantLoc) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoc, w.wantLoc) + } return nil } tests := []test{ @@ -927,9 +1008,11 @@ func Test_ngtdClient_Update(t *testing.T) { req: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -946,9 +1029,11 @@ func Test_ngtdClient_Update(t *testing.T) { req: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -957,9 +1042,11 @@ func Test_ngtdClient_Update(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -969,31 +1056,33 @@ func Test_ngtdClient_Update(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.Update(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotLoc, err := s.Insert(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotLoc, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_StreamUpdate(t *testing.T) { +func Test_server_StreamInsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectVector - f func(error) + stream vald.Insert_StreamInsertServer } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { err error @@ -1019,14 +1108,14 @@ func Test_ngtdClient_StreamUpdate(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1039,14 +1128,14 @@ func Test_ngtdClient_StreamUpdate(t *testing.T) { 
return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1055,9 +1144,11 @@ func Test_ngtdClient_StreamUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1067,47 +1158,55 @@ func Test_ngtdClient_StreamUpdate(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamUpdate(test.args.ctx, test.args.dataProvider, test.args.f) + err := s.StreamInsert(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_MultiUpdate(t *testing.T) { +func Test_server_MultiInsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectVectors + ctx context.Context + reqs *payload.Insert_MultiRequest } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantRes *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1117,12 +1216,14 @@ func Test_ngtdClient_MultiUpdate(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1136,12 +1237,14 @@ func Test_ngtdClient_MultiUpdate(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1150,9 +1253,11 @@ func Test_ngtdClient_MultiUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1162,47 +1267,55 @@ func Test_ngtdClient_MultiUpdate(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: 
test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.MultiUpdate(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.MultiInsert(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_Remove(t *testing.T) { +func Test_server_Update(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.ObjectID + req *payload.Update_Request } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantRes *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1215,9 +1328,11 @@ func Test_ngtdClient_Remove(t *testing.T) { req: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1234,9 +1349,11 @@ func Test_ngtdClient_Remove(t *testing.T) { req: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1245,9 +1362,11 @@ func Test_ngtdClient_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1257,31 +1376,33 @@ func Test_ngtdClient_Remove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.Remove(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.Update(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_StreamRemove(t *testing.T) { +func Test_server_StreamUpdate(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectID - f func(error) + stream vald.Update_StreamUpdateServer } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client 
+ copts []grpc.CallOption + streamConcurrency int } type want struct { err error @@ -1307,14 +1428,14 @@ func Test_ngtdClient_StreamRemove(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1327,14 +1448,14 @@ func Test_ngtdClient_StreamRemove(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1343,9 +1464,11 @@ func Test_ngtdClient_StreamRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1355,47 +1478,55 @@ func Test_ngtdClient_StreamRemove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamRemove(test.args.ctx, test.args.dataProvider, test.args.f) + err := s.StreamUpdate(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_MultiRemove(t *testing.T) { +func Test_server_MultiUpdate(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ObjectIDs + ctx context.Context + reqs *payload.Update_MultiRequest } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantRes *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotRes, w.wantRes) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) + } return nil } tests := []test{ @@ -1405,12 +1536,14 @@ func Test_ngtdClient_MultiRemove(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1424,12 +1557,14 @@ func Test_ngtdClient_MultiRemove(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: 
defaultCheckFunc, @@ -1438,9 +1573,11 @@ func Test_ngtdClient_MultiRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1450,50 +1587,54 @@ func Test_ngtdClient_MultiRemove(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.MultiRemove(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotRes, err := s.MultiUpdate(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_GetObject(t *testing.T) { +func Test_server_Upsert(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - req *client.ObjectID + req *payload.Upsert_Request } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - want *client.ObjectVector - err error + wantLoc *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.ObjectVector, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.ObjectVector, err error) error { + defaultCheckFunc := func(w want, gotLoc *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotLoc, w.wantLoc) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoc, w.wantLoc) } return nil } @@ -1507,9 +1648,11 @@ func Test_ngtdClient_GetObject(t *testing.T) { req: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1526,9 +1669,11 @@ func Test_ngtdClient_GetObject(t *testing.T) { req: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1537,9 +1682,11 @@ func Test_ngtdClient_GetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1549,31 +1696,33 @@ func Test_ngtdClient_GetObject(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: 
test.fields.streamConcurrency, } - got, err := c.GetObject(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, got, err); err != nil { + gotLoc, err := s.Upsert(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotLoc, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_StreamGetObject(t *testing.T) { +func Test_server_StreamUpsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - dataProvider func() *client.ObjectID - f func(*client.ObjectVector, error) + stream vald.Upsert_StreamUpsertServer } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { err error @@ -1599,14 +1748,14 @@ func Test_ngtdClient_StreamGetObject(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1619,14 +1768,14 @@ func Test_ngtdClient_StreamGetObject(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - dataProvider: nil, - f: nil, + stream: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1635,9 +1784,11 @@ func Test_ngtdClient_StreamGetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1647,47 +1798,55 @@ func Test_ngtdClient_StreamGetObject(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.StreamGetObject(test.args.ctx, test.args.dataProvider, test.args.f) + err := s.StreamUpsert(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_CreateIndex(t *testing.T) { +func Test_server_MultiUpsert(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ControlCreateIndexRequest + ctx context.Context + reqs *payload.Upsert_MultiRequest } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantLocs *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotLocs *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotLocs, w.wantLocs) { + 
return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLocs, w.wantLocs) + } return nil } tests := []test{ @@ -1697,12 +1856,14 @@ func Test_ngtdClient_CreateIndex(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1716,12 +1877,14 @@ func Test_ngtdClient_CreateIndex(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - req: nil, + reqs: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1730,9 +1893,11 @@ func Test_ngtdClient_CreateIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1742,46 +1907,55 @@ func Test_ngtdClient_CreateIndex(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.CreateIndex(test.args.ctx, test.args.req) - if err := test.checkFunc(test.want, err); err != nil { + gotLocs, err := s.MultiUpsert(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotLocs, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_SaveIndex(t *testing.T) { +func Test_server_Remove(t *testing.T) { + t.Parallel() type args struct { ctx context.Context + req *payload.Remove_Request } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - err error + wantLoc *payload.Object_Location + err error } type test struct { name string args args fields fields want want - checkFunc func(want, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, err error) error { + defaultCheckFunc := func(w want, gotLoc *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } + if !reflect.DeepEqual(gotLoc, w.wantLoc) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLoc, w.wantLoc) + } return nil } tests := []test{ @@ -1791,11 +1965,14 @@ func Test_ngtdClient_SaveIndex(t *testing.T) { name: "test_case_1", args: args { ctx: nil, + req: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1809,11 +1986,14 @@ func Test_ngtdClient_SaveIndex(t *testing.T) { name: "test_case_2", args: args { ctx: nil, + req: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1822,9 +2002,11 @@ func 
Test_ngtdClient_SaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1834,30 +2016,33 @@ func Test_ngtdClient_SaveIndex(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.SaveIndex(test.args.ctx) - if err := test.checkFunc(test.want, err); err != nil { + gotLoc, err := s.Remove(test.args.ctx, test.args.req) + if err := test.checkFunc(test.want, gotLoc, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_CreateAndSaveIndex(t *testing.T) { +func Test_server_StreamRemove(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - req *client.ControlCreateIndexRequest + stream vald.Remove_StreamRemoveServer } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { err error @@ -1883,13 +2068,14 @@ func Test_ngtdClient_CreateAndSaveIndex(t *testing.T) { { name: "test_case_1", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1902,13 +2088,14 @@ func Test_ngtdClient_CreateAndSaveIndex(t *testing.T) { return test { name: "test_case_2", args: args { - ctx: nil, - req: nil, + stream: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1917,9 +2104,11 @@ func Test_ngtdClient_CreateAndSaveIndex(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1929,49 +2118,54 @@ func Test_ngtdClient_CreateAndSaveIndex(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - err := c.CreateAndSaveIndex(test.args.ctx, test.args.req) + err := s.StreamRemove(test.args.stream) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_ngtdClient_IndexInfo(t *testing.T) { +func Test_server_MultiRemove(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context + ctx context.Context + reqs *payload.Remove_MultiRequest } type fields struct { - addr string - Client grpc.Client - opts []grpc.Option + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - want *client.InfoIndex - err error + 
wantLocs *payload.Object_Locations + err error } type test struct { name string args args fields fields want want - checkFunc func(want, *client.InfoIndex, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *client.InfoIndex, err error) error { + defaultCheckFunc := func(w want, gotLocs *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + if !reflect.DeepEqual(gotLocs, w.wantLocs) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLocs, w.wantLocs) } return nil } @@ -1982,11 +2176,14 @@ func Test_ngtdClient_IndexInfo(t *testing.T) { name: "test_case_1", args: args { ctx: nil, + reqs: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -2000,11 +2197,14 @@ func Test_ngtdClient_IndexInfo(t *testing.T) { name: "test_case_2", args: args { ctx: nil, + reqs: nil, }, fields: fields { - addr: "", - Client: nil, - opts: nil, + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -2013,9 +2213,11 @@ func Test_ngtdClient_IndexInfo(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2025,39 +2227,54 @@ func Test_ngtdClient_IndexInfo(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - c := &ngtdClient{ - addr: test.fields.addr, - Client: test.fields.Client, - opts: test.fields.opts, + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, } - got, err := c.IndexInfo(test.args.ctx) - if err := test.checkFunc(test.want, got, err); err != nil { + gotLocs, err := s.MultiRemove(test.args.ctx, test.args.reqs) + if err := test.checkFunc(test.want, gotLocs, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_searchRequestToNgtdSearchRequest(t *testing.T) { +func Test_server_GetObject(t *testing.T) { + t.Parallel() type args struct { - in *client.SearchRequest + ctx context.Context + id *payload.Object_ID + } + type fields struct { + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - want *proto.SearchRequest + wantVec *payload.Object_Vector + err error } type test struct { name string args args + fields fields want want - checkFunc func(want, *proto.SearchRequest) error + checkFunc func(want, *payload.Object_Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *proto.SearchRequest) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + defaultCheckFunc := func(w want, gotVec *payload.Object_Vector, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotVec, 
w.wantVec) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) } return nil } @@ -2067,7 +2284,15 @@ func Test_searchRequestToNgtdSearchRequest(t *testing.T) { { name: "test_case_1", args: args { - in: nil, + ctx: nil, + id: nil, + }, + fields: fields { + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -2080,7 +2305,15 @@ func Test_searchRequestToNgtdSearchRequest(t *testing.T) { return test { name: "test_case_2", args: args { - in: nil, + ctx: nil, + id: nil, + }, + fields: fields { + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -2089,9 +2322,11 @@ func Test_searchRequestToNgtdSearchRequest(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2101,34 +2336,49 @@ func Test_searchRequestToNgtdSearchRequest(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, + } - got := searchRequestToNgtdSearchRequest(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { + gotVec, err := s.GetObject(test.args.ctx, test.args.id) + if err := test.checkFunc(test.want, gotVec, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_searchIDRequestToNgtdSearchRequest(t *testing.T) { +func Test_server_StreamGetObject(t *testing.T) { + t.Parallel() type args struct { - in *client.SearchIDRequest + stream vald.Object_StreamGetObjectServer + } + type fields struct { + eg errgroup.Group + metadata service.Meta + gateway client.Client + copts []grpc.CallOption + streamConcurrency int } type want struct { - want *proto.SearchRequest + err error } type test struct { name string args args + fields fields want want - checkFunc func(want, *proto.SearchRequest) error + checkFunc func(want, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *proto.SearchRequest) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } return nil } @@ -2138,7 +2388,14 @@ func Test_searchIDRequestToNgtdSearchRequest(t *testing.T) { { name: "test_case_1", args: args { - in: nil, + stream: nil, + }, + fields: fields { + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -2151,7 +2408,14 @@ func Test_searchIDRequestToNgtdSearchRequest(t *testing.T) { return test { name: "test_case_2", args: args { - in: nil, + stream: nil, + }, + fields: fields { + eg: nil, + metadata: nil, + gateway: nil, + copts: nil, + streamConcurrency: 0, }, want: want{}, checkFunc: defaultCheckFunc, @@ -2160,9 +2424,11 @@ func Test_searchIDRequestToNgtdSearchRequest(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if 
test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2172,584 +2438,18 @@ func Test_searchIDRequestToNgtdSearchRequest(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } + s := &server{ + eg: test.fields.eg, + metadata: test.fields.metadata, + gateway: test.fields.gateway, + copts: test.fields.copts, + streamConcurrency: test.fields.streamConcurrency, + } - got := searchIDRequestToNgtdSearchRequest(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { + err := s.StreamGetObject(test.args.stream) + if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - - }) - } -} - -func Test_ngtdSearchResponseToSearchResponse(t *testing.T) { - type args struct { - in *proto.SearchResponse - } - type want struct { - want *client.SearchResponse - } - type test struct { - name string - args args - want want - checkFunc func(want, *client.SearchResponse) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got *client.SearchResponse) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := ngtdSearchResponseToSearchResponse(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_ngtdGetObjectResponseToObjectVector(t *testing.T) { - type args struct { - in *proto.GetObjectResponse - } - type want struct { - want *client.ObjectVector - } - type test struct { - name string - args args - want want - checkFunc func(want, *client.ObjectVector) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got *client.ObjectVector) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := ngtdGetObjectResponseToObjectVector(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_objectVectorToNGTDInsertRequest(t *testing.T) { - type args struct { - in *client.ObjectVector - } - type want struct { - want *proto.InsertRequest - } - type test struct { - name string - 
args args - want want - checkFunc func(want, *proto.InsertRequest) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got *proto.InsertRequest) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := objectVectorToNGTDInsertRequest(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_objectIDToNGTDRemoveRequest(t *testing.T) { - type args struct { - in *client.ObjectID - } - type want struct { - want *proto.RemoveRequest - } - type test struct { - name string - args args - want want - checkFunc func(want, *proto.RemoveRequest) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got *proto.RemoveRequest) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := objectIDToNGTDRemoveRequest(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_objectIDToNGTDGetObjectRequest(t *testing.T) { - type args struct { - in *client.ObjectID - } - type want struct { - want *proto.GetObjectRequest - } - type test struct { - name string - args args - want want - checkFunc func(want, *proto.GetObjectRequest) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got *proto.GetObjectRequest) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer 
test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := objectIDToNGTDGetObjectRequest(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_controlCreateIndexRequestToCreateIndexRequest(t *testing.T) { - type args struct { - in *client.ControlCreateIndexRequest - } - type want struct { - want *proto.CreateIndexRequest - } - type test struct { - name string - args args - want want - checkFunc func(want, *proto.CreateIndexRequest) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got *proto.CreateIndexRequest) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := controlCreateIndexRequestToCreateIndexRequest(test.args.in) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_getSizeAndEpsilon(t *testing.T) { - type args struct { - cfg *client.SearchConfig - } - type want struct { - wantSize int32 - wantEpsilon float32 - } - type test struct { - name string - args args - want want - checkFunc func(want, int32, float32) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, gotSize int32, gotEpsilon float32) error { - if !reflect.DeepEqual(gotSize, w.wantSize) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotSize, w.wantSize) - } - if !reflect.DeepEqual(gotEpsilon, w.wantEpsilon) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotEpsilon, w.wantEpsilon) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - cfg: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - cfg: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - gotSize, gotEpsilon := getSizeAndEpsilon(test.args.cfg) - if err := test.checkFunc(test.want, gotSize, gotEpsilon); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_tofloat64(t *testing.T) { - type args struct { - in []float32 - } - type want struct { - wantOut []float64 - } - type test struct { - name string - args args - want want - checkFunc func(want, []float64) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, gotOut []float64) error { - if !reflect.DeepEqual(gotOut, w.wantOut) 
{ - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOut, w.wantOut) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - in: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - gotOut := tofloat64(test.args.in) - if err := test.checkFunc(test.want, gotOut); err != nil { - tt.Errorf("error = %v", err) - } - }) } } diff --git a/pkg/gateway/meta/handler/grpc/option.go b/pkg/gateway/meta/handler/grpc/option.go new file mode 100644 index 0000000000..27738fcde5 --- /dev/null +++ b/pkg/gateway/meta/handler/grpc/option.go @@ -0,0 +1,63 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package grpc provides grpc server logic +package grpc + +import ( + "github.com/vdaas/vald/internal/client/v1/client/vald" + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/pkg/gateway/meta/service" +) + +type Option func(*server) + +var defaultOpts = []Option{ + WithErrGroup(errgroup.Get()), + WithStreamConcurrency(20), +} + +func WithValdClient(g vald.Client) Option { + return func(s *server) { + if g != nil { + s.gateway = g + } + } +} + +func WithMeta(m service.Meta) Option { + return func(s *server) { + if m != nil { + s.metadata = m + } + } +} + +func WithErrGroup(eg errgroup.Group) Option { + return func(s *server) { + if eg != nil { + s.eg = eg + } + } +} + +func WithStreamConcurrency(c int) Option { + return func(s *server) { + if c != 0 { + s.streamConcurrency = c + } + } +} diff --git a/pkg/gateway/meta/handler/grpc/option_test.go b/pkg/gateway/meta/handler/grpc/option_test.go new file mode 100644 index 0000000000..d44faf1fce --- /dev/null +++ b/pkg/gateway/meta/handler/grpc/option_test.go @@ -0,0 +1,495 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
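The rewritten tests above all follow the same parallel table-driven pattern: the range variable is copied (`test := tc`) before the subtest closure starts, `tt.Parallel()` marks the subtest as parallel, and `goleak.VerifyNone` now receives the subtest's `tt` instead of the parent `t`, so leak failures are reported on the case that caused them. A generic, standalone sketch of why the copy is needed under the loop-variable semantics of the Go versions this code targets (illustrative only, not project code):

```go
// Package example shows the parallel table-driven test pattern used in the
// handler tests above. Without the `test := tc` copy, every parallel subtest
// closure would observe the final value of the shared range variable.
package example

import "testing"

func TestParallelTablePattern(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		in   string
		want string
	}{
		{name: "case_1", in: "a", want: "a"},
		{name: "case_2", in: "b", want: "b"},
	}
	for _, tc := range tests {
		test := tc // copy so the closure below captures this iteration's value
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel() // subtests run concurrently once the loop has finished
			if test.in != test.want {
				tt.Errorf("got %q, want %q", test.in, test.want)
			}
		})
	}
}
```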
+// + +// Package grpc provides grpc server logic +package grpc + +import ( + "testing" + + "github.com/vdaas/vald/internal/client/v1/client/vald" + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/pkg/gateway/meta/service" + "go.uber.org/goleak" +) + +func TestWithValdClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + g vald.Client + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + g: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + g: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithValdClient(test.args.g) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithValdClient(test.args.g) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithMeta(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + m service.Meta + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
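option.go above is a plain functional-options setup: `defaultOpts` pre-sets the error group and a stream concurrency of 20, and each `With*` option only overwrites its field when given a non-nil (or non-zero) value. The package's actual constructor is not part of this hunk; the sketch below only illustrates, under that assumption, how such a constructor typically applies defaults first so that caller-supplied options can override them, mirroring the `append(defaultOpts, opts...)` loop used by the REST handler's `New` later in this diff:

```go
// Hypothetical constructor sketch for the grpc package above; the name
// newExampleServer is illustrative and not part of the diff. It relies on the
// package's server struct, whose fields the options mutate.
package grpc

func newExampleServer(opts ...Option) *server {
	s := new(server)
	// Defaults first, caller options second, so e.g. WithStreamConcurrency(100)
	// replaces the default of 20 while unset options keep their defaults.
	for _, opt := range append(defaultOpts, opts...) {
		opt(s)
	}
	return s
}
```

A caller would then write something like `newExampleServer(WithValdClient(c), WithMeta(m))`, leaving `eg` and `streamConcurrency` at their defaults.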
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + m: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + m: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithMeta(test.args.m) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithMeta(test.args.m) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + eg errgroup.Group + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + eg: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + eg: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithErrGroup(test.args.eg) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithErrGroup(test.args.eg) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithStreamConcurrency(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + c int + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + c: 0, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + c: 0, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithStreamConcurrency(test.args.c) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithStreamConcurrency(test.args.c) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/gateway/meta/handler/rest/handler.go b/pkg/gateway/meta/handler/rest/handler.go new file mode 100644 index 0000000000..59032d8e1f --- /dev/null +++ b/pkg/gateway/meta/handler/rest/handler.go @@ -0,0 +1,163 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
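The generated option tests above intentionally ship with empty test tables and commented-out check functions. Filling one in follows the template's own comments; purely as an illustration (not part of the diff), a concrete table for `TestWithStreamConcurrency` could alias `T` to the package's `server` type and cover both the set and the ignored-zero paths:

```go
// Illustrative fill-in for the TestWithStreamConcurrency template above; these
// lines would replace `type T = interface{}` and the empty tests slice.
type T = server

tests := []test{
	{
		name: "set streamConcurrency when the value is positive",
		args: args{c: 10},
		want: want{obj: &T{streamConcurrency: 10}},
	},
	{
		name: "keep the zero value, since WithStreamConcurrency(0) is a no-op",
		args: args{c: 0},
		want: want{obj: new(T)},
	},
}
```

With the template's "option does not return an error" check block uncommented, both cases compare the object the option was applied to against `want.obj` via `reflect.DeepEqual`.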
+// + +// Package rest provides rest api logic +package rest + +import ( + "net/http" + + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/net/http/dump" + "github.com/vdaas/vald/internal/net/http/json" +) + +type Handler interface { + Index(w http.ResponseWriter, r *http.Request) (int, error) + Exists(w http.ResponseWriter, r *http.Request) (int, error) + Search(w http.ResponseWriter, r *http.Request) (int, error) + SearchByID(w http.ResponseWriter, r *http.Request) (int, error) + MultiSearch(w http.ResponseWriter, r *http.Request) (int, error) + MultiSearchByID(w http.ResponseWriter, r *http.Request) (int, error) + Insert(w http.ResponseWriter, r *http.Request) (int, error) + MultiInsert(w http.ResponseWriter, r *http.Request) (int, error) + Update(w http.ResponseWriter, r *http.Request) (int, error) + MultiUpdate(w http.ResponseWriter, r *http.Request) (int, error) + Upsert(w http.ResponseWriter, r *http.Request) (int, error) + MultiUpsert(w http.ResponseWriter, r *http.Request) (int, error) + Remove(w http.ResponseWriter, r *http.Request) (int, error) + MultiRemove(w http.ResponseWriter, r *http.Request) (int, error) + GetObject(w http.ResponseWriter, r *http.Request) (int, error) +} + +type handler struct { + vald vald.Server +} + +func New(opts ...Option) Handler { + h := new(handler) + + for _, opt := range append(defaultOpts, opts...) { + opt(h) + } + return h +} + +func (h *handler) Index(w http.ResponseWriter, r *http.Request) (int, error) { + data := make(map[string]interface{}) + return json.Handler(w, r, &data, func() (interface{}, error) { + return dump.Request(nil, data, r) + }) +} + +func (h *handler) Search(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Search_Request + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.Search(r.Context(), req) + }) +} + +func (h *handler) SearchByID(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Search_IDRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.SearchByID(r.Context(), req) + }) +} + +func (h *handler) MultiSearch(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Search_MultiRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.MultiSearch(r.Context(), req) + }) +} + +func (h *handler) MultiSearchByID(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Search_MultiIDRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.MultiSearchByID(r.Context(), req) + }) +} + +func (h *handler) Insert(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Insert_Request + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.Insert(r.Context(), req) + }) +} + +func (h *handler) MultiInsert(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Insert_MultiRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.MultiInsert(r.Context(), req) + }) +} + +func (h *handler) Update(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Update_Request + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.Update(r.Context(), req) + }) +} + +func (h *handler) MultiUpdate(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req 
*payload.Update_MultiRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.MultiUpdate(r.Context(), req) + }) +} + +func (h *handler) Upsert(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Upsert_Request + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.Upsert(r.Context(), req) + }) +} + +func (h *handler) MultiUpsert(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Upsert_MultiRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.MultiUpsert(r.Context(), req) + }) +} + +func (h *handler) Remove(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Remove_Request + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.Remove(r.Context(), req) + }) +} + +func (h *handler) MultiRemove(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Remove_MultiRequest + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.MultiRemove(r.Context(), req) + }) +} + +func (h *handler) GetObject(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Object_ID + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.GetObject(r.Context(), req) + }) +} + +func (h *handler) Exists(w http.ResponseWriter, r *http.Request) (code int, err error) { + var req *payload.Object_ID + return json.Handler(w, r, &req, func() (interface{}, error) { + return h.vald.Exists(r.Context(), req) + }) +} diff --git a/pkg/gateway/meta/handler/rest/handler_test.go b/pkg/gateway/meta/handler/rest/handler_test.go new file mode 100644 index 0000000000..2987f032b3 --- /dev/null +++ b/pkg/gateway/meta/handler/rest/handler_test.go @@ -0,0 +1,1496 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
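handler.go above only implements the per-endpoint logic: each method decodes the request body into the corresponding `payload` type through `json.Handler` and delegates to the embedded `vald.Server`. Route registration happens elsewhere in the gateway and is not shown in this hunk, so the wiring below is only a rough sketch using the standard-library mux rather than the project's own routing packages:

```go
// Illustrative wiring only: shows the call shape of the rest Handler methods.
// The real gateway registers routes through its own router and passes options
// (not shown in this hunk) that supply the vald.Server implementation to New.
package main

import (
	"log"
	"net/http"

	"github.com/vdaas/vald/pkg/gateway/meta/handler/rest"
)

func main() {
	h := rest.New() // in practice, options wiring a vald.Server would be passed here

	mux := http.NewServeMux()
	mux.HandleFunc("/search", func(w http.ResponseWriter, r *http.Request) {
		// Each handler method returns an HTTP status code plus any error.
		if code, err := h.Search(w, r); err != nil {
			log.Printf("search failed with status %d: %v", code, err)
		}
	})

	log.Fatal(http.ListenAndServe(":8080", mux))
}
```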
+// + +// Package rest provides rest api logic +package rest + +import ( + "net/http" + "reflect" + "testing" + + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type args struct { + opts []Option + } + type want struct { + want Handler + } + type test struct { + name string + args args + want want + checkFunc func(want, Handler) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got Handler) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := New(test.args.opts...) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_Index(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + want int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + got, err := h.Index(test.args.w, test.args.r) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_Search(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc 
func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.Search(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_SearchByID(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.SearchByID(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_MultiSearch(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return 
errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.MultiSearch(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_MultiSearchByID(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.MultiSearchByID(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_Insert(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + 
return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.Insert(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_MultiInsert(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.MultiInsert(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_Update(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test 
cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.Update(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_MultiUpdate(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.MultiUpdate(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_Upsert(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + 
checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.Upsert(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_MultiUpsert(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.MultiUpsert(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_Remove(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + 
w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.Remove(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_MultiRemove(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.MultiRemove(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_GetObject(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for 
_, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.GetObject(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_handler_Exists(t *testing.T) { + t.Parallel() + type args struct { + w http.ResponseWriter + r *http.Request + } + type fields struct { + vald vald.Server + } + type want struct { + wantCode int + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, int, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotCode int, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotCode, w.wantCode) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCode, w.wantCode) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + w: nil, + r: nil, + }, + fields: fields { + vald: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + h := &handler{ + vald: test.fields.vald, + } + + gotCode, err := h.Exists(test.args.w, test.args.r) + if err := test.checkFunc(test.want, gotCode, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/pkg/gateway/meta/handler/rest/option.go b/pkg/gateway/meta/handler/rest/option.go new file mode 100644 index 0000000000..478a365a77 --- /dev/null +++ b/pkg/gateway/meta/handler/rest/option.go @@ -0,0 +1,30 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
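The generated tests above all share one skeleton: a table of cases, optional beforeFunc/afterFunc/checkFunc hooks, tt.Parallel(), and a goleak.VerifyNone guard against leaked goroutines. The cases themselves are still TODO; the self-contained sketch below (exercising a trivial placeholder function, not the real handler) shows how a filled-in table of this shape typically looks.

package sketch_test

import (
	"testing"

	"go.uber.org/goleak"
)

func TestDouble(t *testing.T) {
	t.Parallel()
	type args struct{ in int }
	type want struct{ out int }
	type test struct {
		name string
		args args
		want want
	}
	// double stands in for the handler method that would normally be exercised.
	double := func(n int) int { return n * 2 }
	tests := []test{
		{name: "returns 4 for 2", args: args{in: 2}, want: want{out: 4}},
		{name: "returns 0 for 0", args: args{in: 0}, want: want{out: 0}},
	}
	for _, tc := range tests {
		test := tc
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			defer goleak.VerifyNone(tt)
			if got := double(test.args.in); got != test.want.out {
				tt.Errorf("got %d, want %d", got, test.want.out)
			}
		})
	}
}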
+// + +// Package rest provides rest api logic +package rest + +import "github.com/vdaas/vald/apis/grpc/v1/vald" + +type Option func(*handler) + +var defaultOpts = []Option{} + +func WithVald(v vald.Server) Option { + return func(h *handler) { + h.vald = v + } +} diff --git a/pkg/gateway/meta/handler/rest/option_test.go b/pkg/gateway/meta/handler/rest/option_test.go new file mode 100644 index 0000000000..5efca60aa7 --- /dev/null +++ b/pkg/gateway/meta/handler/rest/option_test.go @@ -0,0 +1,142 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package rest provides rest api logic +package rest + +import ( + "testing" + + "github.com/vdaas/vald/apis/grpc/v1/vald" + "go.uber.org/goleak" +) + +func TestWithVald(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + v vald.Server + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + v: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + v: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithVald(test.args.v) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if 
test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithVald(test.args.v) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/gateway/meta/router/option.go b/pkg/gateway/meta/router/option.go new file mode 100644 index 0000000000..13cc90a816 --- /dev/null +++ b/pkg/gateway/meta/router/option.go @@ -0,0 +1,40 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package router provides implementation of Go API for routing http Handler wrapped by rest.Func +package router + +import ( + "github.com/vdaas/vald/pkg/gateway/meta/handler/rest" +) + +type Option func(*router) + +var defaultOpts = []Option{ + WithTimeout("3s"), +} + +func WithHandler(h rest.Handler) Option { + return func(r *router) { + r.handler = h + } +} + +func WithTimeout(timeout string) Option { + return func(r *router) { + r.timeout = timeout + } +} diff --git a/pkg/gateway/meta/router/option_test.go b/pkg/gateway/meta/router/option_test.go new file mode 100644 index 0000000000..fbb3e2959a --- /dev/null +++ b/pkg/gateway/meta/router/option_test.go @@ -0,0 +1,259 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package router provides implementation of Go API for routing http Handler wrapped by rest.Func +package router + +import ( + "testing" + + "github.com/vdaas/vald/pkg/gateway/meta/handler/rest" + "go.uber.org/goleak" +) + +func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + h rest.Handler + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + h: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + h: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithHandler(test.args.h) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithHandler(test.args.h) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + timeout string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + timeout: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + timeout: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithTimeout(test.args.timeout) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithTimeout(test.args.timeout) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/gateway/meta/router/router.go b/pkg/gateway/meta/router/router.go new file mode 100644 index 0000000000..28f27cb9c6 --- /dev/null +++ b/pkg/gateway/meta/router/router.go @@ -0,0 +1,167 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package router provides implementation of Go API for routing http Handler wrapped by rest.Func +package router + +import ( + "net/http" + + "github.com/vdaas/vald/internal/net/http/routing" + "github.com/vdaas/vald/pkg/gateway/meta/handler/rest" +) + +type router struct { + handler rest.Handler + timeout string +} + +// New returns REST route&method information from handler interface. +func New(opts ...Option) http.Handler { + r := new(router) + + for _, opt := range append(defaultOpts, opts...) 
{ + opt(r) + } + + h := r.handler + + return routing.New( + routing.WithRoutes([]routing.Route{ + { + "Index", + []string{ + http.MethodGet, + }, + "/", + h.Index, + }, + { + "Search", + []string{ + http.MethodPost, + }, + "/search", + h.Search, + }, + { + "Search By ID", + []string{ + http.MethodGet, + }, + "/search/{id}", + h.SearchByID, + }, + + { + "Multi Search", + []string{ + http.MethodPost, + }, + "/search/multi", + h.MultiSearch, + }, + { + "Multi Search By ID", + []string{ + http.MethodGet, + }, + "/search/multi/{id}", + h.MultiSearchByID, + }, + { + "Insert", + []string{ + http.MethodPost, + }, + "/insert", + h.Insert, + }, + { + "Multiple Insert", + []string{ + http.MethodPost, + }, + "/insert/multi", + h.MultiInsert, + }, + { + "Update", + []string{ + http.MethodPost, + http.MethodPatch, + http.MethodPut, + }, + "/update", + h.Update, + }, + { + "Multiple Update", + []string{ + http.MethodPost, + http.MethodPatch, + http.MethodPut, + }, + "/update/multi", + h.MultiUpdate, + }, + { + "Upsert", + []string{ + http.MethodPost, + http.MethodPatch, + http.MethodPut, + }, + "/upsert", + h.Upsert, + }, + { + "Multiple Upsert", + []string{ + http.MethodPost, + http.MethodPatch, + http.MethodPut, + }, + "/upsert/multi", + h.MultiUpsert, + }, + { + "Remove", + []string{ + http.MethodDelete, + }, + "/delete/{id}", + h.Remove, + }, + { + "Multiple Remove", + []string{ + http.MethodDelete, + http.MethodPost, + }, + "/delete/multi", + h.MultiRemove, + }, + { + "GetObject", + []string{ + http.MethodGet, + }, + "/object/{id}", + h.GetObject, + }, + }...)) +} diff --git a/pkg/gateway/meta/router/router_test.go b/pkg/gateway/meta/router/router_test.go new file mode 100644 index 0000000000..97248787ae --- /dev/null +++ b/pkg/gateway/meta/router/router_test.go @@ -0,0 +1,100 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
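The route table above maps each REST path onto the corresponding rest.Handler method through the internal routing package. A minimal sketch of how this router could be wired into an HTTP server follows; the nil vald.Server, the timeout value, and the listen address are illustrative only, since in the real gateway the server is presumably assembled through the usecase and config layers rather than directly like this.

package main

import (
	"net/http"

	"github.com/vdaas/vald/apis/grpc/v1/vald"
	"github.com/vdaas/vald/pkg/gateway/meta/handler/rest"
	"github.com/vdaas/vald/pkg/gateway/meta/router"
)

func main() {
	// valdSrv is a placeholder: in practice it is the gRPC handler
	// implementation that fronts the meta service.
	var valdSrv vald.Server

	h := rest.New(rest.WithVald(valdSrv))

	mux := router.New(
		router.WithHandler(h),
		router.WithTimeout("5s"), // overrides the "3s" default in defaultOpts
	)

	srv := &http.Server{Addr: ":8081", Handler: mux}
	_ = srv.ListenAndServe()
}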
+// + +// Package router provides implementation of Go API for routing http Handler wrapped by rest.Func +package router + +import ( + "net/http" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type args struct { + opts []Option + } + type want struct { + want http.Handler + } + type test struct { + name string + args args + want want + checkFunc func(want, http.Handler) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got http.Handler) error { + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := New(test.args.opts...) + if err := test.checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/pkg/gateway/meta/service/doc.go b/pkg/gateway/meta/service/doc.go new file mode 100644 index 0000000000..c13956cbbe --- /dev/null +++ b/pkg/gateway/meta/service/doc.go @@ -0,0 +1,18 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package service manages the main logic of server. +package service diff --git a/pkg/gateway/meta/service/meta.go b/pkg/gateway/meta/service/meta.go new file mode 100644 index 0000000000..d158c10507 --- /dev/null +++ b/pkg/gateway/meta/service/meta.go @@ -0,0 +1,492 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
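The generated router TestNew above compares the returned http.Handler with reflect.DeepEqual, which rarely works for handlers assembled at runtime; in practice the route table is usually verified by driving requests through it. The self-contained sketch below shows that httptest approach with a stub mux standing in for the handler returned by New, so it stays independent of the real rest.Handler.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Stub route standing in for one produced by router.New; a real test
	// would pass the handler returned by New(WithHandler(...)) instead.
	mux := http.NewServeMux()
	mux.HandleFunc("/search/", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/search/some-id", nil)
	mux.ServeHTTP(rec, req)

	fmt.Println(rec.Code) // 200
}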
+// + +// Package service provides meta service +package service + +import ( + "context" + "reflect" + + gmeta "github.com/vdaas/vald/apis/grpc/v1/meta" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/internal/cache" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/internal/net/grpc/status" + "github.com/vdaas/vald/internal/observability/trace" +) + +type Meta interface { + Start(ctx context.Context) (<-chan error, error) + Exists(context.Context, string) (bool, error) + GetMeta(context.Context, string) (string, error) + GetMetas(context.Context, ...string) ([]string, error) + GetUUID(context.Context, string) (string, error) + GetUUIDs(context.Context, ...string) ([]string, error) + SetUUIDandMeta(context.Context, string, string) error + SetUUIDandMetas(context.Context, map[string]string) error + DeleteMeta(context.Context, string) (string, error) + DeleteMetas(context.Context, ...string) ([]string, error) + DeleteUUID(context.Context, string) (string, error) + DeleteUUIDs(context.Context, ...string) ([]string, error) +} + +type meta struct { + addr string + client grpc.Client + cache cache.Cache + enableCache bool + expireCheckDuration string + expireDuration string +} + +const ( + apiName = "vald/gateway-meta" + + uuidCacheKeyPref = "uuid-" + metaCacheKeyPref = "meta-" +) + +func New(opts ...Option) (mi Meta, err error) { + m := new(meta) + for _, opt := range append(defaultOpts, opts...) { + if err = opt(m); err != nil { + return nil, errors.ErrOptionFailed(err, reflect.ValueOf(opt)) + } + } + if m.enableCache { + if m.cache == nil { + m.cache, err = cache.New( + cache.WithExpireDuration(m.expireDuration), + cache.WithExpireCheckDuration(m.expireCheckDuration), + ) + if err != nil { + return nil, err + } + } + } + + return m, nil +} + +func (m *meta) Start(ctx context.Context) (<-chan error, error) { + if m.enableCache && m.cache != nil { + m.cache.Start(ctx) + } + return m.client.StartConnectionMonitor(ctx) +} + +func (m *meta) Exists(ctx context.Context, meta string) (bool, error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/Meta.Exists") + defer func() { + if span != nil { + span.End() + } + }() + + if m.enableCache { + _, ok := m.cache.Get(uuidCacheKeyPref + meta) + if ok { + return true, nil + } + } + key, err := m.client.Do(ctx, m.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + key, err := gmeta.NewMetaClient(conn).GetMetaInverse(ctx, &payload.Meta_Val{ + Val: meta, + }, copts...) + if err != nil { + if status.Code(err) == status.NotFound { + return "", nil + } + return nil, err + } + return key.GetKey(), nil + }) + if err != nil { + return false, err + } + + k := key.(string) + if k == "" { + return false, nil + } + + if m.enableCache { + m.cache.Set(uuidCacheKeyPref+meta, k) + m.cache.Set(metaCacheKeyPref+k, meta) + } + return true, nil +} + +func (m *meta) GetMeta(ctx context.Context, uuid string) (v string, err error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/Meta.GetMeta") + defer func() { + if span != nil { + span.End() + } + }() + + if m.enableCache { + data, ok := m.cache.Get(metaCacheKeyPref + uuid) + if ok { + return data.(string), nil + } + } + val, err := m.client.Do(ctx, m.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + val, err := gmeta.NewMetaClient(conn).GetMeta(ctx, &payload.Meta_Key{ + Key: uuid, + }, copts...) 
+ if err != nil { + return nil, err + } + return val.GetVal(), nil + }) + if err != nil { + return "", err + } + v = val.(string) + + if m.enableCache { + m.cache.Set(metaCacheKeyPref+uuid, v) + m.cache.Set(uuidCacheKeyPref+v, uuid) + } + return v, nil +} + +func (m *meta) GetMetas(ctx context.Context, uuids ...string) ([]string, error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/Meta.GetMetas") + defer func() { + if span != nil { + span.End() + } + }() + + if m.enableCache { + metas, ok := func() (metas []string, ok bool) { + for _, uuid := range uuids { + data, ok := m.cache.Get(metaCacheKeyPref + uuid) + if !ok { + return nil, false + } + metas = append(metas, data.(string)) + } + return metas, true + }() + if ok { + return metas, nil + } + } + vals, err := m.client.Do(ctx, m.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + vals, err := gmeta.NewMetaClient(conn).GetMetas(ctx, &payload.Meta_Keys{ + Keys: uuids, + }, copts...) + if vals != nil { + return vals.GetVals(), err + } + return nil, err + }) + if vals != nil { + vs, ok := vals.([]string) + if ok { + if m.enableCache { + for i, v := range vs { + uuid := uuids[i] + m.cache.Set(metaCacheKeyPref+uuid, v) + m.cache.Set(uuidCacheKeyPref+v, uuid) + } + } + return vs, err + } + } + return nil, err +} + +func (m *meta) GetUUID(ctx context.Context, meta string) (k string, err error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/Meta.GetUUID") + defer func() { + if span != nil { + span.End() + } + }() + + if m.enableCache { + data, ok := m.cache.Get(uuidCacheKeyPref + meta) + if ok { + return data.(string), nil + } + } + key, err := m.client.Do(ctx, m.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + key, err := gmeta.NewMetaClient(conn).GetMetaInverse(ctx, &payload.Meta_Val{ + Val: meta, + }, copts...) + if err != nil { + return nil, err + } + return key.GetKey(), nil + }) + if err != nil { + return "", err + } + + k = key.(string) + if m.enableCache { + m.cache.Set(uuidCacheKeyPref+meta, k) + m.cache.Set(metaCacheKeyPref+k, meta) + } + return k, nil +} + +func (m *meta) GetUUIDs(ctx context.Context, metas ...string) ([]string, error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/Meta.GetUUIDs") + defer func() { + if span != nil { + span.End() + } + }() + + if m.enableCache { + uuids, ok := func() (uuids []string, ok bool) { + for _, meta := range metas { + data, ok := m.cache.Get(uuidCacheKeyPref + meta) + if !ok { + return nil, false + } + uuids = append(uuids, data.(string)) + } + return uuids, true + }() + if ok { + return uuids, nil + } + } + keys, err := m.client.Do(ctx, m.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + keys, err := gmeta.NewMetaClient(conn).GetMetasInverse(ctx, &payload.Meta_Vals{ + Vals: metas, + }, copts...) 
+ if keys != nil { + return keys.GetKeys(), err + } + return nil, err + }) + if keys != nil { + ks, ok := keys.([]string) + if ok { + if m.enableCache { + for i, k := range ks { + meta := metas[i] + m.cache.Set(uuidCacheKeyPref+meta, k) + m.cache.Set(metaCacheKeyPref+k, meta) + } + } + return ks, err + } + } + return nil, err +} + +func (m *meta) SetUUIDandMeta(ctx context.Context, uuid, meta string) (err error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/Meta.SetUUIDandMeta") + defer func() { + if span != nil { + span.End() + } + }() + + _, err = m.client.Do(ctx, m.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + _, err := gmeta.NewMetaClient(conn).SetMeta(ctx, &payload.Meta_KeyVal{ + Key: uuid, + Val: meta, + }, copts...) + + return nil, err + }) + if err != nil { + return err + } + + if m.enableCache { + m.cache.Set(uuidCacheKeyPref+meta, uuid) + m.cache.Set(metaCacheKeyPref+uuid, meta) + } + return nil +} + +func (m *meta) SetUUIDandMetas(ctx context.Context, kvs map[string]string) (err error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/Meta.SetUUIDandMetas") + defer func() { + if span != nil { + span.End() + } + }() + + data := make([]*payload.Meta_KeyVal, 0, len(kvs)) + for uuid, meta := range kvs { + data = append(data, &payload.Meta_KeyVal{ + Key: uuid, + Val: meta, + }) + } + _, err = m.client.Do(ctx, m.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + _, err := gmeta.NewMetaClient(conn).SetMetas(ctx, &payload.Meta_KeyVals{ + Kvs: data, + }, copts...) + + return nil, err + }) + if err != nil { + return err + } + + if m.enableCache { + for uuid, meta := range kvs { + m.cache.Set(uuidCacheKeyPref+meta, uuid) + m.cache.Set(metaCacheKeyPref+uuid, meta) + } + } + return nil +} + +func (m *meta) DeleteMeta(ctx context.Context, uuid string) (v string, err error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/Meta.DeleteMeta") + defer func() { + if span != nil { + span.End() + } + }() + + if m.enableCache { + meta, ok := m.cache.GetAndDelete(metaCacheKeyPref + uuid) + if ok { + m.cache.Delete(uuidCacheKeyPref + meta.(string)) + } + } + val, err := m.client.Do(ctx, m.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + val, err := gmeta.NewMetaClient(conn).DeleteMeta(ctx, &payload.Meta_Key{ + Key: uuid, + }, copts...) + if err != nil { + return nil, err + } + return val.GetVal(), nil + }) + if err != nil { + return "", err + } + return val.(string), nil +} + +func (m *meta) DeleteMetas(ctx context.Context, uuids ...string) ([]string, error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/Meta.DeleteMetas") + defer func() { + if span != nil { + span.End() + } + }() + + if m.enableCache { + for _, uuid := range uuids { + meta, ok := m.cache.GetAndDelete(metaCacheKeyPref + uuid) + if ok { + m.cache.Delete(uuidCacheKeyPref + meta.(string)) + } + } + } + vals, err := m.client.Do(ctx, m.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + vals, err := gmeta.NewMetaClient(conn).DeleteMetas(ctx, &payload.Meta_Keys{ + Keys: uuids, + }, copts...) 
+ if err != nil { + return nil, err + } + return vals.GetVals(), nil + }) + if err != nil { + return nil, err + } + return vals.([]string), nil +} + +func (m *meta) DeleteUUID(ctx context.Context, meta string) (string, error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/Meta.DeleteUUID") + defer func() { + if span != nil { + span.End() + } + }() + + if m.enableCache { + uuid, ok := m.cache.GetAndDelete(uuidCacheKeyPref + meta) + if ok { + m.cache.Delete(metaCacheKeyPref + uuid.(string)) + } + } + key, err := m.client.Do(ctx, m.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + key, err := gmeta.NewMetaClient(conn).DeleteMetaInverse(ctx, &payload.Meta_Val{ + Val: meta, + }, copts...) + if err != nil { + return nil, err + } + return key.GetKey(), nil + }) + if err != nil { + return "", err + } + return key.(string), nil +} + +func (m *meta) DeleteUUIDs(ctx context.Context, metas ...string) ([]string, error) { + ctx, span := trace.StartSpan(ctx, apiName+"/service/Meta.DeleteUUIDs") + defer func() { + if span != nil { + span.End() + } + }() + + if m.enableCache { + for _, meta := range metas { + uuid, ok := m.cache.GetAndDelete(uuidCacheKeyPref + meta) + if ok { + m.cache.Delete(metaCacheKeyPref + uuid.(string)) + } + } + } + keys, err := m.client.Do(ctx, m.addr, func(ctx context.Context, + conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { + keys, err := gmeta.NewMetaClient(conn).DeleteMetasInverse(ctx, &payload.Meta_Vals{ + Vals: metas, + }, copts...) + if err != nil { + return nil, err + } + return keys.GetKeys(), nil + }) + if err != nil { + return nil, err + } + return keys.([]string), nil +} diff --git a/pkg/gateway/meta/service/meta_test.go b/pkg/gateway/meta/service/meta_test.go new file mode 100644 index 0000000000..1d12956543 --- /dev/null +++ b/pkg/gateway/meta/service/meta_test.go @@ -0,0 +1,1454 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
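service/meta.go keeps a bidirectional cache so that lookups by meta value and by uuid can both short-circuit the gRPC round trip: each successful resolution stores one entry under the uuid- prefix keyed by the meta value and one under the meta- prefix keyed by the uuid, and the Delete* methods evict both directions. The sketch below illustrates only that key layout, using a plain map and made-up values; the real service uses internal/cache with the configured expire durations.

package main

import "fmt"

// Same prefixes as the constants in service/meta.go.
const (
	uuidCacheKeyPref = "uuid-"
	metaCacheKeyPref = "meta-"
)

func main() {
	cache := map[string]string{}

	// Illustrative values: uuid is the internal vector id, meta is the
	// user-facing key registered through SetUUIDandMeta.
	uuid, meta := "c0ffee-1234", "item-42"

	cache[uuidCacheKeyPref+meta] = uuid // lookup by meta -> uuid (GetUUID / Exists)
	cache[metaCacheKeyPref+uuid] = meta // lookup by uuid -> meta (GetMeta)

	fmt.Println(cache["uuid-item-42"])     // c0ffee-1234
	fmt.Println(cache["meta-c0ffee-1234"]) // item-42
}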
+// + +// Package service provides meta service +package service + +import ( + "context" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/cache" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/net/grpc" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type args struct { + opts []Option + } + type want struct { + wantMi Meta + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, Meta, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotMi Meta, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotMi, w.wantMi) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotMi, w.wantMi) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + opts: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + gotMi, err := New(test.args.opts...) + if err := test.checkFunc(test.want, gotMi, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_meta_Start(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + addr string + client grpc.Client + cache cache.Cache + enableCache bool + expireCheckDuration string + expireDuration string + } + type want struct { + want <-chan error + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, <-chan error, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got <-chan error, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &meta{ + addr: test.fields.addr, + client: test.fields.client, + cache: 
test.fields.cache, + enableCache: test.fields.enableCache, + expireCheckDuration: test.fields.expireCheckDuration, + expireDuration: test.fields.expireDuration, + } + + got, err := m.Start(test.args.ctx) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_meta_Exists(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + meta string + } + type fields struct { + addr string + client grpc.Client + cache cache.Cache + enableCache bool + expireCheckDuration string + expireDuration string + } + type want struct { + want bool + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, bool, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got bool, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + meta: "", + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + meta: "", + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &meta{ + addr: test.fields.addr, + client: test.fields.client, + cache: test.fields.cache, + enableCache: test.fields.enableCache, + expireCheckDuration: test.fields.expireCheckDuration, + expireDuration: test.fields.expireDuration, + } + + got, err := m.Exists(test.args.ctx, test.args.meta) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_meta_GetMeta(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + uuid string + } + type fields struct { + addr string + client grpc.Client + cache cache.Cache + enableCache bool + expireCheckDuration string + expireDuration string + } + type want struct { + wantV string + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotV string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotV, w.wantV) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotV, w.wantV) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + uuid: "", + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: 
"", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + uuid: "", + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &meta{ + addr: test.fields.addr, + client: test.fields.client, + cache: test.fields.cache, + enableCache: test.fields.enableCache, + expireCheckDuration: test.fields.expireCheckDuration, + expireDuration: test.fields.expireDuration, + } + + gotV, err := m.GetMeta(test.args.ctx, test.args.uuid) + if err := test.checkFunc(test.want, gotV, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_meta_GetMetas(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + uuids []string + } + type fields struct { + addr string + client grpc.Client + cache cache.Cache + enableCache bool + expireCheckDuration string + expireDuration string + } + type want struct { + want []string + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, []string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got []string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + uuids: nil, + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + uuids: nil, + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &meta{ + addr: test.fields.addr, + client: test.fields.client, + cache: test.fields.cache, + enableCache: test.fields.enableCache, + expireCheckDuration: test.fields.expireCheckDuration, + expireDuration: test.fields.expireDuration, + } + + got, err := m.GetMetas(test.args.ctx, test.args.uuids...) 
+ if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_meta_GetUUID(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + meta string + } + type fields struct { + addr string + client grpc.Client + cache cache.Cache + enableCache bool + expireCheckDuration string + expireDuration string + } + type want struct { + wantK string + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotK string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotK, w.wantK) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotK, w.wantK) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + meta: "", + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + meta: "", + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &meta{ + addr: test.fields.addr, + client: test.fields.client, + cache: test.fields.cache, + enableCache: test.fields.enableCache, + expireCheckDuration: test.fields.expireCheckDuration, + expireDuration: test.fields.expireDuration, + } + + gotK, err := m.GetUUID(test.args.ctx, test.args.meta) + if err := test.checkFunc(test.want, gotK, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_meta_GetUUIDs(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + metas []string + } + type fields struct { + addr string + client grpc.Client + cache cache.Cache + enableCache bool + expireCheckDuration string + expireDuration string + } + type want struct { + want []string + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, []string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got []string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + metas: nil, + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + 
metas: nil, + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &meta{ + addr: test.fields.addr, + client: test.fields.client, + cache: test.fields.cache, + enableCache: test.fields.enableCache, + expireCheckDuration: test.fields.expireCheckDuration, + expireDuration: test.fields.expireDuration, + } + + got, err := m.GetUUIDs(test.args.ctx, test.args.metas...) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_meta_SetUUIDandMeta(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + uuid string + meta string + } + type fields struct { + addr string + client grpc.Client + cache cache.Cache + enableCache bool + expireCheckDuration string + expireDuration string + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + uuid: "", + meta: "", + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + uuid: "", + meta: "", + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &meta{ + addr: test.fields.addr, + client: test.fields.client, + cache: test.fields.cache, + enableCache: test.fields.enableCache, + expireCheckDuration: test.fields.expireCheckDuration, + expireDuration: test.fields.expireDuration, + } + + err := m.SetUUIDandMeta(test.args.ctx, test.args.uuid, test.args.meta) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_meta_SetUUIDandMetas(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + kvs map[string]string + } + type fields struct { + addr string + client grpc.Client + cache cache.Cache + enableCache bool + expireCheckDuration string + expireDuration string + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + 
defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + kvs: nil, + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + kvs: nil, + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &meta{ + addr: test.fields.addr, + client: test.fields.client, + cache: test.fields.cache, + enableCache: test.fields.enableCache, + expireCheckDuration: test.fields.expireCheckDuration, + expireDuration: test.fields.expireDuration, + } + + err := m.SetUUIDandMetas(test.args.ctx, test.args.kvs) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_meta_DeleteMeta(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + uuid string + } + type fields struct { + addr string + client grpc.Client + cache cache.Cache + enableCache bool + expireCheckDuration string + expireDuration string + } + type want struct { + wantV string + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotV string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotV, w.wantV) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotV, w.wantV) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + uuid: "", + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + uuid: "", + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &meta{ + addr: test.fields.addr, + client: test.fields.client, + cache: test.fields.cache, + enableCache: test.fields.enableCache, + expireCheckDuration: test.fields.expireCheckDuration, + 
expireDuration: test.fields.expireDuration, + } + + gotV, err := m.DeleteMeta(test.args.ctx, test.args.uuid) + if err := test.checkFunc(test.want, gotV, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_meta_DeleteMetas(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + uuids []string + } + type fields struct { + addr string + client grpc.Client + cache cache.Cache + enableCache bool + expireCheckDuration string + expireDuration string + } + type want struct { + want []string + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, []string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got []string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + uuids: nil, + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + uuids: nil, + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &meta{ + addr: test.fields.addr, + client: test.fields.client, + cache: test.fields.cache, + enableCache: test.fields.enableCache, + expireCheckDuration: test.fields.expireCheckDuration, + expireDuration: test.fields.expireDuration, + } + + got, err := m.DeleteMetas(test.args.ctx, test.args.uuids...) 
+ if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_meta_DeleteUUID(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + meta string + } + type fields struct { + addr string + client grpc.Client + cache cache.Cache + enableCache bool + expireCheckDuration string + expireDuration string + } + type want struct { + want string + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + meta: "", + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + meta: "", + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &meta{ + addr: test.fields.addr, + client: test.fields.client, + cache: test.fields.cache, + enableCache: test.fields.enableCache, + expireCheckDuration: test.fields.expireCheckDuration, + expireDuration: test.fields.expireDuration, + } + + got, err := m.DeleteUUID(test.args.ctx, test.args.meta) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_meta_DeleteUUIDs(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + metas []string + } + type fields struct { + addr string + client grpc.Client + cache cache.Cache + enableCache bool + expireCheckDuration string + expireDuration string + } + type want struct { + want []string + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, []string, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got []string, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + metas: nil, + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + 
metas: nil, + }, + fields: fields { + addr: "", + client: nil, + cache: nil, + enableCache: false, + expireCheckDuration: "", + expireDuration: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + m := &meta{ + addr: test.fields.addr, + client: test.fields.client, + cache: test.fields.cache, + enableCache: test.fields.enableCache, + expireCheckDuration: test.fields.expireCheckDuration, + expireDuration: test.fields.expireDuration, + } + + got, err := m.DeleteUUIDs(test.args.ctx, test.args.metas...) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/pkg/gateway/meta/service/option.go b/pkg/gateway/meta/service/option.go new file mode 100644 index 0000000000..36c897e20e --- /dev/null +++ b/pkg/gateway/meta/service/option.go @@ -0,0 +1,95 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
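The option.go file below follows the functional-options pattern used across the gateway services: each WithMeta* constructor returns an Option closure that validates its argument (duration strings go through timeutil.Parse) and then mutates the private meta struct, while defaultOpts carries the cache defaults that New presumably applies before the caller-supplied options so explicit settings win. A minimal sketch of the same shape under simplified assumptions; the settings type, its field names, and the example address are illustrative, and time.ParseDuration stands in for timeutil.Parse:

package main

import (
	"errors"
	"fmt"
	"time"
)

// settings is an illustrative stand-in for the package's private option target.
type settings struct {
	addr           string
	expireDuration string
}

// Option mirrors the func(*meta) error shape used in option.go.
type Option func(*settings) error

func WithAddr(addr string) Option {
	return func(s *settings) error {
		s.addr = addr
		return nil
	}
}

// WithExpireDuration rejects unparsable durations up front, the way
// WithMetaCacheExpireDuration does via timeutil.Parse.
func WithExpireDuration(dur string) Option {
	return func(s *settings) error {
		if _, err := time.ParseDuration(dur); err != nil {
			return err
		}
		s.expireDuration = dur
		return nil
	}
}

// newSettings applies defaults first, then caller options, so callers can override them.
func newSettings(opts ...Option) (*settings, error) {
	s := &settings{expireDuration: "30m"} // default, analogous to defaultOpts
	for _, opt := range opts {
		if opt == nil {
			return nil, errors.New("nil option")
		}
		if err := opt(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := newSettings(
		WithAddr("meta.vald.svc.cluster.local:8081"), // illustrative address
		WithExpireDuration("1h"),
	)
	fmt.Println(s, err)
}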
+// + +// Package service +package service + +import ( + "fmt" + + "github.com/vdaas/vald/internal/cache" + "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/internal/timeutil" +) + +type Option func(m *meta) error + +var defaultOpts = []Option{ + WithMetaCacheEnabled(true), + WithMetaCacheExpireDuration("30m"), + WithMetaCacheExpiredCheckDuration("2m"), +} + +func WithMetaAddr(addr string) Option { + return func(m *meta) error { + m.addr = addr + return nil + } +} + +func WithMetaHostPort(host string, port int) Option { + return func(m *meta) error { + m.addr = fmt.Sprintf("%s:%d", host, port) + return nil + } +} + +func WithMetaClient(client grpc.Client) Option { + return func(m *meta) error { + if client != nil { + m.client = client + } + return nil + } +} + +func WithMetaCacheEnabled(flg bool) Option { + return func(m *meta) error { + m.enableCache = flg + return nil + } +} + +func WithMetaCache(c cache.Cache) Option { + return func(m *meta) error { + if c != nil { + m.cache = c + } + return nil + } +} + +func WithMetaCacheExpireDuration(dur string) Option { + return func(m *meta) error { + _, err := timeutil.Parse(dur) + if err != nil { + return err + } + m.expireDuration = dur + return nil + } +} + +func WithMetaCacheExpiredCheckDuration(dur string) Option { + return func(m *meta) error { + _, err := timeutil.Parse(dur) + if err != nil { + return err + } + m.expireCheckDuration = dur + return nil + } +} diff --git a/pkg/gateway/meta/service/option_test.go b/pkg/gateway/meta/service/option_test.go new file mode 100644 index 0000000000..10419ed307 --- /dev/null +++ b/pkg/gateway/meta/service/option_test.go @@ -0,0 +1,848 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package service +package service + +import ( + "testing" + + "github.com/vdaas/vald/internal/cache" + "github.com/vdaas/vald/internal/net/grpc" + "go.uber.org/goleak" +) + +func TestWithMetaAddr(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + addr string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + addr: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + addr: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithMetaAddr(test.args.addr) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithMetaAddr(test.args.addr) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithMetaHostPort(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + host string + port int + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + host: "", + port: 0, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + host: "", + port: 0, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithMetaHostPort(test.args.host, test.args.port) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithMetaHostPort(test.args.host, test.args.port) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithMetaClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + client grpc.Client + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + client: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + client: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithMetaClient(test.args.client) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithMetaClient(test.args.client) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithMetaCacheEnabled(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + flg bool + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + flg: false, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + flg: false, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithMetaCacheEnabled(test.args.flg) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithMetaCacheEnabled(test.args.flg) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithMetaCache(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + c cache.Cache + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + c: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + c: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithMetaCache(test.args.c) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithMetaCache(test.args.c) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithMetaCacheExpireDuration(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + dur string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + dur: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + dur: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithMetaCacheExpireDuration(test.args.dur) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithMetaCacheExpireDuration(test.args.dur) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithMetaCacheExpiredCheckDuration(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + dur string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + dur: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + dur: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithMetaCacheExpiredCheckDuration(test.args.dur) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithMetaCacheExpiredCheckDuration(test.args.dur) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/gateway/meta/usecase/vald.go b/pkg/gateway/meta/usecase/vald.go new file mode 100644 index 0000000000..ff8e55e5dd --- /dev/null +++ b/pkg/gateway/meta/usecase/vald.go @@ -0,0 +1,211 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
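The option tests above are generated scaffolding: each table ships only commented-out TODO cases plus duplicated check blocks for the error and non-error variants. For an option that can fail, such as WithMetaCacheExpireDuration, a completed entry would pin T to the concrete target type and assert both the returned error and the resulting field value. The standalone sketch below shows one way such a case could be filled in; it re-implements a tiny duration option in its own hypothetical package so it does not depend on the real internal packages, and the case data is made up:

package sketch

import (
	"testing"
	"time"
)

type conf struct{ expireDuration string }

type option func(*conf) error

// withExpireDuration mimics WithMetaCacheExpireDuration: parse first, assign on success.
func withExpireDuration(dur string) option {
	return func(c *conf) error {
		if _, err := time.ParseDuration(dur); err != nil {
			return err
		}
		c.expireDuration = dur
		return nil
	}
}

func TestWithExpireDuration(t *testing.T) {
	t.Parallel()
	type want struct {
		dur     string
		wantErr bool
	}
	tests := []struct {
		name string
		arg  string
		want want
	}{
		{name: "valid duration is stored", arg: "30m", want: want{dur: "30m"}},
		{name: "invalid duration returns an error", arg: "not-a-duration", want: want{wantErr: true}},
	}
	for _, tc := range tests {
		test := tc
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			obj := new(conf)
			err := withExpireDuration(test.arg)(obj)
			if (err != nil) != test.want.wantErr {
				tt.Errorf("unexpected error state: %v", err)
			}
			if obj.expireDuration != test.want.dur {
				tt.Errorf("got %q, want %q", obj.expireDuration, test.want.dur)
			}
		})
	}
}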
+// + +package usecase + +import ( + "context" + + "github.com/vdaas/vald/apis/grpc/v1/vald" + client "github.com/vdaas/vald/internal/client/v1/client/vald" + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/internal/net/grpc/metric" + "github.com/vdaas/vald/internal/observability" + "github.com/vdaas/vald/internal/runner" + "github.com/vdaas/vald/internal/safety" + "github.com/vdaas/vald/internal/servers/server" + "github.com/vdaas/vald/internal/servers/starter" + "github.com/vdaas/vald/pkg/gateway/meta/config" + handler "github.com/vdaas/vald/pkg/gateway/meta/handler/grpc" + "github.com/vdaas/vald/pkg/gateway/meta/handler/rest" + "github.com/vdaas/vald/pkg/gateway/meta/router" + "github.com/vdaas/vald/pkg/gateway/meta/service" +) + +type run struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + metadata service.Meta +} + +func New(cfg *config.Data) (r runner.Runner, err error) { + eg := errgroup.Get() + + var metadata service.Meta + + metadataClientOptions := append( + cfg.Meta.Client.Opts(), + grpc.WithErrGroup(eg), + ) + + var obs observability.Observability + if cfg.Observability.Enabled { + obs, err = observability.NewWithConfig(cfg.Observability) + if err != nil { + return nil, err + } + metadataClientOptions = append( + metadataClientOptions, + grpc.WithDialOptions( + grpc.WithStatsHandler(metric.NewClientHandler()), + ), + ) + } + + if addrs := cfg.Meta.Client.Addrs; len(addrs) == 0 { + return nil, errors.ErrInvalidMetaDataConfig + } + metadata, err = service.New( + service.WithMetaAddr(cfg.Meta.Client.Addrs[0]), + service.WithMetaClient( + grpc.New(metadataClientOptions...), + ), + service.WithMetaCacheEnabled(cfg.Meta.EnableCache), + service.WithMetaCacheExpireDuration(cfg.Meta.CacheExpiration), + service.WithMetaCacheExpiredCheckDuration(cfg.Meta.ExpiredCacheCheckDuration), + ) + if err != nil { + return nil, err + } + + if addrs := cfg.Client.Addrs; len(addrs) == 0 { + return nil, errors.ErrGRPCTargetAddrNotFound + } + + v := handler.New( + handler.WithValdClient(client.New( + client.WithAddr(cfg.Client.Addrs[0]), + client.WithClient(grpc.New(cfg.Client.Opts()...)), + )), + handler.WithMeta(metadata), + handler.WithErrGroup(eg), + handler.WithStreamConcurrency(cfg.Server.GetGRPCStreamConcurrency()), + ) + + grpcServerOptions := []server.Option{ + server.WithGRPCRegistFunc(func(srv *grpc.Server) { + vald.RegisterValdServer(srv, v) + }), + server.WithPreStopFunction(func() error { + // TODO notify another gateway and scheduler + return nil + }), + } + + if cfg.Observability.Enabled { + grpcServerOptions = append( + grpcServerOptions, + server.WithGRPCOption( + grpc.StatsHandler(metric.NewServerHandler()), + ), + ) + } + + srv, err := starter.New( + starter.WithConfig(cfg.Server), + starter.WithREST(func(sc *config.Server) []server.Option { + return []server.Option{ + server.WithHTTPHandler( + router.New( + router.WithHandler( + rest.New( + rest.WithVald(v), + ), + ), + ), + ), + } + }), + starter.WithGRPC(func(sc *config.Server) []server.Option { + return grpcServerOptions + }), + // TODO add GraphQL handler + ) + if err != nil { + return nil, err + } + + return &run{ + eg: eg, + cfg: cfg, + server: srv, + observability: obs, + metadata: metadata, + }, nil +} + +func (r *run) PreStart(ctx context.Context) error { + if r.observability != nil { + return r.observability.PreStart(ctx) + } + return nil +} + +func (r 
*run) Start(ctx context.Context) (<-chan error, error) { + ech := make(chan error, 6) + var mech, sech, oech <-chan error + var err error + if r.observability != nil { + oech = r.observability.Start(ctx) + } + if r.metadata != nil { + mech, err = r.metadata.Start(ctx) + if err != nil { + close(ech) + return nil, err + } + } + sech = r.server.ListenAndServe(ctx) + r.eg.Go(safety.RecoverFunc(func() (err error) { + defer close(ech) + for { + select { + case <-ctx.Done(): + return ctx.Err() + case err = <-oech: + case err = <-mech: + case err = <-sech: + } + if err != nil { + select { + case <-ctx.Done(): + return ctx.Err() + case ech <- err: + } + } + } + })) + return ech, nil +} + +func (r *run) PreStop(ctx context.Context) error { + return nil +} + +func (r *run) Stop(ctx context.Context) error { + if r.observability != nil { + r.observability.Stop(ctx) + } + return r.server.Shutdown(ctx) +} + +func (r *run) PostStop(ctx context.Context) error { + return nil +} diff --git a/pkg/gateway/meta/usecase/vald_test.go b/pkg/gateway/meta/usecase/vald_test.go new file mode 100644 index 0000000000..97a0edb90f --- /dev/null +++ b/pkg/gateway/meta/usecase/vald_test.go @@ -0,0 +1,623 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
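run.Start above fans the error streams of the observability, metadata, and server components into the single channel it returns to the runner: a goroutine selects over the three source channels and forwards every non-nil error unless the context has already been cancelled. A stripped-down sketch of that fan-in, using a plain goroutine instead of the project's errgroup and safety.RecoverFunc helpers; the component names and the sample error are illustrative:

package main

import (
	"context"
	"fmt"
	"time"
)

// fanIn forwards errors from each source channel onto a single channel until ctx ends.
func fanIn(ctx context.Context, oech, mech, sech <-chan error) <-chan error {
	ech := make(chan error, 6) // buffered like run.Start so slow readers don't block sources
	go func() {
		defer close(ech)
		for {
			var err error
			select {
			case <-ctx.Done():
				return
			case err = <-oech:
			case err = <-mech:
			case err = <-sech:
			}
			if err != nil {
				select {
				case <-ctx.Done():
					return
				case ech <- err:
				}
			}
		}
	}()
	return ech
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	oech := make(chan error, 1) // observability errors
	mech := make(chan error, 1) // metadata service errors
	sech := make(chan error, 1) // server errors
	sech <- fmt.Errorf("listen failed") // e.g. the gRPC/REST server could not bind

	for err := range fanIn(ctx, oech, mech, sech) {
		fmt.Println("observed:", err)
	}
}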
+// + +package usecase + +import ( + "context" + "reflect" + "testing" + + "github.com/vdaas/vald/internal/errgroup" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/observability" + "github.com/vdaas/vald/internal/runner" + "github.com/vdaas/vald/internal/servers/starter" + "github.com/vdaas/vald/pkg/gateway/meta/config" + "github.com/vdaas/vald/pkg/gateway/meta/service" + "go.uber.org/goleak" +) + +func TestNew(t *testing.T) { + t.Parallel() + type args struct { + cfg *config.Data + } + type want struct { + wantR runner.Runner + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, runner.Runner, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, gotR runner.Runner, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotR, w.wantR) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotR, w.wantR) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + cfg: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + cfg: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + gotR, err := New(test.args.cfg) + if err := test.checkFunc(test.want, gotR, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_PreStart(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + metadata service.Meta + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + metadata: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + metadata: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + metadata: 
test.fields.metadata, + } + + err := r.PreStart(test.args.ctx) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_Start(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + metadata service.Meta + } + type want struct { + want <-chan error + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, <-chan error, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got <-chan error, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(got, w.want) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + metadata: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + metadata: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + metadata: test.fields.metadata, + } + + got, err := r.Start(test.args.ctx) + if err := test.checkFunc(test.want, got, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_PreStop(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + metadata service.Meta + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + metadata: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + metadata: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + 
test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + metadata: test.fields.metadata, + } + + err := r.PreStop(test.args.ctx) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_Stop(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + metadata service.Meta + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + metadata: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + metadata: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + metadata: test.fields.metadata, + } + + err := r.Stop(test.args.ctx) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func Test_run_PostStop(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + } + type fields struct { + eg errgroup.Group + cfg *config.Data + server starter.Server + observability observability.Observability + metadata service.Meta + } + type want struct { + err error + } + type test struct { + name string + args args + fields fields + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + metadata: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + ctx: nil, + }, + fields: fields { + eg: nil, + cfg: nil, + server: nil, + observability: nil, + metadata: nil, + }, + want: want{}, + checkFunc: defaultCheckFunc, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + 
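// Every generated subtest in this file now follows the same three-step pattern: copy the
// loop variable before the closure captures it, opt the subtest into the parallel
// scheduler, and scope the goroutine-leak check to the subtest's own *testing.T.
// A minimal, self-contained sketch of that pattern (the table contents are hypothetical
// and only illustrate the shape; it assumes the testing and go.uber.org/goleak imports
// already present in this file):

func TestSubtestPatternSketch(t *testing.T) {
	t.Parallel()
	tests := []struct{ name string }{
		{name: "test_case_1"},
		{name: "test_case_2"},
	}
	for _, tc := range tests {
		test := tc // shadow copy: parallel subtests must not share the loop variable
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			defer goleak.VerifyNone(tt) // leak check against tt, not the parent t
			_ = test                    // per-case setup and assertions would use the copied value here
		})
	}
}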
t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + r := &run{ + eg: test.fields.eg, + cfg: test.fields.cfg, + server: test.fields.server, + observability: test.fields.observability, + metadata: test.fields.metadata, + } + + err := r.PostStop(test.args.ctx) + if err := test.checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/pkg/gateway/vald/config/config_test.go b/pkg/gateway/vald/config/config_test.go index eba18af56a..e90809127a 100644 --- a/pkg/gateway/vald/config/config_test.go +++ b/pkg/gateway/vald/config/config_test.go @@ -26,6 +26,7 @@ import ( ) func TestNewConfig(t *testing.T) { + t.Parallel() type args struct { path string } @@ -78,9 +79,11 @@ func TestNewConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -95,7 +98,6 @@ func TestNewConfig(t *testing.T) { if err := test.checkFunc(test.want, gotCfg, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/gateway/vald/handler/grpc/checklist.go b/pkg/gateway/vald/handler/grpc/checklist.go deleted file mode 100644 index 1d3cdc4b47..0000000000 --- a/pkg/gateway/vald/handler/grpc/checklist.go +++ /dev/null @@ -1,141 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
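// The checkList type removed in the checklist.go hunk just below is essentially a
// hand-rolled concurrent string set built with the same read-map/dirty-map/expunge
// technique as the standard library's sync.Map. For reference, the Exists/Check behavior
// it provided can be sketched directly on top of sync.Map (a simplification for
// illustration, not a drop-in replacement for the deleted code; assumes the sync import):

type checkListSketch struct {
	m sync.Map // key: string, value: struct{}
}

// Exists reports whether the key has been checked before.
func (c *checkListSketch) Exists(key string) bool {
	_, ok := c.m.Load(key)
	return ok
}

// Check marks the key as seen.
func (c *checkListSketch) Check(key string) {
	c.m.Store(key, struct{}{})
}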
-// - -package grpc - -import ( - "sync" - "sync/atomic" - "unsafe" -) - -type checkList struct { - mu sync.Mutex - read atomic.Value - dirty map[string]*entryCheckList - misses int -} - -type readOnlyCheckList struct { - m map[string]*entryCheckList - amended bool -} - -var expungedCheckList = unsafe.Pointer(new(struct{})) - -type entryCheckList struct { - p unsafe.Pointer -} - -func (m *checkList) Exists(key string) bool { - read, _ := m.read.Load().(readOnlyCheckList) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read, _ = m.read.Load().(readOnlyCheckList) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - m.missLocked() - } - m.mu.Unlock() - } - if !ok { - return false - } - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedCheckList { - return false - } - return true -} - -func (m *checkList) Check(key string) { - value := struct{}{} - read, _ := m.read.Load().(readOnlyCheckList) - if e, ok := read.m[key]; ok && e.tryStore(&value) { - return - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyCheckList) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - m.dirty[key] = e - } - atomic.StorePointer(&e.p, unsafe.Pointer(&value)) - } else if e, ok := m.dirty[key]; ok { - atomic.StorePointer(&e.p, unsafe.Pointer(&value)) - } else { - if !read.amended { - m.dirtyLocked() - m.read.Store(readOnlyCheckList{m: read.m, amended: true}) - } - m.dirty[key] = &entryCheckList{p: unsafe.Pointer(&value)} - } - m.mu.Unlock() -} - -func (e *entryCheckList) tryStore(i *struct{}) bool { - for { - p := atomic.LoadPointer(&e.p) - if p == expungedCheckList { - return false - } - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return true - } - } -} - -func (e *entryCheckList) unexpungeLocked() (wasExpunged bool) { - return atomic.CompareAndSwapPointer(&e.p, expungedCheckList, nil) -} - -func (m *checkList) missLocked() { - m.misses++ - if m.misses < len(m.dirty) { - return - } - m.read.Store(readOnlyCheckList{m: m.dirty}) - m.dirty = nil - m.misses = 0 -} - -func (m *checkList) dirtyLocked() { - if m.dirty != nil { - return - } - - read, _ := m.read.Load().(readOnlyCheckList) - m.dirty = make(map[string]*entryCheckList, len(read.m)) - for k, e := range read.m { - if !e.tryExpungeLocked() { - m.dirty[k] = e - } - } -} - -func (e *entryCheckList) tryExpungeLocked() (isExpunged bool) { - p := atomic.LoadPointer(&e.p) - for p == nil { - if atomic.CompareAndSwapPointer(&e.p, nil, expungedCheckList) { - return true - } - p = atomic.LoadPointer(&e.p) - } - return p == expungedCheckList -} diff --git a/pkg/gateway/vald/handler/grpc/handler.go b/pkg/gateway/vald/handler/grpc/handler.go index 8629acf685..c71bf784f3 100644 --- a/pkg/gateway/vald/handler/grpc/handler.go +++ b/pkg/gateway/vald/handler/grpc/handler.go @@ -27,9 +27,11 @@ import ( "time" "github.com/kpango/fuid" - agent "github.com/vdaas/vald/apis/grpc/agent/core" "github.com/vdaas/vald/apis/grpc/gateway/vald" "github.com/vdaas/vald/apis/grpc/payload" + payloadv1 "github.com/vdaas/vald/apis/grpc/v1/payload" + valdv1 "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/core/algorithm" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" @@ -87,9 +89,28 @@ func (s *server) Search(ctx context.Context, req *payload.Search_Request) (res * span.End() } }() + if len(req.Vector) < algorithm.MinimumVectorDimensionSize { + return nil, errors.ErrInvalidDimensionSize(len(req.Vector), 0) + } return 
s.search(ctx, req.GetConfig(), - func(ctx context.Context, ac agent.AgentClient, copts ...grpc.CallOption) (*payload.Search_Response, error) { - return ac.Search(ctx, req, copts...) + func(ctx context.Context, vc valdv1.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { + res, err := vc.Search(ctx, &payloadv1.Search_Request{ + Vector: req.GetVector(), + }, copts...) + if err != nil { + return nil, err + } + distances := make([]*payload.Object_Distance, 0, len(res.GetResults())) + for _, r := range res.GetResults() { + distances = append(distances, &payload.Object_Distance{ + Id: r.GetId(), + Distance: r.GetDistance(), + }) + } + return &payload.Search_Response{ + RequestId: res.GetRequestId(), + Results: distances, + }, nil }) } @@ -112,17 +133,14 @@ func (s *server) SearchByID(ctx context.Context, req *payload.Search_IDRequest) } return nil, status.WrapWithNotFound(fmt.Sprintf("SearchByID API meta %s's uuid not found", req.GetId()), err, req, info.Get()) } - return s.search(ctx, req.GetConfig(), - func(ctx context.Context, ac agent.AgentClient, copts ...grpc.CallOption) (*payload.Search_Response, error) { - return ac.Search(ctx, &payload.Search_Request{ - Vector: vec.GetVector(), - Config: req.GetConfig(), - }, copts...) - }) + return s.Search(ctx, &payload.Search_Request{ + Vector: vec.GetVector(), + Config: req.GetConfig(), + }) } func (s *server) search(ctx context.Context, cfg *payload.Search_Config, - f func(ctx context.Context, ac agent.AgentClient, copts ...grpc.CallOption) (*payload.Search_Response, error)) ( + f func(ctx context.Context, vc valdv1.Client, copts ...grpc.CallOption) (*payload.Search_Response, error)) ( res *payload.Search_Response, err error) { ctx, span := trace.StartSpan(ctx, "vald/gateway-vald.search") defer func() { @@ -149,8 +167,8 @@ func (s *server) search(ctx context.Context, cfg *payload.Search_Config, eg.Go(safety.RecoverFunc(func() error { defer cancel() visited := new(sync.Map) - return s.gateway.BroadCast(ectx, func(ctx context.Context, target string, ac agent.AgentClient, copts ...grpc.CallOption) error { - r, err := f(ctx, ac, copts...) + return s.gateway.BroadCast(ectx, func(ctx context.Context, target string, vc valdv1.Client, copts ...grpc.CallOption) error { + r, err := f(ctx, vc, copts...) if err != nil { log.Debug("ignoring error:", err) return nil @@ -282,14 +300,14 @@ func (s *server) StreamSearchByID(stream vald.Vald_StreamSearchByIDServer) error }) } -func (s *server) Insert(ctx context.Context, vec *payload.Object_Vector) (ce *payload.Empty, err error) { +func (s *server) Insert(ctx context.Context, vec *payload.Object_Vector) (ce *payload.Object_Location, err error) { ctx, span := trace.StartSpan(ctx, "vald/gateway-vald.Insert") defer func() { if span != nil { span.End() } }() - if len(vec.GetVector()) < 2 { + if len(vec.GetVector()) < algorithm.MinimumVectorDimensionSize { err = errors.ErrInvalidDimensionSize(len(vec.GetVector()), 0) if span != nil { span.SetStatus(trace.StatusCodeInvalidArgument(err.Error())) @@ -326,8 +344,13 @@ func (s *server) Insert(ctx context.Context, vec *payload.Object_Vector) (ce *pa vec.Id = uuid mu := new(sync.Mutex) targets := make([]string, 0, s.replica) - err = s.gateway.DoMulti(ctx, s.replica, func(ctx context.Context, target string, ac agent.AgentClient, copts ...grpc.CallOption) (err error) { - _, err = ac.Insert(ctx, vec, copts...) 
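// The Search hunk above switches the per-agent callback from the old agent.AgentClient to
// the v1 vald client, so every v1 response has to be mapped back into the legacy payload
// types the gateway still exposes. A sketch of that mapping factored into a helper (the
// helper name is hypothetical; the field accessors are the ones used in the hunk, and the
// payload / payloadv1 imports are the ones already declared in handler.go):

func toLegacySearchResponse(res *payloadv1.Search_Response) *payload.Search_Response {
	distances := make([]*payload.Object_Distance, 0, len(res.GetResults()))
	for _, r := range res.GetResults() {
		distances = append(distances, &payload.Object_Distance{
			Id:       r.GetId(),
			Distance: r.GetDistance(),
		})
	}
	return &payload.Search_Response{
		RequestId: res.GetRequestId(),
		Results:   distances,
	}
}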
+ err = s.gateway.DoMulti(ctx, s.replica, func(ctx context.Context, target string, vc valdv1.Client, copts ...grpc.CallOption) (err error) { + _, err = vc.Insert(ctx, &payloadv1.Insert_Request{ + Vector: &payloadv1.Object_Vector{ + Id: vec.GetId(), + Vector: vec.GetVector(), + }, + }, copts...) if err != nil { if err == errors.ErrRPCCallFailed(target, context.Canceled) { return nil @@ -349,9 +372,8 @@ func (s *server) Insert(ctx context.Context, vec *payload.Object_Vector) (ce *pa return nil, status.WrapWithInternal(fmt.Sprintf("Insert API failed to Execute DoMulti error = %s", err.Error()), err, info.Get()) } if s.backup != nil { - vecs := &payload.Backup_MetaVector{ + vecs := &payloadv1.Backup_Vector{ Uuid: uuid, - Meta: meta, Ips: targets, } if vec != nil { @@ -368,7 +390,7 @@ func (s *server) Insert(ctx context.Context, vec *payload.Object_Vector) (ce *pa } } log.Debugf("Insert API insert succeeded to %v", targets) - return new(payload.Empty), nil + return new(payload.Object_Location), nil } func (s *server) StreamInsert(stream vald.Vald_StreamInsertServer) error { @@ -385,7 +407,7 @@ func (s *server) StreamInsert(stream vald.Vald_StreamInsertServer) error { }) } -func (s *server) MultiInsert(ctx context.Context, vecs *payload.Object_Vectors) (res *payload.Empty, err error) { +func (s *server) MultiInsert(ctx context.Context, vecs *payload.Object_Vectors) (res *payload.Object_Locations, err error) { ctx, span := trace.StartSpan(ctx, "vald/gateway-vald.MultiInsert") defer func() { if span != nil { @@ -394,8 +416,9 @@ func (s *server) MultiInsert(ctx context.Context, vecs *payload.Object_Vectors) }() metaMap := make(map[string]string) metas := make([]string, 0, len(vecs.GetVectors())) - for i, vec := range vecs.GetVectors() { - if len(vec.GetVector()) < 2 { + reqs := make([]*payloadv1.Insert_Request, 0, len(vecs.GetVectors())) + for _, vec := range vecs.GetVectors() { + if len(vec.GetVector()) < algorithm.MinimumVectorDimensionSize { err = errors.ErrInvalidDimensionSize(len(vec.GetVector()), 0) if span != nil { span.SetStatus(trace.StatusCodeInvalidArgument(err.Error())) @@ -406,7 +429,12 @@ func (s *server) MultiInsert(ctx context.Context, vecs *payload.Object_Vectors) meta := vec.GetId() metaMap[uuid] = meta metas = append(metas, meta) - vecs.Vectors[i].Id = uuid + reqs = append(reqs, &payloadv1.Insert_Request{ + Vector: &payloadv1.Object_Vector{ + Vector: vec.GetVector(), + Id: uuid, + }, + }) } for _, meta := range metas { @@ -438,8 +466,10 @@ func (s *server) MultiInsert(ctx context.Context, vecs *payload.Object_Vectors) mu := new(sync.Mutex) targets := make([]string, 0, s.replica) - gerr := s.gateway.DoMulti(ctx, s.replica, func(ctx context.Context, target string, ac agent.AgentClient, copts ...grpc.CallOption) (err error) { - _, err = ac.MultiInsert(ctx, vecs, copts...) + gerr := s.gateway.DoMulti(ctx, s.replica, func(ctx context.Context, target string, vc valdv1.Client, copts ...grpc.CallOption) (err error) { + _, err = vc.MultiInsert(ctx, &payloadv1.Insert_MultiRequest{ + Requests: reqs, + }, copts...) 
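// Both Insert and MultiInsert above fan the write out with gateway.DoMulti: the request is
// sent to s.replica agents, and the address of every agent that accepted it is collected
// under a mutex so the replica set can be handed to the backup manager afterwards.
// A reduced sketch of that fan-out, with the handler details stripped away (doMulti and
// send are hypothetical stand-ins for s.gateway.DoMulti and the per-agent RPC; assumes the
// context and sync imports):

func collectTargets(ctx context.Context, replica int,
	doMulti func(ctx context.Context, n int, f func(ctx context.Context, target string) error) error,
	send func(ctx context.Context, target string) error,
) ([]string, error) {
	var mu sync.Mutex
	targets := make([]string, 0, replica)
	err := doMulti(ctx, replica, func(ctx context.Context, target string) error {
		if err := send(ctx, target); err != nil {
			return err
		}
		mu.Lock() // DoMulti runs the callback concurrently, so guard the shared slice
		targets = append(targets, target)
		mu.Unlock()
		return nil
	})
	if err != nil {
		return nil, err
	}
	return targets, nil
}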
if err != nil { return err } @@ -457,13 +487,13 @@ func (s *server) MultiInsert(ctx context.Context, vecs *payload.Object_Vectors) } if s.backup != nil { - mvecs := new(payload.Backup_MetaVectors) - mvecs.Vectors = make([]*payload.Backup_MetaVector, 0, len(vecs.GetVectors())) - for _, vec := range vecs.GetVectors() { + mvecs := new(payloadv1.Backup_Vectors) + mvecs.Vectors = make([]*payloadv1.Backup_Vector, 0, len(vecs.GetVectors())) + for _, req := range reqs { + vec := req.GetVector() uuid := vec.GetId() - mvecs.Vectors = append(mvecs.Vectors, &payload.Backup_MetaVector{ + mvecs.Vectors = append(mvecs.Vectors, &payloadv1.Backup_Vector{ Uuid: uuid, - Meta: metaMap[uuid], Vector: vec.GetVector(), Ips: targets, }) @@ -476,17 +506,17 @@ func (s *server) MultiInsert(ctx context.Context, vecs *payload.Object_Vectors) return nil, status.WrapWithInternal(fmt.Sprintf("MultiInsert API failed RegisterMultiple %#v", mvecs), err, info.Get()) } } - return new(payload.Empty), nil + return new(payload.Object_Locations), nil } -func (s *server) Update(ctx context.Context, vec *payload.Object_Vector) (res *payload.Empty, err error) { +func (s *server) Update(ctx context.Context, vec *payload.Object_Vector) (res *payload.Object_Location, err error) { ctx, span := trace.StartSpan(ctx, "vald/gateway-vald.Update") defer func() { if span != nil { span.End() } }() - if len(vec.GetVector()) < 2 { + if len(vec.GetVector()) < algorithm.MinimumVectorDimensionSize { err = errors.ErrInvalidDimensionSize(len(vec.GetVector()), 0) if span != nil { span.SetStatus(trace.StatusCodeInvalidArgument(err.Error())) @@ -513,11 +543,16 @@ func (s *server) Update(ctx context.Context, vec *payload.Object_Vector) (res *p for _, loc := range locs { lmap[loc] = struct{}{} } - err = s.gateway.BroadCast(ctx, func(ctx context.Context, target string, ac agent.AgentClient, copts ...grpc.CallOption) error { + err = s.gateway.BroadCast(ctx, func(ctx context.Context, target string, vc valdv1.Client, copts ...grpc.CallOption) error { target = strings.SplitN(target, ":", 2)[0] _, ok := lmap[target] if ok { - _, err = ac.Update(ctx, vec, copts...) + _, err = vc.Update(ctx, &payloadv1.Update_Request{ + Vector: &payloadv1.Object_Vector{ + Vector: vec.GetVector(), + Id: vec.GetId(), + }, + }, copts...) 
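// The Update hunk above only talks to the agents that actually hold the vector: the backup
// manager returns the replica addresses (locs), they are turned into a lookup set (lmap),
// and inside gateway.BroadCast the callback strips the port from the target and skips any
// agent that is not in the set. A reduced sketch of that filtering step (broadcast and
// update are hypothetical stand-ins for s.gateway.BroadCast and the per-agent RPC; assumes
// the context and strings imports):

func updateOnLocatedAgents(ctx context.Context, locs []string,
	broadcast func(ctx context.Context, f func(ctx context.Context, target string) error) error,
	update func(ctx context.Context, target string) error,
) error {
	lmap := make(map[string]struct{}, len(locs))
	for _, loc := range locs {
		lmap[loc] = struct{}{}
	}
	return broadcast(ctx, func(ctx context.Context, target string) error {
		host := strings.SplitN(target, ":", 2)[0] // lmap keys are addresses without ports
		if _, ok := lmap[host]; !ok {
			return nil // this agent does not hold the vector; nothing to update
		}
		return update(ctx, target)
	})
}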
if err != nil { return err } @@ -530,13 +565,11 @@ func (s *server) Update(ctx context.Context, vec *payload.Object_Vector) (res *p } return nil, status.WrapWithInternal(fmt.Sprintf("Update API failed request %#v", vec), err, info.Get()) } - mvec := &payload.Backup_MetaVector{ + err = s.backup.Register(ctx, &payloadv1.Backup_Vector{ Uuid: uuid, - Meta: meta, Vector: vec.GetVector(), Ips: locs, - } - err = s.backup.Register(ctx, mvec) + }) if err != nil { if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) @@ -544,7 +577,7 @@ func (s *server) Update(ctx context.Context, vec *payload.Object_Vector) (res *p return nil, status.WrapWithInternal(fmt.Sprintf("Update API failed backup %#v", vec), err, info.Get()) } - return new(payload.Empty), nil + return new(payload.Object_Location), nil } func (s *server) StreamUpdate(stream vald.Vald_StreamUpdateServer) error { @@ -561,7 +594,7 @@ func (s *server) StreamUpdate(stream vald.Vald_StreamUpdateServer) error { }) } -func (s *server) MultiUpdate(ctx context.Context, vecs *payload.Object_Vectors) (res *payload.Empty, err error) { +func (s *server) MultiUpdate(ctx context.Context, vecs *payload.Object_Vectors) (res *payload.Object_Locations, err error) { ctx, span := trace.StartSpan(ctx, "vald/gateway-vald.MultiUpdate") defer func() { if span != nil { @@ -570,7 +603,7 @@ func (s *server) MultiUpdate(ctx context.Context, vecs *payload.Object_Vectors) }() ids := make([]string, 0, len(vecs.GetVectors())) for _, vec := range vecs.GetVectors() { - if len(vec.GetVector()) < 2 { + if len(vec.GetVector()) < algorithm.MinimumVectorDimensionSize { err = errors.ErrInvalidDimensionSize(len(vec.GetVector()), 0) if span != nil { span.SetStatus(trace.StatusCodeInvalidArgument(err.Error())) @@ -595,10 +628,10 @@ func (s *server) MultiUpdate(ctx context.Context, vecs *payload.Object_Vectors) } return nil, status.WrapWithInternal(fmt.Sprintf("MultiUpdate API failed Insert request %#v", vecs), err, info.Get()) } - return new(payload.Empty), nil + return new(payload.Object_Locations), nil } -func (s *server) Upsert(ctx context.Context, vec *payload.Object_Vector) (*payload.Empty, error) { +func (s *server) Upsert(ctx context.Context, vec *payload.Object_Vector) (*payload.Object_Location, error) { ctx, span := trace.StartSpan(ctx, "vald/gateway-vald.Upsert") defer func() { if span != nil { @@ -606,7 +639,7 @@ func (s *server) Upsert(ctx context.Context, vec *payload.Object_Vector) (*paylo } }() - if len(vec.GetVector()) < 2 { + if len(vec.GetVector()) < algorithm.MinimumVectorDimensionSize { err := errors.ErrInvalidDimensionSize(len(vec.GetVector()), 0) if span != nil { span.SetStatus(trace.StatusCodeInvalidArgument(err.Error())) @@ -634,7 +667,7 @@ func (s *server) Upsert(ctx context.Context, vec *payload.Object_Vector) (*paylo } } - return new(payload.Empty), errs + return new(payload.Object_Location), errs } func (s *server) StreamUpsert(stream vald.Vald_StreamUpsertServer) error { @@ -651,7 +684,7 @@ func (s *server) StreamUpsert(stream vald.Vald_StreamUpsertServer) error { }) } -func (s *server) MultiUpsert(ctx context.Context, vecs *payload.Object_Vectors) (*payload.Empty, error) { +func (s *server) MultiUpsert(ctx context.Context, vecs *payload.Object_Vectors) (*payload.Object_Locations, error) { ctx, span := trace.StartSpan(ctx, "vald/gateway-vald.MultiUpsert") defer func() { if span != nil { @@ -664,7 +697,7 @@ func (s *server) MultiUpsert(ctx context.Context, vecs *payload.Object_Vectors) var errs error for _, vec := range vecs.GetVectors() { - 
if len(vec.GetVector()) < 2 { + if len(vec.GetVector()) < algorithm.MinimumVectorDimensionSize { err := errors.ErrInvalidDimensionSize(len(vec.GetVector()), 0) if span != nil { span.SetStatus(trace.StatusCodeInvalidArgument(err.Error())) @@ -715,10 +748,10 @@ func (s *server) MultiUpsert(ctx context.Context, vecs *payload.Object_Vectors) return nil, status.WrapWithInternal("MultiUpsert API failed", errs, info.Get()) } - return new(payload.Empty), errs + return new(payload.Object_Locations), errs } -func (s *server) Remove(ctx context.Context, id *payload.Object_ID) (*payload.Empty, error) { +func (s *server) Remove(ctx context.Context, id *payload.Object_ID) (*payload.Object_Location, error) { ctx, span := trace.StartSpan(ctx, "vald/gateway-vald.Remove") defer func() { if span != nil { @@ -744,11 +777,13 @@ func (s *server) Remove(ctx context.Context, id *payload.Object_ID) (*payload.Em for _, loc := range locs { lmap[loc] = struct{}{} } - err = s.gateway.BroadCast(ctx, func(ctx context.Context, target string, ac agent.AgentClient, copts ...grpc.CallOption) error { + err = s.gateway.BroadCast(ctx, func(ctx context.Context, target string, vc valdv1.Client, copts ...grpc.CallOption) error { _, ok := lmap[target] if ok { - _, err = ac.Remove(ctx, &payload.Object_ID{ - Id: uuid, + _, err = vc.Remove(ctx, &payloadv1.Remove_Request{ + Id: &payloadv1.Object_ID{ + Id: uuid, + }, }, copts...) if err != nil { return err @@ -776,7 +811,7 @@ func (s *server) Remove(ctx context.Context, id *payload.Object_ID) (*payload.Em } return nil, status.WrapWithInternal(fmt.Sprintf("Remove API failed to Remove backup uuid = %s", uuid), err, info.Get()) } - return new(payload.Empty), nil + return new(payload.Object_Location), nil } func (s *server) StreamRemove(stream vald.Vald_StreamRemoveServer) error { @@ -793,7 +828,7 @@ func (s *server) StreamRemove(stream vald.Vald_StreamRemoveServer) error { }) } -func (s *server) MultiRemove(ctx context.Context, ids *payload.Object_IDs) (res *payload.Empty, err error) { +func (s *server) MultiRemove(ctx context.Context, ids *payload.Object_IDs) (res *payload.Object_Locations, err error) { ctx, span := trace.StartSpan(ctx, "vald/gateway-vald.MultiRemove") defer func() { if span != nil { @@ -817,11 +852,19 @@ func (s *server) MultiRemove(ctx context.Context, ids *payload.Object_IDs) (res lmap[loc] = append(lmap[loc], uuid) } } - err = s.gateway.BroadCast(ctx, func(ctx context.Context, target string, ac agent.AgentClient, copts ...grpc.CallOption) error { + err = s.gateway.BroadCast(ctx, func(ctx context.Context, target string, vc valdv1.Client, copts ...grpc.CallOption) error { uuids, ok := lmap[target] if ok { - _, err := ac.MultiRemove(ctx, &payload.Object_IDs{ - Ids: uuids, + reqs := make([]*payloadv1.Remove_Request, 0, len(uuids)) + for _, uuid := range uuids { + reqs = append(reqs, &payloadv1.Remove_Request{ + Id: &payloadv1.Object_ID{ + Id: uuid, + }, + }) + } + _, err := vc.MultiRemove(ctx, &payloadv1.Remove_MultiRequest{ + Requests: reqs, }, copts...) 
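// MultiRemove above now groups the target uuids per agent (lmap maps an agent address to
// the uuids it holds) and wraps each group in the v1 request types before calling that
// agent once per batch. The per-batch construction, isolated from the handler (the
// function name is hypothetical; the request types are the ones used in the hunk, via the
// payloadv1 import already declared in handler.go):

func buildRemoveMultiRequest(uuids []string) *payloadv1.Remove_MultiRequest {
	reqs := make([]*payloadv1.Remove_Request, 0, len(uuids))
	for _, uuid := range uuids {
		reqs = append(reqs, &payloadv1.Remove_Request{
			Id: &payloadv1.Object_ID{
				Id: uuid,
			},
		})
	}
	return &payloadv1.Remove_MultiRequest{
		Requests: reqs,
	}
}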
if err != nil { return err @@ -849,10 +892,10 @@ func (s *server) MultiRemove(ctx context.Context, ids *payload.Object_IDs) (res } return nil, status.WrapWithInternal(fmt.Sprintf("MultiRemove API failed to Remove backup uuids %v ", uuids), err, info.Get()) } - return new(payload.Empty), nil + return new(payload.Object_Locations), nil } -func (s *server) GetObject(ctx context.Context, id *payload.Object_ID) (vec *payload.Backup_MetaVector, err error) { +func (s *server) GetObject(ctx context.Context, id *payload.Object_ID) (vec *payload.Object_Vector, err error) { ctx, span := trace.StartSpan(ctx, "vald/gateway-vald.GetObject") defer func() { if span != nil { @@ -867,13 +910,17 @@ func (s *server) GetObject(ctx context.Context, id *payload.Object_ID) (vec *pay } return nil, status.WrapWithNotFound(fmt.Sprintf("GetObject API meta %s's uuid not found", meta), err, info.Get()) } - vec, err = s.backup.GetObject(ctx, uuid) + mvec, err := s.backup.GetObject(ctx, uuid) if err != nil { if span != nil { span.SetStatus(trace.StatusCodeNotFound(err.Error())) } return nil, status.WrapWithNotFound(fmt.Sprintf("GetObject API meta %s uuid %s Object not found", meta, uuid), err, info.Get()) } + vec = &payload.Object_Vector{ + Id: mvec.GetUuid(), + Vector: mvec.GetVector(), + } return vec, nil } diff --git a/pkg/gateway/vald/handler/grpc/handler_test.go b/pkg/gateway/vald/handler/grpc/handler_test.go index 6728b93ffc..1225c06669 100644 --- a/pkg/gateway/vald/handler/grpc/handler_test.go +++ b/pkg/gateway/vald/handler/grpc/handler_test.go @@ -23,18 +23,18 @@ import ( "testing" "time" - agent "github.com/vdaas/vald/apis/grpc/agent/core" "github.com/vdaas/vald/apis/grpc/gateway/vald" "github.com/vdaas/vald/apis/grpc/payload" + valdv1 "github.com/vdaas/vald/apis/grpc/v1/vald" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/pkg/gateway/vald/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -83,9 +83,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -100,12 +102,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Exists(t *testing.T) { + t.Parallel() type args struct { ctx context.Context meta *payload.Object_ID @@ -192,9 +194,11 @@ func Test_server_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -219,12 +223,12 @@ func Test_server_Exists(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Search(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Search_Request @@ -311,9 +315,11 @@ func Test_server_Search(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -338,12 +344,12 @@ func 
Test_server_Search(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_SearchByID(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Search_IDRequest @@ -430,9 +436,11 @@ func Test_server_SearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -457,16 +465,16 @@ func Test_server_SearchByID(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_search(t *testing.T) { + t.Parallel() type args struct { ctx context.Context cfg *payload.Search_Config - f func(ctx context.Context, ac agent.AgentClient, copts ...grpc.CallOption) (*payload.Search_Response, error) + f func(ctx context.Context, vc valdv1.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) } type fields struct { eg errgroup.Group @@ -552,9 +560,11 @@ func Test_server_search(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -579,12 +589,12 @@ func Test_server_search(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_StreamSearch(t *testing.T) { + t.Parallel() type args struct { stream vald.Vald_StreamSearchServer } @@ -664,9 +674,11 @@ func Test_server_StreamSearch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -691,12 +703,12 @@ func Test_server_StreamSearch(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_StreamSearchByID(t *testing.T) { + t.Parallel() type args struct { stream vald.Vald_StreamSearchByIDServer } @@ -776,9 +788,11 @@ func Test_server_StreamSearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -803,12 +817,12 @@ func Test_server_StreamSearchByID(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Insert(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vec *payload.Object_Vector @@ -824,7 +838,7 @@ func Test_server_Insert(t *testing.T) { streamConcurrency int } type want struct { - wantCe *payload.Empty + wantCe *payload.Object_Location err error } type test struct { @@ -832,11 +846,11 @@ func Test_server_Insert(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotCe *payload.Empty, err error) error { + defaultCheckFunc := func(w want, gotCe *payload.Object_Location, err error) error { if 
!errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -895,9 +909,11 @@ func Test_server_Insert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -922,12 +938,12 @@ func Test_server_Insert(t *testing.T) { if err := test.checkFunc(test.want, gotCe, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_StreamInsert(t *testing.T) { + t.Parallel() type args struct { stream vald.Vald_StreamInsertServer } @@ -1007,9 +1023,11 @@ func Test_server_StreamInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1034,12 +1052,12 @@ func Test_server_StreamInsert(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_MultiInsert(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vecs *payload.Object_Vectors @@ -1055,7 +1073,7 @@ func Test_server_MultiInsert(t *testing.T) { streamConcurrency int } type want struct { - wantRes *payload.Empty + wantRes *payload.Object_Locations err error } type test struct { @@ -1063,11 +1081,11 @@ func Test_server_MultiInsert(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Empty, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -1126,9 +1144,11 @@ func Test_server_MultiInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1153,12 +1173,12 @@ func Test_server_MultiInsert(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Update(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vec *payload.Object_Vector @@ -1174,7 +1194,7 @@ func Test_server_Update(t *testing.T) { streamConcurrency int } type want struct { - wantRes *payload.Empty + wantRes *payload.Object_Location err error } type test struct { @@ -1182,11 +1202,11 @@ func Test_server_Update(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Empty, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -1245,9 +1265,11 @@ func Test_server_Update(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt 
*testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1272,12 +1294,12 @@ func Test_server_Update(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_StreamUpdate(t *testing.T) { + t.Parallel() type args struct { stream vald.Vald_StreamUpdateServer } @@ -1357,9 +1379,11 @@ func Test_server_StreamUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1384,12 +1408,12 @@ func Test_server_StreamUpdate(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_MultiUpdate(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vecs *payload.Object_Vectors @@ -1405,7 +1429,7 @@ func Test_server_MultiUpdate(t *testing.T) { streamConcurrency int } type want struct { - wantRes *payload.Empty + wantRes *payload.Object_Locations err error } type test struct { @@ -1413,11 +1437,11 @@ func Test_server_MultiUpdate(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Empty, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -1476,9 +1500,11 @@ func Test_server_MultiUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1503,12 +1529,12 @@ func Test_server_MultiUpdate(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Upsert(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vec *payload.Object_Vector @@ -1524,7 +1550,7 @@ func Test_server_Upsert(t *testing.T) { streamConcurrency int } type want struct { - want *payload.Empty + want *payload.Object_Location err error } type test struct { @@ -1532,11 +1558,11 @@ func Test_server_Upsert(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *payload.Empty, err error) error { + defaultCheckFunc := func(w want, got *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -1595,9 +1621,11 @@ func Test_server_Upsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1622,12 +1650,12 @@ func Test_server_Upsert(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { 
tt.Errorf("error = %v", err) } - }) } } func Test_server_StreamUpsert(t *testing.T) { + t.Parallel() type args struct { stream vald.Vald_StreamUpsertServer } @@ -1707,9 +1735,11 @@ func Test_server_StreamUpsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1734,12 +1764,12 @@ func Test_server_StreamUpsert(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_MultiUpsert(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vecs *payload.Object_Vectors @@ -1755,7 +1785,7 @@ func Test_server_MultiUpsert(t *testing.T) { streamConcurrency int } type want struct { - want *payload.Empty + want *payload.Object_Locations err error } type test struct { @@ -1763,11 +1793,11 @@ func Test_server_MultiUpsert(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *payload.Empty, err error) error { + defaultCheckFunc := func(w want, got *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -1826,9 +1856,11 @@ func Test_server_MultiUpsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1853,12 +1885,12 @@ func Test_server_MultiUpsert(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Remove(t *testing.T) { + t.Parallel() type args struct { ctx context.Context id *payload.Object_ID @@ -1874,7 +1906,7 @@ func Test_server_Remove(t *testing.T) { streamConcurrency int } type want struct { - want *payload.Empty + want *payload.Object_Location err error } type test struct { @@ -1882,11 +1914,11 @@ func Test_server_Remove(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Location, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *payload.Empty, err error) error { + defaultCheckFunc := func(w want, got *payload.Object_Location, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -1945,9 +1977,11 @@ func Test_server_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1972,12 +2006,12 @@ func Test_server_Remove(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_StreamRemove(t *testing.T) { + t.Parallel() type args struct { stream vald.Vald_StreamRemoveServer } @@ -2057,9 +2091,11 @@ func Test_server_StreamRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc 
t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2084,12 +2120,12 @@ func Test_server_StreamRemove(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_MultiRemove(t *testing.T) { + t.Parallel() type args struct { ctx context.Context ids *payload.Object_IDs @@ -2105,7 +2141,7 @@ func Test_server_MultiRemove(t *testing.T) { streamConcurrency int } type want struct { - wantRes *payload.Empty + wantRes *payload.Object_Locations err error } type test struct { @@ -2113,11 +2149,11 @@ func Test_server_MultiRemove(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Empty, error) error + checkFunc func(want, *payload.Object_Locations, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Empty, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Object_Locations, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -2176,9 +2212,11 @@ func Test_server_MultiRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2203,12 +2241,12 @@ func Test_server_MultiRemove(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_GetObject(t *testing.T) { + t.Parallel() type args struct { ctx context.Context id *payload.Object_ID @@ -2224,7 +2262,7 @@ func Test_server_GetObject(t *testing.T) { streamConcurrency int } type want struct { - wantVec *payload.Backup_MetaVector + wantVec *payload.Object_Vector err error } type test struct { @@ -2232,11 +2270,11 @@ func Test_server_GetObject(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Backup_MetaVector, error) error + checkFunc func(want, *payload.Object_Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotVec *payload.Backup_MetaVector, err error) error { + defaultCheckFunc := func(w want, gotVec *payload.Object_Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -2295,9 +2333,11 @@ func Test_server_GetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2322,12 +2362,12 @@ func Test_server_GetObject(t *testing.T) { if err := test.checkFunc(test.want, gotVec, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_StreamGetObject(t *testing.T) { + t.Parallel() type args struct { stream vald.Vald_StreamGetObjectServer } @@ -2407,9 +2447,11 @@ func Test_server_StreamGetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -2434,7 +2476,6 @@ func Test_server_StreamGetObject(t 
*testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/gateway/vald/handler/grpc/option.go b/pkg/gateway/vald/handler/grpc/option.go index e1c8ce30ad..8dee039646 100644 --- a/pkg/gateway/vald/handler/grpc/option.go +++ b/pkg/gateway/vald/handler/grpc/option.go @@ -27,14 +27,12 @@ import ( type Option func(*server) -var ( - defaultOpts = []Option{ - WithErrGroup(errgroup.Get()), - WithReplicationCount(3), - WithStreamConcurrency(20), - WithTimeout("5s"), - } -) +var defaultOpts = []Option{ + WithErrGroup(errgroup.Get()), + WithReplicationCount(3), + WithStreamConcurrency(20), + WithTimeout("5s"), +} func WithGateway(g service.Gateway) Option { return func(s *server) { diff --git a/pkg/gateway/vald/handler/grpc/option_test.go b/pkg/gateway/vald/handler/grpc/option_test.go index 5483e75afe..28058c653d 100644 --- a/pkg/gateway/vald/handler/grpc/option_test.go +++ b/pkg/gateway/vald/handler/grpc/option_test.go @@ -22,11 +22,12 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/pkg/gateway/vald/service" - "go.uber.org/goleak" ) func TestWithGateway(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { g service.Gateway @@ -64,7 +65,7 @@ func TestWithGateway(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -100,9 +101,11 @@ func TestWithGateway(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -123,7 +126,7 @@ func TestWithGateway(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -131,7 +134,7 @@ func TestWithGateway(t *testing.T) { got := WithGateway(test.args.g) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -140,6 +143,8 @@ func TestWithGateway(t *testing.T) { } func TestWithMeta(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { m service.Meta @@ -177,7 +182,7 @@ func TestWithMeta(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -213,9 +218,11 @@ func TestWithMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -236,7 +243,7 @@ func TestWithMeta(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil 
{ test.checkFunc = defaultCheckFunc @@ -244,7 +251,7 @@ func TestWithMeta(t *testing.T) { got := WithMeta(test.args.m) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -253,6 +260,8 @@ func TestWithMeta(t *testing.T) { } func TestWithBackup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { b service.Backup @@ -290,7 +299,7 @@ func TestWithBackup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -326,9 +335,11 @@ func TestWithBackup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -349,7 +360,7 @@ func TestWithBackup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -357,7 +368,7 @@ func TestWithBackup(t *testing.T) { got := WithBackup(test.args.b) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -366,6 +377,8 @@ func TestWithBackup(t *testing.T) { } func TestWithFilters(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { filter service.Filter @@ -403,7 +416,7 @@ func TestWithFilters(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -439,9 +452,11 @@ func TestWithFilters(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -462,7 +477,7 @@ func TestWithFilters(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -470,7 +485,7 @@ func TestWithFilters(t *testing.T) { got := WithFilters(test.args.filter) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -479,6 +494,8 @@ func TestWithFilters(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -516,7 +533,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return 
errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -552,9 +569,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -575,7 +594,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -583,7 +602,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -592,6 +611,8 @@ func TestWithErrGroup(t *testing.T) { } func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -629,7 +650,7 @@ func TestWithTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -665,9 +686,11 @@ func TestWithTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -688,7 +711,7 @@ func TestWithTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -696,7 +719,7 @@ func TestWithTimeout(t *testing.T) { got := WithTimeout(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -705,6 +728,8 @@ func TestWithTimeout(t *testing.T) { } func TestWithReplicationCount(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { rep int @@ -742,7 +767,7 @@ func TestWithReplicationCount(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -778,9 +803,11 @@ func TestWithReplicationCount(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -801,7 +828,7 @@ func TestWithReplicationCount(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -809,7 +836,7 @@ func TestWithReplicationCount(t 
*testing.T) { got := WithReplicationCount(test.args.rep) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -818,6 +845,8 @@ func TestWithReplicationCount(t *testing.T) { } func TestWithStreamConcurrency(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { c int @@ -855,7 +884,7 @@ func TestWithStreamConcurrency(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -891,9 +920,11 @@ func TestWithStreamConcurrency(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -914,7 +945,7 @@ func TestWithStreamConcurrency(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -922,7 +953,7 @@ func TestWithStreamConcurrency(t *testing.T) { got := WithStreamConcurrency(test.args.c) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/gateway/vald/handler/rest/handler_test.go b/pkg/gateway/vald/handler/rest/handler_test.go index ffa160b438..d3021c4b58 100644 --- a/pkg/gateway/vald/handler/rest/handler_test.go +++ b/pkg/gateway/vald/handler/rest/handler_test.go @@ -24,11 +24,11 @@ import ( "github.com/vdaas/vald/apis/grpc/gateway/vald" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -77,9 +77,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -94,12 +96,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Index(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -165,9 +167,11 @@ func Test_handler_Index(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -185,12 +189,12 @@ func Test_handler_Index(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Search(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -256,9 +260,11 @@ func Test_handler_Search(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if 
test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -276,12 +282,12 @@ func Test_handler_Search(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_SearchByID(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -347,9 +353,11 @@ func Test_handler_SearchByID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,12 +375,12 @@ func Test_handler_SearchByID(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Insert(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -438,9 +446,11 @@ func Test_handler_Insert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -458,12 +468,12 @@ func Test_handler_Insert(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_MultiInsert(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -529,9 +539,11 @@ func Test_handler_MultiInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -549,12 +561,12 @@ func Test_handler_MultiInsert(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Update(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -620,9 +632,11 @@ func Test_handler_Update(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -640,12 +654,12 @@ func Test_handler_Update(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_MultiUpdate(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -711,9 +725,11 @@ func Test_handler_MultiUpdate(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -731,12 +747,12 @@ func Test_handler_MultiUpdate(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Remove(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -802,9 +818,11 @@ func Test_handler_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() 
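The change repeated throughout these handler and option tests is the same three-line pattern: re-bind the range variable (`test := tc`), mark the subtest with `tt.Parallel()`, and report against the subtest's `tt`. A minimal, self-contained sketch of why the re-binding matters under pre-Go 1.22 loop semantics; the `Add` function and its cases are hypothetical and not taken from this repository:

```go
package example

import "testing"

// Add is a hypothetical function under test, used only to make the
// parallel table-driven pattern from this diff self-contained.
func Add(a, b int) int { return a + b }

func TestAdd(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		a, b int
		want int
	}{
		{name: "both positive", a: 1, b: 2, want: 3},
		{name: "with negative", a: -1, b: 2, want: 1},
	}
	for _, tc := range tests {
		// Capture the range variable; without this, parallel subtests started
		// in later iterations could all observe the final value of tc
		// (pre-Go 1.22 loop semantics).
		test := tc
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			if got := Add(test.a, test.b); got != test.want {
				tt.Errorf("got = %d, want = %d", got, test.want)
			}
		})
	}
}
```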
+ defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -822,12 +840,12 @@ func Test_handler_Remove(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_MultiRemove(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -893,9 +911,11 @@ func Test_handler_MultiRemove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -913,12 +933,12 @@ func Test_handler_MultiRemove(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_GetObject(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -984,9 +1004,11 @@ func Test_handler_GetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1004,12 +1026,12 @@ func Test_handler_GetObject(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Exists(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -1075,9 +1097,11 @@ func Test_handler_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1095,7 +1119,6 @@ func Test_handler_Exists(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/gateway/vald/handler/rest/option.go b/pkg/gateway/vald/handler/rest/option.go index c684ad7ec8..57e837226c 100644 --- a/pkg/gateway/vald/handler/rest/option.go +++ b/pkg/gateway/vald/handler/rest/option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/apis/grpc/gateway/vald" type Option func(*handler) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithVald(v vald.ValdServer) Option { return func(h *handler) { diff --git a/pkg/gateway/vald/handler/rest/option_test.go b/pkg/gateway/vald/handler/rest/option_test.go index 133b88509f..9b71adc31c 100644 --- a/pkg/gateway/vald/handler/rest/option_test.go +++ b/pkg/gateway/vald/handler/rest/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/apis/grpc/gateway/vald" - "go.uber.org/goleak" ) func TestWithVald(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { v vald.ValdServer @@ -63,7 +64,7 @@ func TestWithVald(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithVald(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + 
tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithVald(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithVald(t *testing.T) { got := WithVald(test.args.v) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/gateway/vald/router/option.go b/pkg/gateway/vald/router/option.go index 76abf879ef..6e2e4747a8 100644 --- a/pkg/gateway/vald/router/option.go +++ b/pkg/gateway/vald/router/option.go @@ -23,11 +23,9 @@ import ( type Option func(*router) -var ( - defaultOpts = []Option{ - WithTimeout("3s"), - } -) +var defaultOpts = []Option{ + WithTimeout("3s"), +} func WithHandler(h rest.Handler) Option { return func(r *router) { diff --git a/pkg/gateway/vald/router/option_test.go b/pkg/gateway/vald/router/option_test.go index ca2fc74726..c5cfe67352 100644 --- a/pkg/gateway/vald/router/option_test.go +++ b/pkg/gateway/vald/router/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/pkg/gateway/vald/handler/rest" - "go.uber.org/goleak" ) func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { h rest.Handler @@ -63,7 +64,7 @@ func TestWithHandler(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithHandler(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithHandler(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithHandler(t *testing.T) { got := WithHandler(test.args.h) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -139,6 +142,8 @@ func TestWithHandler(t *testing.T) { } func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { timeout string @@ -176,7 +181,7 @@ func TestWithTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -212,9 +217,11 @@ func TestWithTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil 
{ test.beforeFunc(test.args) } @@ -235,7 +242,7 @@ func TestWithTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -243,7 +250,7 @@ func TestWithTimeout(t *testing.T) { got := WithTimeout(test.args.timeout) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/gateway/vald/router/router.go b/pkg/gateway/vald/router/router.go index cfb944a433..c906236938 100644 --- a/pkg/gateway/vald/router/router.go +++ b/pkg/gateway/vald/router/router.go @@ -29,7 +29,7 @@ type router struct { timeout string } -// New returns REST route&method information from handler interface +// New returns REST route&method information from handler interface. func New(opts ...Option) http.Handler { r := new(router) diff --git a/pkg/gateway/vald/router/router_test.go b/pkg/gateway/vald/router/router_test.go index 25eaec0bef..97248787ae 100644 --- a/pkg/gateway/vald/router/router_test.go +++ b/pkg/gateway/vald/router/router_test.go @@ -23,9 +23,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -74,8 +76,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -90,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/gateway/vald/service/backup.go b/pkg/gateway/vald/service/backup.go index bc50e64904..3f52f02300 100644 --- a/pkg/gateway/vald/service/backup.go +++ b/pkg/gateway/vald/service/backup.go @@ -20,18 +20,18 @@ import ( "context" "reflect" - "github.com/vdaas/vald/apis/grpc/manager/compressor" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/compressor" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" ) type Backup interface { Start(ctx context.Context) (<-chan error, error) - GetObject(ctx context.Context, uuid string) (*payload.Backup_MetaVector, error) + GetObject(ctx context.Context, uuid string) (*payload.Backup_Vector, error) GetLocation(ctx context.Context, uuid string) ([]string, error) - Register(ctx context.Context, vec *payload.Backup_MetaVector) error - RegisterMultiple(ctx context.Context, vecs *payload.Backup_MetaVectors) error + Register(ctx context.Context, vec *payload.Backup_Vector) error + RegisterMultiple(ctx context.Context, vecs *payload.Backup_Vectors) error Remove(ctx context.Context, uuid string) error RemoveMultiple(ctx context.Context, uuids ...string) error } @@ -56,7 +56,7 @@ func (b *backup) Start(ctx context.Context) (<-chan error, error) { return b.client.StartConnectionMonitor(ctx) } -func (b *backup) GetObject(ctx context.Context, uuid string) (vec *payload.Backup_MetaVector, err error) { +func (b *backup) GetObject(ctx context.Context, uuid string) (vec *payload.Backup_Vector, err error) { _, err = b.client.Do(ctx, b.addr, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err 
error) { vec, err = compressor.NewBackupClient(conn).GetVector(ctx, &payload.Backup_GetVector_Request{ @@ -85,7 +85,7 @@ func (b *backup) GetLocation(ctx context.Context, uuid string) (ipList []string, return } -func (b *backup) Register(ctx context.Context, vec *payload.Backup_MetaVector) (err error) { +func (b *backup) Register(ctx context.Context, vec *payload.Backup_Vector) (err error) { _, err = b.client.Do(ctx, b.addr, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err error) { _, err = compressor.NewBackupClient(conn).Register(ctx, vec, copts...) @@ -97,7 +97,7 @@ func (b *backup) Register(ctx context.Context, vec *payload.Backup_MetaVector) ( return } -func (b *backup) RegisterMultiple(ctx context.Context, vecs *payload.Backup_MetaVectors) (err error) { +func (b *backup) RegisterMultiple(ctx context.Context, vecs *payload.Backup_Vectors) (err error) { _, err = b.client.Do(ctx, b.addr, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err error) { _, err = compressor.NewBackupClient(conn).RegisterMulti(ctx, vecs, copts...) diff --git a/pkg/gateway/vald/service/backup_option.go b/pkg/gateway/vald/service/backup_option.go index 3647333b07..cb930eb267 100644 --- a/pkg/gateway/vald/service/backup_option.go +++ b/pkg/gateway/vald/service/backup_option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/internal/net/grpc" type BackupOption func(b *backup) error -var ( - defaultBackupOpts = []BackupOption{} -) +var defaultBackupOpts = []BackupOption{} func WithBackupAddr(addr string) BackupOption { return func(b *backup) error { diff --git a/pkg/gateway/vald/service/backup_option_test.go b/pkg/gateway/vald/service/backup_option_test.go index f4ae710854..366cc18fff 100644 --- a/pkg/gateway/vald/service/backup_option_test.go +++ b/pkg/gateway/vald/service/backup_option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestWithBackupAddr(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { addr string @@ -63,7 +64,7 @@ func TestWithBackupAddr(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithBackupAddr(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithBackupAddr(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithBackupAddr(t *testing.T) { got := WithBackupAddr(test.args.addr) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -139,6 +142,8 @@ func TestWithBackupAddr(t *testing.T) { } func TestWithBackupClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type 
args struct { client grpc.Client @@ -176,7 +181,7 @@ func TestWithBackupClient(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -212,9 +217,11 @@ func TestWithBackupClient(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -235,7 +242,7 @@ func TestWithBackupClient(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -243,7 +250,7 @@ func TestWithBackupClient(t *testing.T) { got := WithBackupClient(test.args.client) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/gateway/vald/service/backup_test.go b/pkg/gateway/vald/service/backup_test.go index 96198d011b..4356acb982 100644 --- a/pkg/gateway/vald/service/backup_test.go +++ b/pkg/gateway/vald/service/backup_test.go @@ -21,14 +21,14 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestNewBackup(t *testing.T) { + t.Parallel() type args struct { opts []BackupOption } @@ -81,9 +81,11 @@ func TestNewBackup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -98,12 +100,12 @@ func TestNewBackup(t *testing.T) { if err := test.checkFunc(test.want, gotBu, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -169,9 +171,11 @@ func Test_backup_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -190,12 +194,12 @@ func Test_backup_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_GetObject(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -205,7 +209,7 @@ func Test_backup_GetObject(t *testing.T) { client grpc.Client } type want struct { - wantVec *payload.Backup_MetaVector + wantVec *payload.Backup_Vector err error } type test struct { @@ -213,11 +217,11 @@ func Test_backup_GetObject(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Backup_MetaVector, error) error + checkFunc func(want, *payload.Backup_Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotVec *payload.Backup_MetaVector, err error) error { + defaultCheckFunc := func(w want, gotVec *payload.Backup_Vector, err 
error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -264,9 +268,11 @@ func Test_backup_GetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -285,12 +291,12 @@ func Test_backup_GetObject(t *testing.T) { if err := test.checkFunc(test.want, gotVec, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_GetLocation(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -359,9 +365,11 @@ func Test_backup_GetLocation(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -380,15 +388,15 @@ func Test_backup_GetLocation(t *testing.T) { if err := test.checkFunc(test.want, gotIpList, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_Register(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - vec *payload.Backup_MetaVector + vec *payload.Backup_Vector } type fields struct { addr string @@ -450,9 +458,11 @@ func Test_backup_Register(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -471,15 +481,15 @@ func Test_backup_Register(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_RegisterMultiple(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - vecs *payload.Backup_MetaVectors + vecs *payload.Backup_Vectors } type fields struct { addr string @@ -541,9 +551,11 @@ func Test_backup_RegisterMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -562,12 +574,12 @@ func Test_backup_RegisterMultiple(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_Remove(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -632,9 +644,11 @@ func Test_backup_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -653,12 +667,12 @@ func Test_backup_Remove(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_RemoveMultiple(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuids []string @@ -723,9 +737,11 @@ func Test_backup_RemoveMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
test.beforeFunc(test.args) } @@ -744,7 +760,6 @@ func Test_backup_RemoveMultiple(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/gateway/vald/service/filter_option.go b/pkg/gateway/vald/service/filter_option.go index de11d286d2..27b68e9c81 100644 --- a/pkg/gateway/vald/service/filter_option.go +++ b/pkg/gateway/vald/service/filter_option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/internal/net/grpc" type FilterOption func(f *filter) error -var ( - defaultFilterOpts = []FilterOption{} -) +var defaultFilterOpts = []FilterOption{} func WithFilterClient(client grpc.Client) FilterOption { return func(f *filter) error { diff --git a/pkg/gateway/vald/service/filter_option_test.go b/pkg/gateway/vald/service/filter_option_test.go index 960d3a36e4..48aea0aa34 100644 --- a/pkg/gateway/vald/service/filter_option_test.go +++ b/pkg/gateway/vald/service/filter_option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestWithFilterClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { client grpc.Client @@ -63,7 +64,7 @@ func TestWithFilterClient(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithFilterClient(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithFilterClient(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithFilterClient(t *testing.T) { got := WithFilterClient(test.args.client) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/gateway/vald/service/filter_test.go b/pkg/gateway/vald/service/filter_test.go index 7105056704..cfa004e0f2 100644 --- a/pkg/gateway/vald/service/filter_test.go +++ b/pkg/gateway/vald/service/filter_test.go @@ -24,11 +24,11 @@ import ( "github.com/vdaas/vald/apis/grpc/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestNewFilter(t *testing.T) { + t.Parallel() type args struct { opts []FilterOption } @@ -81,9 +81,11 @@ func TestNewFilter(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -98,12 +100,12 @@ func TestNewFilter(t *testing.T) { if err := test.checkFunc(test.want, gotEf, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_filter_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -166,9 +168,11 @@ func Test_filter_Start(t *testing.T) { */ } - for 
_, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -186,12 +190,12 @@ func Test_filter_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_filter_FilterSearch(t *testing.T) { + t.Parallel() type args struct { ctx context.Context res *payload.Search_Response @@ -257,9 +261,11 @@ func Test_filter_FilterSearch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -277,7 +283,6 @@ func Test_filter_FilterSearch(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/gateway/vald/service/gateway.go b/pkg/gateway/vald/service/gateway.go index 6653b4593e..2306f38b71 100644 --- a/pkg/gateway/vald/service/gateway.go +++ b/pkg/gateway/vald/service/gateway.go @@ -22,8 +22,8 @@ import ( "reflect" "sync/atomic" - agent "github.com/vdaas/vald/apis/grpc/agent/core" - "github.com/vdaas/vald/internal/client/discoverer" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/client/v1/client/discoverer" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" @@ -34,11 +34,11 @@ type Gateway interface { Start(ctx context.Context) (<-chan error, error) GetAgentCount(ctx context.Context) int Do(ctx context.Context, - f func(ctx context.Context, tgt string, ac agent.AgentClient, copts ...grpc.CallOption) error) error + f func(ctx context.Context, tgt string, vc vald.Client, copts ...grpc.CallOption) error) error DoMulti(ctx context.Context, num int, - f func(ctx context.Context, tgt string, ac agent.AgentClient, copts ...grpc.CallOption) error) error + f func(ctx context.Context, tgt string, vc vald.Client, copts ...grpc.CallOption) error) error BroadCast(ctx context.Context, - f func(ctx context.Context, tgt string, ac agent.AgentClient, copts ...grpc.CallOption) error) error + f func(ctx context.Context, tgt string, vc vald.Client, copts ...grpc.CallOption) error) error } type gateway struct { @@ -61,14 +61,14 @@ func (g *gateway) Start(ctx context.Context) (<-chan error, error) { } func (g *gateway) BroadCast(ctx context.Context, - f func(ctx context.Context, target string, ac agent.AgentClient, copts ...grpc.CallOption) error) (err error) { + f func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) error) (err error) { return g.client.GetClient().RangeConcurrent(ctx, -1, func(ctx context.Context, addr string, conn *grpc.ClientConn, copts ...grpc.CallOption) (err error) { select { case <-ctx.Done(): return nil default: - err = f(ctx, addr, agent.NewAgentClient(conn), copts...) + err = f(ctx, addr, vald.NewValdClient(conn), copts...) 
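With this hunk every `BroadCast`, `Do`, and `DoMulti` callback receives a `vald.Client` built from `vald.NewValdClient(conn)` instead of an `agent.AgentClient`. A sketch of a caller under the revised signature, assuming only the import paths shown in this diff and the `Gateway` methods declared above (`BroadCast`, `GetAgentCount`); it deliberately issues no RPC so that no client method signatures have to be assumed:

```go
// Sketch only: assumes the v1 packages imported in this diff and a Gateway
// constructed elsewhere in pkg/gateway/vald/service; this is not the
// repository's own handler code.
package example

import (
	"context"
	"sync"

	"github.com/vdaas/vald/apis/grpc/v1/vald"
	"github.com/vdaas/vald/internal/net/grpc"
	"github.com/vdaas/vald/pkg/gateway/vald/service"
)

// agentAddrs fans out over every connected agent and records which targets
// answered the broadcast. The callback now receives a vald.Client, so the
// same body could call any Vald RPC against each agent; here it only
// collects addresses to stay free of assumed RPC signatures.
func agentAddrs(ctx context.Context, gw service.Gateway) ([]string, error) {
	var mu sync.Mutex
	addrs := make([]string, 0, gw.GetAgentCount(ctx))
	err := gw.BroadCast(ctx, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) error {
		// BroadCast is backed by RangeConcurrent, so the callback may run on
		// several goroutines at once; guard the shared slice.
		mu.Lock()
		addrs = append(addrs, target)
		mu.Unlock()
		return nil
	})
	return addrs, err
}
```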
if err != nil { log.Debugf("an error occurred while calling RPC of %s: %s", addr, err) return err @@ -79,17 +79,17 @@ func (g *gateway) BroadCast(ctx context.Context, } func (g *gateway) Do(ctx context.Context, - f func(ctx context.Context, target string, ac agent.AgentClient, copts ...grpc.CallOption) error) (err error) { + f func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) error) (err error) { addr := g.client.GetAddrs(ctx)[0] _, err = g.client.GetClient().Do(ctx, addr, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) { - return nil, f(ctx, addr, agent.NewAgentClient(conn), copts...) + return nil, f(ctx, addr, vald.NewValdClient(conn), copts...) }) return err } func (g *gateway) DoMulti(ctx context.Context, num int, - f func(ctx context.Context, target string, ac agent.AgentClient, copts ...grpc.CallOption) error) (err error) { + f func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) error) (err error) { var cur uint32 = 0 limit := uint32(num) addrs := g.client.GetAddrs(ctx) @@ -99,7 +99,7 @@ func (g *gateway) DoMulti(ctx context.Context, num int, conn *grpc.ClientConn, copts ...grpc.CallOption) (err error) { if atomic.LoadUint32(&cur) < limit { - err = f(ictx, addr, agent.NewAgentClient(conn), copts...) + err = f(ictx, addr, vald.NewValdClient(conn), copts...) if err != nil { log.Debugf("an error occurred while calling RPC of %s: %s", addr, err) return err diff --git a/pkg/gateway/vald/service/gateway_option.go b/pkg/gateway/vald/service/gateway_option.go index b385031d80..623f9d84fa 100644 --- a/pkg/gateway/vald/service/gateway_option.go +++ b/pkg/gateway/vald/service/gateway_option.go @@ -18,17 +18,15 @@ package service import ( - "github.com/vdaas/vald/internal/client/discoverer" + "github.com/vdaas/vald/internal/client/v1/client/discoverer" "github.com/vdaas/vald/internal/errgroup" ) type GWOption func(g *gateway) error -var ( - defaultGWOpts = []GWOption{ - WithErrGroup(errgroup.Get()), - } -) +var defaultGWOpts = []GWOption{ + WithErrGroup(errgroup.Get()), +} func WithDiscoverer(c discoverer.Client) GWOption { return func(g *gateway) error { diff --git a/pkg/gateway/vald/service/gateway_option_test.go b/pkg/gateway/vald/service/gateway_option_test.go index 03d9ac77b8..cbd0b5d8eb 100644 --- a/pkg/gateway/vald/service/gateway_option_test.go +++ b/pkg/gateway/vald/service/gateway_option_test.go @@ -20,13 +20,14 @@ package service import ( "testing" - "github.com/vdaas/vald/internal/client/discoverer" + "github.com/vdaas/vald/internal/client/v1/client/discoverer" "github.com/vdaas/vald/internal/errgroup" - "go.uber.org/goleak" ) func TestWithDiscoverer(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { c discoverer.Client @@ -64,7 +65,7 @@ func TestWithDiscoverer(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -100,9 +101,11 @@ func TestWithDiscoverer(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -123,7 +126,7 @@ func TestWithDiscoverer(t *testing.T) { } */ 
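The option files in this diff mostly collapse single-element `var (...)` blocks, but they also show the functional-options convention these packages follow: defaults are applied first, then caller options, and each option may return an error. A standalone sketch of that pattern with hypothetical fields (the `WithTimeout("3s")` default mirrors the router options earlier in this diff):

```go
package example

import "errors"

// gateway and GWOption mirror the shape of the pattern in this diff; the
// fields and defaults here are illustrative only.
type gateway struct {
	addr    string
	timeout string
}

type GWOption func(g *gateway) error

var defaultGWOpts = []GWOption{
	WithTimeout("3s"),
}

func WithAddr(addr string) GWOption {
	return func(g *gateway) error {
		if addr == "" {
			return errors.New("addr must not be empty")
		}
		g.addr = addr
		return nil
	}
}

func WithTimeout(dur string) GWOption {
	return func(g *gateway) error {
		g.timeout = dur
		return nil
	}
}

// NewGateway applies the defaults first and the caller's options second, so
// an explicit WithTimeout overrides the default "3s".
func NewGateway(opts ...GWOption) (*gateway, error) {
	g := new(gateway)
	for _, opt := range append(defaultGWOpts, opts...) {
		if err := opt(g); err != nil {
			return nil, err
		}
	}
	return g, nil
}
```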
- // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -131,7 +134,7 @@ func TestWithDiscoverer(t *testing.T) { got := WithDiscoverer(test.args.c) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -140,6 +143,8 @@ func TestWithDiscoverer(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -177,7 +182,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -213,9 +218,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -236,7 +243,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -244,7 +251,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/gateway/vald/service/gateway_test.go b/pkg/gateway/vald/service/gateway_test.go index 05a58a2cc7..1053a62347 100644 --- a/pkg/gateway/vald/service/gateway_test.go +++ b/pkg/gateway/vald/service/gateway_test.go @@ -22,16 +22,16 @@ import ( "reflect" "testing" - agent "github.com/vdaas/vald/apis/grpc/agent/core" - "github.com/vdaas/vald/internal/client/discoverer" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/client/v1/client/discoverer" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestNewGateway(t *testing.T) { + t.Parallel() type args struct { opts []GWOption } @@ -84,9 +84,11 @@ func TestNewGateway(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -101,12 +103,12 @@ func TestNewGateway(t *testing.T) { if err := test.checkFunc(test.want, gotGw, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gateway_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -172,9 +174,11 @@ func Test_gateway_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -193,15 +197,15 @@ func Test_gateway_Start(t *testing.T) { 
if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gateway_BroadCast(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - f func(ctx context.Context, target string, ac agent.AgentClient, copts ...grpc.CallOption) error + f func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) error } type fields struct { client discoverer.Client @@ -263,9 +267,11 @@ func Test_gateway_BroadCast(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -284,15 +290,15 @@ func Test_gateway_BroadCast(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gateway_Do(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - f func(ctx context.Context, target string, ac agent.AgentClient, copts ...grpc.CallOption) error + f func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) error } type fields struct { client discoverer.Client @@ -354,9 +360,11 @@ func Test_gateway_Do(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -375,16 +383,16 @@ func Test_gateway_Do(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gateway_DoMulti(t *testing.T) { + t.Parallel() type args struct { ctx context.Context num int - f func(ctx context.Context, target string, ac agent.AgentClient, copts ...grpc.CallOption) error + f func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) error } type fields struct { client discoverer.Client @@ -448,9 +456,11 @@ func Test_gateway_DoMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -469,12 +479,12 @@ func Test_gateway_DoMulti(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gateway_GetAgentCount(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -536,9 +546,11 @@ func Test_gateway_GetAgentCount(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -557,7 +569,6 @@ func Test_gateway_GetAgentCount(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/gateway/vald/service/meta.go b/pkg/gateway/vald/service/meta.go index 1ca8f43ffb..e7f837e4e7 100644 --- a/pkg/gateway/vald/service/meta.go +++ b/pkg/gateway/vald/service/meta.go @@ -21,8 +21,8 @@ import ( "context" "reflect" - gmeta "github.com/vdaas/vald/apis/grpc/meta" - "github.com/vdaas/vald/apis/grpc/payload" + gmeta "github.com/vdaas/vald/apis/grpc/v1/meta" + "github.com/vdaas/vald/apis/grpc/v1/payload" 
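These service files move from `apis/grpc/...` to the `apis/grpc/v1/...` packages, and the backup payload types lose their `Meta` prefix (`Backup_MetaVector` becomes `Backup_Vector`, `Backup_MetaVectors` becomes `Backup_Vectors`). A sketch of a caller of the renamed `Backup.Register`, based only on the interface shown earlier in this diff; the `Uuid` and `Vector` field names on `payload.Backup_Vector` are assumptions for illustration, not taken from this change:

```go
package example

import (
	"context"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
	"github.com/vdaas/vald/pkg/gateway/vald/service"
)

// registerVector is a sketch of calling the renamed Backup API; the Uuid and
// Vector field names are assumed, not taken from this diff.
func registerVector(ctx context.Context, b service.Backup, uuid string, vec []float32) error {
	return b.Register(ctx, &payload.Backup_Vector{
		Uuid:   uuid,
		Vector: vec,
	})
}
```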
"github.com/vdaas/vald/internal/cache" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" diff --git a/pkg/gateway/vald/service/meta_option.go b/pkg/gateway/vald/service/meta_option.go index a5014a1868..110e38a104 100644 --- a/pkg/gateway/vald/service/meta_option.go +++ b/pkg/gateway/vald/service/meta_option.go @@ -27,13 +27,11 @@ import ( type MetaOption func(m *meta) error -var ( - defaultMetaOpts = []MetaOption{ - WithMetaCacheEnabled(true), - WithMetaCacheExpireDuration("30m"), - WithMetaCacheExpiredCheckDuration("2m"), - } -) +var defaultMetaOpts = []MetaOption{ + WithMetaCacheEnabled(true), + WithMetaCacheExpireDuration("30m"), + WithMetaCacheExpiredCheckDuration("2m"), +} func WithMetaAddr(addr string) MetaOption { return func(m *meta) error { diff --git a/pkg/gateway/vald/service/meta_option_test.go b/pkg/gateway/vald/service/meta_option_test.go index a7e7ec7e0c..10419ed307 100644 --- a/pkg/gateway/vald/service/meta_option_test.go +++ b/pkg/gateway/vald/service/meta_option_test.go @@ -22,11 +22,12 @@ import ( "github.com/vdaas/vald/internal/cache" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestWithMetaAddr(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { addr string @@ -64,7 +65,7 @@ func TestWithMetaAddr(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -100,9 +101,11 @@ func TestWithMetaAddr(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -123,7 +126,7 @@ func TestWithMetaAddr(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -131,7 +134,7 @@ func TestWithMetaAddr(t *testing.T) { got := WithMetaAddr(test.args.addr) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -140,6 +143,8 @@ func TestWithMetaAddr(t *testing.T) { } func TestWithMetaHostPort(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { host string @@ -178,7 +183,7 @@ func TestWithMetaHostPort(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -216,9 +221,11 @@ func TestWithMetaHostPort(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -239,7 +246,7 @@ func TestWithMetaHostPort(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do 
not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -247,7 +254,7 @@ func TestWithMetaHostPort(t *testing.T) { got := WithMetaHostPort(test.args.host, test.args.port) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -256,6 +263,8 @@ func TestWithMetaHostPort(t *testing.T) { } func TestWithMetaClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { client grpc.Client @@ -293,7 +302,7 @@ func TestWithMetaClient(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -329,9 +338,11 @@ func TestWithMetaClient(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -352,7 +363,7 @@ func TestWithMetaClient(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -360,7 +371,7 @@ func TestWithMetaClient(t *testing.T) { got := WithMetaClient(test.args.client) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -369,6 +380,8 @@ func TestWithMetaClient(t *testing.T) { } func TestWithMetaCacheEnabled(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { flg bool @@ -406,7 +419,7 @@ func TestWithMetaCacheEnabled(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -442,9 +455,11 @@ func TestWithMetaCacheEnabled(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -465,7 +480,7 @@ func TestWithMetaCacheEnabled(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -473,7 +488,7 @@ func TestWithMetaCacheEnabled(t *testing.T) { got := WithMetaCacheEnabled(test.args.flg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -482,6 +497,8 @@ func TestWithMetaCacheEnabled(t *testing.T) { } func TestWithMetaCache(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { c cache.Cache @@ -519,7 +536,7 @@ func TestWithMetaCache(t 
*testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -555,9 +572,11 @@ func TestWithMetaCache(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -578,7 +597,7 @@ func TestWithMetaCache(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -586,7 +605,7 @@ func TestWithMetaCache(t *testing.T) { got := WithMetaCache(test.args.c) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -595,6 +614,8 @@ func TestWithMetaCache(t *testing.T) { } func TestWithMetaCacheExpireDuration(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -632,7 +653,7 @@ func TestWithMetaCacheExpireDuration(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -668,9 +689,11 @@ func TestWithMetaCacheExpireDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -691,7 +714,7 @@ func TestWithMetaCacheExpireDuration(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -699,7 +722,7 @@ func TestWithMetaCacheExpireDuration(t *testing.T) { got := WithMetaCacheExpireDuration(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -708,6 +731,8 @@ func TestWithMetaCacheExpireDuration(t *testing.T) { } func TestWithMetaCacheExpiredCheckDuration(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -745,7 +770,7 @@ func TestWithMetaCacheExpiredCheckDuration(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -781,9 +806,11 @@ func TestWithMetaCacheExpiredCheckDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -804,7 +831,7 
@@ func TestWithMetaCacheExpiredCheckDuration(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -812,7 +839,7 @@ func TestWithMetaCacheExpiredCheckDuration(t *testing.T) { got := WithMetaCacheExpiredCheckDuration(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/gateway/vald/service/meta_test.go b/pkg/gateway/vald/service/meta_test.go index 6df043828b..3ec9140f08 100644 --- a/pkg/gateway/vald/service/meta_test.go +++ b/pkg/gateway/vald/service/meta_test.go @@ -25,11 +25,11 @@ import ( "github.com/vdaas/vald/internal/cache" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestNewMeta(t *testing.T) { + t.Parallel() type args struct { opts []MetaOption } @@ -82,9 +82,11 @@ func TestNewMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -99,12 +101,12 @@ func TestNewMeta(t *testing.T) { if err := test.checkFunc(test.want, gotMi, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_meta_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -182,9 +184,11 @@ func Test_meta_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -207,12 +211,12 @@ func Test_meta_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_meta_Exists(t *testing.T) { + t.Parallel() type args struct { ctx context.Context meta string @@ -293,9 +297,11 @@ func Test_meta_Exists(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -318,12 +324,12 @@ func Test_meta_Exists(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_meta_GetMeta(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -404,9 +410,11 @@ func Test_meta_GetMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -429,12 +437,12 @@ func Test_meta_GetMeta(t *testing.T) { if err := test.checkFunc(test.want, gotV, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_meta_GetMetas(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuids []string @@ -515,9 +523,11 @@ func Test_meta_GetMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) 
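Alongside `tt.Parallel()`, these hunks switch `goleak.VerifyNone(t)` to `goleak.VerifyNone(tt)` so that a leaked goroutine is reported against the subtest that leaked it rather than the parent test. A hedged, standalone sketch of that usage; `startWorker` and its timing are hypothetical and exist only to give goleak something to verify:

```go
package example

import (
	"testing"
	"time"

	"go.uber.org/goleak"
)

// startWorker is a hypothetical helper that leaks a goroutine unless the
// caller closes done.
func startWorker(done <-chan struct{}) {
	go func() {
		select {
		case <-done:
		case <-time.After(time.Hour):
		}
	}()
}

func TestStartWorker(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		stop bool
	}{
		{name: "worker is stopped", stop: true},
	}
	for _, tc := range tests {
		test := tc
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			// Passing tt rather than the parent t attributes any leak report
			// to this subtest, which is what the hunks above switch to.
			defer goleak.VerifyNone(tt)
			done := make(chan struct{})
			startWorker(done)
			if test.stop {
				close(done)
			}
			// Give the worker a moment to observe the closed channel before
			// goleak snapshots the remaining goroutines.
			time.Sleep(10 * time.Millisecond)
		})
	}
}
```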
+ tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -540,12 +550,12 @@ func Test_meta_GetMetas(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_meta_GetUUID(t *testing.T) { + t.Parallel() type args struct { ctx context.Context meta string @@ -626,9 +636,11 @@ func Test_meta_GetUUID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -651,12 +663,12 @@ func Test_meta_GetUUID(t *testing.T) { if err := test.checkFunc(test.want, gotK, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_meta_GetUUIDs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context metas []string @@ -737,9 +749,11 @@ func Test_meta_GetUUIDs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -762,12 +776,12 @@ func Test_meta_GetUUIDs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_meta_SetUUIDandMeta(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -847,9 +861,11 @@ func Test_meta_SetUUIDandMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -872,12 +888,12 @@ func Test_meta_SetUUIDandMeta(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_meta_SetUUIDandMetas(t *testing.T) { + t.Parallel() type args struct { ctx context.Context kvs map[string]string @@ -954,9 +970,11 @@ func Test_meta_SetUUIDandMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -979,12 +997,12 @@ func Test_meta_SetUUIDandMetas(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_meta_DeleteMeta(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -1065,9 +1083,11 @@ func Test_meta_DeleteMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1090,12 +1110,12 @@ func Test_meta_DeleteMeta(t *testing.T) { if err := test.checkFunc(test.want, gotV, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_meta_DeleteMetas(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuids []string @@ -1176,9 +1196,11 @@ func Test_meta_DeleteMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer 
goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1201,12 +1223,12 @@ func Test_meta_DeleteMetas(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_meta_DeleteUUID(t *testing.T) { + t.Parallel() type args struct { ctx context.Context meta string @@ -1287,9 +1309,11 @@ func Test_meta_DeleteUUID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1312,12 +1336,12 @@ func Test_meta_DeleteUUID(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_meta_DeleteUUIDs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context metas []string @@ -1398,9 +1422,11 @@ func Test_meta_DeleteUUIDs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1423,7 +1449,6 @@ func Test_meta_DeleteUUIDs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/gateway/vald/usecase/vald.go b/pkg/gateway/vald/usecase/vald.go index 7c8cf7f1ee..8d6cb323b4 100644 --- a/pkg/gateway/vald/usecase/vald.go +++ b/pkg/gateway/vald/usecase/vald.go @@ -20,7 +20,7 @@ import ( "context" "github.com/vdaas/vald/apis/grpc/gateway/vald" - "github.com/vdaas/vald/internal/client/discoverer" + "github.com/vdaas/vald/internal/client/v1/client/discoverer" iconf "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" @@ -79,7 +79,7 @@ func New(cfg *config.Data) (r runner.Runner, err error) { ) var obs observability.Observability - if cfg.Observability.Enabled { + if cfg.Observability != nil && cfg.Observability.Enabled { obs, err = observability.NewWithConfig(cfg.Observability) if err != nil { return nil, err @@ -164,7 +164,7 @@ func New(cfg *config.Data) (r runner.Runner, err error) { ef.Client.Opts(), grpc.WithErrGroup(eg), ) - if cfg.Observability.Enabled { + if cfg.Observability != nil && cfg.Observability.Enabled { egressFilterClientOptions = append( egressFilterClientOptions, grpc.WithDialOptions( @@ -177,6 +177,9 @@ func New(cfg *config.Data) (r runner.Runner, err error) { grpc.New(egressFilterClientOptions...), ), ) + if err != nil { + return nil, err + } } v := handler.New( @@ -203,7 +206,7 @@ func New(cfg *config.Data) (r runner.Runner, err error) { }), } - if cfg.Observability.Enabled { + if cfg.Observability != nil && cfg.Observability.Enabled { grpcServerOptions = append( grpcServerOptions, server.WithGRPCOption( diff --git a/pkg/gateway/vald/usecase/vald_test.go b/pkg/gateway/vald/usecase/vald_test.go index 4a68faa657..9a3b26b90a 100644 --- a/pkg/gateway/vald/usecase/vald_test.go +++ b/pkg/gateway/vald/usecase/vald_test.go @@ -28,11 +28,11 @@ import ( "github.com/vdaas/vald/internal/servers/starter" "github.com/vdaas/vald/pkg/gateway/vald/config" "github.com/vdaas/vald/pkg/gateway/vald/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -85,9 +85,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range 
tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -102,12 +104,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -187,9 +189,11 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -214,12 +218,12 @@ func Test_run_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -303,9 +307,11 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -330,12 +336,12 @@ func Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -415,9 +421,11 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -442,12 +450,12 @@ func Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -527,9 +535,11 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -554,12 +564,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -639,9 +649,11 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -666,7 +678,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/backup/cassandra/config/config_test.go b/pkg/manager/backup/cassandra/config/config_test.go index eba18af56a..e90809127a 100644 --- a/pkg/manager/backup/cassandra/config/config_test.go +++ b/pkg/manager/backup/cassandra/config/config_test.go @@ -26,6 +26,7 @@ import ( ) func TestNewConfig(t *testing.T) { + t.Parallel() type args struct { path string } @@ -78,9 +79,11 @@ func 
TestNewConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -95,7 +98,6 @@ func TestNewConfig(t *testing.T) { if err := test.checkFunc(test.want, gotCfg, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/backup/cassandra/handler/grpc/handler.go b/pkg/manager/backup/cassandra/handler/grpc/handler.go index eba8f398aa..c5b1f772a8 100644 --- a/pkg/manager/backup/cassandra/handler/grpc/handler.go +++ b/pkg/manager/backup/cassandra/handler/grpc/handler.go @@ -21,8 +21,8 @@ import ( "context" "fmt" - "github.com/vdaas/vald/apis/grpc/manager/backup" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/backup" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" @@ -47,7 +47,7 @@ func New(opts ...Option) Server { return s } -func (s *server) GetVector(ctx context.Context, req *payload.Backup_GetVector_Request) (res *payload.Backup_Compressed_MetaVector, err error) { +func (s *server) GetVector(ctx context.Context, req *payload.Backup_GetVector_Request) (res *payload.Backup_Compressed_Vector, err error) { ctx, span := trace.StartSpan(ctx, "vald/manager-backup-cassandra.GetVector") defer func() { if span != nil { @@ -55,7 +55,7 @@ func (s *server) GetVector(ctx context.Context, req *payload.Backup_GetVector_Re } }() uuid := req.GetUuid() - meta, err := s.cassandra.GetMeta(ctx, uuid) + vector, err := s.cassandra.GetVector(ctx, uuid) if err != nil { switch { case errors.IsErrCassandraNotFound(err): @@ -80,7 +80,7 @@ func (s *server) GetVector(ctx context.Context, req *payload.Backup_GetVector_Re } } - return toBackupMetaVector(meta) + return toBackupVector(vector) } func (s *server) Locations(ctx context.Context, req *payload.Backup_Locations_Request) (res *payload.Info_IPs, err error) { @@ -105,63 +105,63 @@ func (s *server) Locations(ctx context.Context, req *payload.Backup_Locations_Re }, nil } -func (s *server) Register(ctx context.Context, meta *payload.Backup_Compressed_MetaVector) (res *payload.Empty, err error) { +func (s *server) Register(ctx context.Context, vector *payload.Backup_Compressed_Vector) (res *payload.Empty, err error) { ctx, span := trace.StartSpan(ctx, "vald/manager-backup-cassandra.Register") defer func() { if span != nil { span.End() } }() - uuid := meta.GetUuid() - m, err := toModelMetaVector(meta) + uuid := vector.GetUuid() + m, err := toModelVector(vector) if err != nil { log.Errorf("[Register]\tunknown error\t%+v", err) if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) } - return nil, status.WrapWithInternal(fmt.Sprintf("Register API uuid %s's could not convert vector to meta_vector", uuid), err, info.Get()) + return nil, status.WrapWithInternal(fmt.Sprintf("Register API uuid %s's could not convert vector to backup format", uuid), err, info.Get()) } - err = s.cassandra.SetMeta(ctx, m) + err = s.cassandra.SetVector(ctx, m) if err != nil { log.Errorf("[Register]\tunknown error\t%+v", err) if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) } - return nil, status.WrapWithInternal(fmt.Sprintf("Register API uuid %s's failed to backup metadata", uuid), err, info.Get()) + return nil, status.WrapWithInternal(fmt.Sprintf("Register API uuid %s's 
failed to backup vector", uuid), err, info.Get()) } return new(payload.Empty), nil } -func (s *server) RegisterMulti(ctx context.Context, metas *payload.Backup_Compressed_MetaVectors) (res *payload.Empty, err error) { +func (s *server) RegisterMulti(ctx context.Context, vectors *payload.Backup_Compressed_Vectors) (res *payload.Empty, err error) { ctx, span := trace.StartSpan(ctx, "vald/manager-backup-cassandra.RegisterMulti") defer func() { if span != nil { span.End() } }() - ms := make([]*model.MetaVector, 0, len(metas.GetVectors())) - for _, meta := range metas.Vectors { - var m *model.MetaVector - m, err = toModelMetaVector(meta) + ms := make([]*model.Vector, 0, len(vectors.GetVectors())) + for _, vector := range vectors.Vectors { + var m *model.Vector + m, err = toModelVector(vector) if err != nil { log.Errorf("[RegisterMulti]\tunknown error\t%+v", err) if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) } - return nil, status.WrapWithInternal(fmt.Sprintf("RegisterMulti API uuids %s's could not convert vector to meta_vector", meta.GetUuid()), err, info.Get()) + return nil, status.WrapWithInternal(fmt.Sprintf("RegisterMulti API uuids %s's could not convert vector to backup format", vector.GetUuid()), err, info.Get()) } ms = append(ms, m) } - err = s.cassandra.SetMetas(ctx, ms...) + err = s.cassandra.SetVectors(ctx, ms...) if err != nil { log.Errorf("[RegisterMulti]\tunknown error\t%+v", err) if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) } - return nil, status.WrapWithInternal(fmt.Sprintf("RegisterMulti API failed to backup metadatas %#v", ms), err, info.Get()) + return nil, status.WrapWithInternal(fmt.Sprintf("RegisterMulti API failed to backup vectors %#v", ms), err, info.Get()) } return new(payload.Empty), nil @@ -175,13 +175,13 @@ func (s *server) Remove(ctx context.Context, req *payload.Backup_Remove_Request) } }() uuid := req.GetUuid() - err = s.cassandra.DeleteMeta(ctx, uuid) + err = s.cassandra.DeleteVector(ctx, uuid) if err != nil { log.Errorf("[Remove]\tunknown error\t%+v", err) if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) } - return nil, status.WrapWithInternal(fmt.Sprintf("Remove API uuid %s's could not DeleteMeta", uuid), err, info.Get()) + return nil, status.WrapWithInternal(fmt.Sprintf("Remove API uuid %s's could not DeleteVector", uuid), err, info.Get()) } return new(payload.Empty), nil @@ -195,13 +195,13 @@ func (s *server) RemoveMulti(ctx context.Context, req *payload.Backup_Remove_Req } }() uuids := req.GetUuids() - err = s.cassandra.DeleteMetas(ctx, uuids...) + err = s.cassandra.DeleteVectors(ctx, uuids...) 
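
The hunks above rename the Cassandra backup manager's storage model from MetaVector to Vector and drop the meta column, so GetMeta/SetMeta/DeleteMeta become GetVector/SetVector/DeleteVector on service.Cassandra. As orientation only, here is a hypothetical caller-side sketch (not part of this PR): the function name, parameters, and the idea of passing a connected client in as an argument are assumptions for illustration; the method signatures and the model.Vector fields come from the diff itself.

    package example

    import (
        "context"

        "github.com/vdaas/vald/pkg/manager/backup/cassandra/model"
        "github.com/vdaas/vald/pkg/manager/backup/cassandra/service"
    )

    // backupVector is a hypothetical helper; a connected service.Cassandra
    // client is assumed to be supplied by the surrounding wiring.
    func backupVector(ctx context.Context, db service.Cassandra, uuid string, vec []byte, ips []string) error {
        // Store the compressed vector; the model no longer carries a Meta field.
        if err := db.SetVector(ctx, &model.Vector{
            UUID:   uuid,
            Vector: vec,
            IPs:    ips,
        }); err != nil {
            return err
        }
        // Read it back, then delete it, via the renamed accessors.
        if _, err := db.GetVector(ctx, uuid); err != nil {
            return err
        }
        return db.DeleteVector(ctx, uuid)
    }
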
if err != nil { log.Errorf("[RemoveMulti]\tunknown error\t%+v", err) if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) } - return nil, status.WrapWithInternal(fmt.Sprintf("RemoveMulti API uuids %#v could not DeleteMetas", uuids), err, info.Get()) + return nil, status.WrapWithInternal(fmt.Sprintf("RemoveMulti API uuids %#v could not DeleteVectors", uuids), err, info.Get()) } return new(payload.Empty), nil @@ -247,20 +247,18 @@ func (s *server) RemoveIPs(ctx context.Context, req *payload.Backup_IP_Remove_Re return new(payload.Empty), nil } -func toBackupMetaVector(meta *model.MetaVector) (res *payload.Backup_Compressed_MetaVector, err error) { - return &payload.Backup_Compressed_MetaVector{ - Uuid: meta.UUID, - Meta: meta.Meta, - Vector: meta.Vector, - Ips: meta.IPs, +func toBackupVector(vector *model.Vector) (res *payload.Backup_Compressed_Vector, err error) { + return &payload.Backup_Compressed_Vector{ + Uuid: vector.UUID, + Vector: vector.Vector, + Ips: vector.IPs, }, nil } -func toModelMetaVector(obj *payload.Backup_Compressed_MetaVector) (res *model.MetaVector, err error) { - return &model.MetaVector{ +func toModelVector(obj *payload.Backup_Compressed_Vector) (res *model.Vector, err error) { + return &model.Vector{ UUID: obj.Uuid, Vector: obj.Vector, - Meta: obj.Meta, IPs: obj.Ips, }, nil } diff --git a/pkg/manager/backup/cassandra/handler/grpc/handler_test.go b/pkg/manager/backup/cassandra/handler/grpc/handler_test.go index 9969005471..74eb17eaba 100644 --- a/pkg/manager/backup/cassandra/handler/grpc/handler_test.go +++ b/pkg/manager/backup/cassandra/handler/grpc/handler_test.go @@ -22,15 +22,15 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/pkg/manager/backup/cassandra/model" "github.com/vdaas/vald/pkg/manager/backup/cassandra/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -79,9 +79,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -96,12 +98,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_GetVector(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_GetVector_Request @@ -110,7 +112,7 @@ func Test_server_GetVector(t *testing.T) { cassandra service.Cassandra } type want struct { - wantRes *payload.Backup_Compressed_MetaVector + wantRes *payload.Backup_Compressed_Vector err error } type test struct { @@ -118,11 +120,11 @@ func Test_server_GetVector(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Backup_Compressed_MetaVector, error) error + checkFunc func(want, *payload.Backup_Compressed_Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Backup_Compressed_MetaVector, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Backup_Compressed_Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -167,9 +169,11 @@ func Test_server_GetVector(t *testing.T) { */ } - for _, test := range tests { + for _, tc 
:= range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -187,12 +191,12 @@ func Test_server_GetVector(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Locations(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_Locations_Request @@ -258,9 +262,11 @@ func Test_server_Locations(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -278,15 +284,15 @@ func Test_server_Locations(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Register(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - meta *payload.Backup_Compressed_MetaVector + ctx context.Context + vector *payload.Backup_Compressed_Vector } type fields struct { cassandra service.Cassandra @@ -320,7 +326,7 @@ func Test_server_Register(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - meta: nil, + vector: nil, }, fields: fields { cassandra: nil, @@ -337,7 +343,7 @@ func Test_server_Register(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - meta: nil, + vector: nil, }, fields: fields { cassandra: nil, @@ -349,9 +355,11 @@ func Test_server_Register(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -365,19 +373,19 @@ func Test_server_Register(t *testing.T) { cassandra: test.fields.cassandra, } - gotRes, err := s.Register(test.args.ctx, test.args.meta) + gotRes, err := s.Register(test.args.ctx, test.args.vector) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_RegisterMulti(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - metas *payload.Backup_Compressed_MetaVectors + ctx context.Context + vectors *payload.Backup_Compressed_Vectors } type fields struct { cassandra service.Cassandra @@ -411,7 +419,7 @@ func Test_server_RegisterMulti(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - metas: nil, + vectors: nil, }, fields: fields { cassandra: nil, @@ -428,7 +436,7 @@ func Test_server_RegisterMulti(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - metas: nil, + vectors: nil, }, fields: fields { cassandra: nil, @@ -440,9 +448,11 @@ func Test_server_RegisterMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -456,16 +466,16 @@ func Test_server_RegisterMulti(t *testing.T) { cassandra: test.fields.cassandra, } - gotRes, err := s.RegisterMulti(test.args.ctx, test.args.metas) + gotRes, err := s.RegisterMulti(test.args.ctx, test.args.vectors) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Remove(t *testing.T) { + t.Parallel() 
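
The recurring test change throughout this diff is one three-step pattern: copy the loop variable, mark the subtest parallel, and verify goroutine leaks against the subtest's *testing.T rather than the parent's. A minimal self-contained sketch of that pattern follows; the test name and the doubling check are placeholders, not Vald code.

    package example_test

    import (
        "testing"

        "go.uber.org/goleak"
    )

    func TestParallelPattern(t *testing.T) {
        t.Parallel()
        tests := []struct {
            name string
            in   int
            want int
        }{
            {name: "doubles 1", in: 1, want: 2},
            {name: "doubles 2", in: 2, want: 4},
        }
        for _, tc := range tests {
            // Re-assign the loop variable so each parallel subtest closes over
            // its own copy (needed for Go versions before the 1.22 loop change).
            test := tc
            t.Run(test.name, func(tt *testing.T) {
                tt.Parallel()
                // Check for leaked goroutines per subtest, reporting against
                // the subtest's *testing.T so failures attach to the right case.
                defer goleak.VerifyNone(tt)
                if got := test.in * 2; got != test.want {
                    tt.Errorf("got = %d, want = %d", got, test.want)
                }
            })
        }
    }
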
type args struct { ctx context.Context req *payload.Backup_Remove_Request @@ -531,9 +541,11 @@ func Test_server_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -551,12 +563,12 @@ func Test_server_Remove(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_RemoveMulti(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_Remove_RequestMulti @@ -622,9 +634,11 @@ func Test_server_RemoveMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -642,12 +656,12 @@ func Test_server_RemoveMulti(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_RegisterIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_IP_Register_Request @@ -713,9 +727,11 @@ func Test_server_RegisterIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -733,12 +749,12 @@ func Test_server_RegisterIPs(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_RemoveIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_IP_Remove_Request @@ -804,9 +820,11 @@ func Test_server_RemoveIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -824,28 +842,28 @@ func Test_server_RemoveIPs(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_toBackupMetaVector(t *testing.T) { +func Test_toBackupVector(t *testing.T) { + t.Parallel() type args struct { - meta *model.MetaVector + vector *model.Vector } type want struct { - wantRes *payload.Backup_Compressed_MetaVector + wantRes *payload.Backup_Compressed_Vector err error } type test struct { name string args args want want - checkFunc func(want, *payload.Backup_Compressed_MetaVector, error) error + checkFunc func(want, *payload.Backup_Compressed_Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Backup_Compressed_MetaVector, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Backup_Compressed_Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -860,7 +878,7 @@ func Test_toBackupMetaVector(t *testing.T) { { name: "test_case_1", args: args { - meta: nil, + vector: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -873,7 +891,7 @@ func Test_toBackupMetaVector(t *testing.T) { return test { name: "test_case_2", args: args { - meta: 
nil, + vector: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -882,9 +900,11 @@ func Test_toBackupMetaVector(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -895,32 +915,32 @@ func Test_toBackupMetaVector(t *testing.T) { test.checkFunc = defaultCheckFunc } - gotRes, err := toBackupMetaVector(test.args.meta) + gotRes, err := toBackupVector(test.args.vector) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_toModelMetaVector(t *testing.T) { +func Test_toModelVector(t *testing.T) { + t.Parallel() type args struct { - obj *payload.Backup_Compressed_MetaVector + obj *payload.Backup_Compressed_Vector } type want struct { - wantRes *model.MetaVector + wantRes *model.Vector err error } type test struct { name string args args want want - checkFunc func(want, *model.MetaVector, error) error + checkFunc func(want, *model.Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *model.MetaVector, err error) error { + defaultCheckFunc := func(w want, gotRes *model.Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -957,9 +977,11 @@ func Test_toModelMetaVector(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -970,11 +992,10 @@ func Test_toModelMetaVector(t *testing.T) { test.checkFunc = defaultCheckFunc } - gotRes, err := toModelMetaVector(test.args.obj) + gotRes, err := toModelVector(test.args.obj) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/backup/cassandra/handler/grpc/option.go b/pkg/manager/backup/cassandra/handler/grpc/option.go index 08ab0680ae..855f184a7e 100644 --- a/pkg/manager/backup/cassandra/handler/grpc/option.go +++ b/pkg/manager/backup/cassandra/handler/grpc/option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/pkg/manager/backup/cassandra/service" type Option func(*server) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithCassandra(c service.Cassandra) Option { return func(s *server) { diff --git a/pkg/manager/backup/cassandra/handler/grpc/option_test.go b/pkg/manager/backup/cassandra/handler/grpc/option_test.go index 057c4ad965..9703ee24e1 100644 --- a/pkg/manager/backup/cassandra/handler/grpc/option_test.go +++ b/pkg/manager/backup/cassandra/handler/grpc/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/pkg/manager/backup/cassandra/service" - "go.uber.org/goleak" ) func TestWithCassandra(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { c service.Cassandra @@ -63,7 +64,7 @@ func TestWithCassandra(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithCassandra(t *testing.T) { */ } - 
for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithCassandra(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithCassandra(t *testing.T) { got := WithCassandra(test.args.c) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/backup/cassandra/handler/rest/handler.go b/pkg/manager/backup/cassandra/handler/rest/handler.go index c1ba0ecfe2..a10c4ec099 100644 --- a/pkg/manager/backup/cassandra/handler/rest/handler.go +++ b/pkg/manager/backup/cassandra/handler/rest/handler.go @@ -20,8 +20,8 @@ package rest import ( "net/http" - "github.com/vdaas/vald/apis/grpc/manager/backup" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/backup" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/net/http/json" ) @@ -64,14 +64,14 @@ func (h *handler) Locations(w http.ResponseWriter, r *http.Request) (int, error) } func (h *handler) Register(w http.ResponseWriter, r *http.Request) (int, error) { - var req *payload.Backup_Compressed_MetaVector + var req *payload.Backup_Compressed_Vector return json.Handler(w, r, &req, func() (interface{}, error) { return h.backup.Register(r.Context(), req) }) } func (h *handler) RegisterMulti(w http.ResponseWriter, r *http.Request) (int, error) { - var req *payload.Backup_Compressed_MetaVectors + var req *payload.Backup_Compressed_Vectors return json.Handler(w, r, &req, func() (interface{}, error) { return h.backup.RegisterMulti(r.Context(), req) }) diff --git a/pkg/manager/backup/cassandra/handler/rest/handler_test.go b/pkg/manager/backup/cassandra/handler/rest/handler_test.go index 37d902b265..2faaa990c8 100644 --- a/pkg/manager/backup/cassandra/handler/rest/handler_test.go +++ b/pkg/manager/backup/cassandra/handler/rest/handler_test.go @@ -22,13 +22,13 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/manager/backup" + "github.com/vdaas/vald/apis/grpc/v1/manager/backup" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -77,9 +77,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -94,12 +96,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_GetVector(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -165,9 +167,11 @@ func Test_handler_GetVector(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -185,12 +189,12 @@ func 
Test_handler_GetVector(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Locations(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -256,9 +260,11 @@ func Test_handler_Locations(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -276,12 +282,12 @@ func Test_handler_Locations(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Register(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -347,9 +353,11 @@ func Test_handler_Register(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,12 +375,12 @@ func Test_handler_Register(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_RegisterMulti(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -438,9 +446,11 @@ func Test_handler_RegisterMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -458,12 +468,12 @@ func Test_handler_RegisterMulti(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Remove(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -529,9 +539,11 @@ func Test_handler_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -549,12 +561,12 @@ func Test_handler_Remove(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_RemoveMulti(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -620,9 +632,11 @@ func Test_handler_RemoveMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -640,12 +654,12 @@ func Test_handler_RemoveMulti(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_RegisterIPs(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -711,9 +725,11 @@ func Test_handler_RegisterIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
test.beforeFunc(test.args) } @@ -731,12 +747,12 @@ func Test_handler_RegisterIPs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_RemoveIPs(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -802,9 +818,11 @@ func Test_handler_RemoveIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -822,7 +840,6 @@ func Test_handler_RemoveIPs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/backup/cassandra/handler/rest/option.go b/pkg/manager/backup/cassandra/handler/rest/option.go index 7aff12c199..e1d09c440e 100644 --- a/pkg/manager/backup/cassandra/handler/rest/option.go +++ b/pkg/manager/backup/cassandra/handler/rest/option.go @@ -17,13 +17,11 @@ // Package rest provides rest api logic package rest -import "github.com/vdaas/vald/apis/grpc/manager/backup" +import "github.com/vdaas/vald/apis/grpc/v1/manager/backup" type Option func(*handler) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithBackup(b backup.BackupServer) Option { return func(h *handler) { diff --git a/pkg/manager/backup/cassandra/handler/rest/option_test.go b/pkg/manager/backup/cassandra/handler/rest/option_test.go index 9535622a92..b62608c5fa 100644 --- a/pkg/manager/backup/cassandra/handler/rest/option_test.go +++ b/pkg/manager/backup/cassandra/handler/rest/option_test.go @@ -20,12 +20,13 @@ package rest import ( "testing" - "github.com/vdaas/vald/apis/grpc/manager/backup" - + "github.com/vdaas/vald/apis/grpc/v1/manager/backup" "go.uber.org/goleak" ) func TestWithBackup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { b backup.BackupServer @@ -63,7 +64,7 @@ func TestWithBackup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithBackup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithBackup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithBackup(t *testing.T) { got := WithBackup(test.args.b) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/backup/cassandra/model/model.go b/pkg/manager/backup/cassandra/model/model.go index fd5fdf6fcb..9aadce555b 100644 --- a/pkg/manager/backup/cassandra/model/model.go +++ b/pkg/manager/backup/cassandra/model/model.go @@ -17,9 +17,8 @@ // Package grpc provides grpc server logic package model -type MetaVector 
struct { +type Vector struct { UUID string `db:"uuid"` Vector []byte `db:"vector"` - Meta string `db:"meta"` IPs []string `db:"ips"` } diff --git a/pkg/manager/backup/cassandra/router/option.go b/pkg/manager/backup/cassandra/router/option.go index cbdce18a3e..b44ba9b2f0 100644 --- a/pkg/manager/backup/cassandra/router/option.go +++ b/pkg/manager/backup/cassandra/router/option.go @@ -24,11 +24,9 @@ import ( type Option func(*router) -var ( - defaultOpts = []Option{ - WithTimeout("3s"), - } -) +var defaultOpts = []Option{ + WithTimeout("3s"), +} func WithHandler(h rest.Handler) Option { return func(r *router) { diff --git a/pkg/manager/backup/cassandra/router/option_test.go b/pkg/manager/backup/cassandra/router/option_test.go index 5bab19cb39..f7a8d9c410 100644 --- a/pkg/manager/backup/cassandra/router/option_test.go +++ b/pkg/manager/backup/cassandra/router/option_test.go @@ -22,11 +22,12 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/pkg/manager/backup/cassandra/handler/rest" - "go.uber.org/goleak" ) func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { h rest.Handler @@ -64,7 +65,7 @@ func TestWithHandler(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -100,9 +101,11 @@ func TestWithHandler(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -123,7 +126,7 @@ func TestWithHandler(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -131,7 +134,7 @@ func TestWithHandler(t *testing.T) { got := WithHandler(test.args.h) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -140,6 +143,8 @@ func TestWithHandler(t *testing.T) { } func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { timeout string @@ -177,7 +182,7 @@ func TestWithTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -213,9 +218,11 @@ func TestWithTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -236,7 +243,7 @@ func TestWithTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -244,7 +251,7 @@ func TestWithTimeout(t 
*testing.T) { got := WithTimeout(test.args.timeout) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -253,6 +260,8 @@ func TestWithTimeout(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -290,7 +299,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -326,9 +335,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -349,7 +360,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -357,7 +368,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/backup/cassandra/router/router.go b/pkg/manager/backup/cassandra/router/router.go index 5e7bc0a0c9..cc562adf93 100644 --- a/pkg/manager/backup/cassandra/router/router.go +++ b/pkg/manager/backup/cassandra/router/router.go @@ -32,7 +32,7 @@ type router struct { timeout string } -// New returns REST route&method information from handler interface +// New returns REST route&method information from handler interface. 
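
The option.go hunks above only reformat the declarations: a plain `var defaultOpts = []Option{...}` replaces the single-element var block. For context, here is a self-contained sketch of the functional-options shape these packages use; the `config`/`newConfig` names are illustrative, and the defaults-then-overrides application loop is an assumption since it sits outside the hunks shown here.

    package example

    type config struct {
        timeout string
    }

    type Option func(*config)

    // A plain slice declaration replaces the single-element var block,
    // matching the cleanup in these option.go files.
    var defaultOpts = []Option{
        WithTimeout("3s"),
    }

    func WithTimeout(timeout string) Option {
        return func(c *config) {
            if timeout != "" {
                c.timeout = timeout
            }
        }
    }

    // newConfig applies defaults first so callers can override them.
    func newConfig(opts ...Option) *config {
        c := new(config)
        for _, opt := range append(defaultOpts, opts...) {
            opt(c)
        }
        return c
    }
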
func New(opts ...Option) http.Handler { r := new(router) @@ -48,14 +48,15 @@ func New(opts ...Option) http.Handler { middleware.WithTimeout(r.timeout), middleware.WithErrorGroup(r.eg), )), - routing.WithRoutes([]routing.Route{{ - "GetVector", - []string{ - http.MethodGet, + routing.WithRoutes([]routing.Route{ + { + "GetVector", + []string{ + http.MethodGet, + }, + "/vector/{uuid}", + h.GetVector, }, - "/vector/{uuid}", - h.GetVector, - }, { "Locations", []string{ diff --git a/pkg/manager/backup/cassandra/router/router_test.go b/pkg/manager/backup/cassandra/router/router_test.go index 25eaec0bef..97248787ae 100644 --- a/pkg/manager/backup/cassandra/router/router_test.go +++ b/pkg/manager/backup/cassandra/router/router_test.go @@ -23,9 +23,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -74,8 +76,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -90,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/backup/cassandra/service/cassandra.go b/pkg/manager/backup/cassandra/service/cassandra.go index 5ff5c000c0..7a487f4bd8 100644 --- a/pkg/manager/backup/cassandra/service/cassandra.go +++ b/pkg/manager/backup/cassandra/service/cassandra.go @@ -29,30 +29,27 @@ import ( const ( uuidColumn = "uuid" vectorColumn = "vector" - metaColumn = "meta" ipsColumn = "ips" ) -var ( - metaColumns = []string{uuidColumn, vectorColumn, metaColumn, ipsColumn} -) +var columns = []string{uuidColumn, vectorColumn, ipsColumn} type Cassandra interface { Connect(ctx context.Context) error Close(ctx context.Context) error - GetMeta(ctx context.Context, uuid string) (*model.MetaVector, error) + GetVector(ctx context.Context, uuid string) (*model.Vector, error) GetIPs(ctx context.Context, uuid string) ([]string, error) - SetMeta(ctx context.Context, meta *model.MetaVector) error - SetMetas(ctx context.Context, metas ...*model.MetaVector) error - DeleteMeta(ctx context.Context, uuid string) error - DeleteMetas(ctx context.Context, uuids ...string) error + SetVector(ctx context.Context, vec *model.Vector) error + SetVectors(ctx context.Context, vecs ...*model.Vector) error + DeleteVector(ctx context.Context, uuid string) error + DeleteVectors(ctx context.Context, uuids ...string) error SetIPs(ctx context.Context, uuid string, ips ...string) error RemoveIPs(ctx context.Context, ips ...string) error } type client struct { db cassandra.Cassandra - metaTable string + tableName string } func New(opts ...Option) (Cassandra, error) { @@ -74,25 +71,25 @@ func (c *client) Close(ctx context.Context) error { return c.db.Close(ctx) } -func (c *client) getMetaVector(ctx context.Context, uuid string) (*model.MetaVector, error) { - var metaVector model.MetaVector - if err := c.db.Query(cassandra.Select(c.metaTable, - metaColumns, +func (c *client) getVector(ctx context.Context, uuid string) (*model.Vector, error) { + var vector model.Vector + if err := c.db.Query(cassandra.Select(c.tableName, + columns, cassandra.Eq(uuidColumn))). 
BindMap(map[string]interface{}{ uuidColumn: uuid, - }).GetRelease(&metaVector); err != nil { + }).GetRelease(&vector); err != nil { return nil, cassandra.WrapErrorWithKeys(err, uuid) } - return &metaVector, nil + return &vector, nil } -func (c *client) GetMeta(ctx context.Context, uuid string) (*model.MetaVector, error) { - return c.getMetaVector(ctx, uuid) +func (c *client) GetVector(ctx context.Context, uuid string) (*model.Vector, error) { + return c.getVector(ctx, uuid) } func (c *client) GetIPs(ctx context.Context, uuid string) ([]string, error) { - mv, err := c.getMetaVector(ctx, uuid) + mv, err := c.getVector(ctx, uuid) if err != nil { return nil, err } @@ -100,37 +97,36 @@ func (c *client) GetIPs(ctx context.Context, uuid string) ([]string, error) { return mv.IPs, nil } -func (c *client) SetMeta(ctx context.Context, meta *model.MetaVector) error { - stmt, names := cassandra.Insert(c.metaTable, metaColumns...).ToCql() - return c.db.Query(stmt, names).BindStruct(meta).ExecRelease() +func (c *client) SetVector(ctx context.Context, vec *model.Vector) error { + stmt, names := cassandra.Insert(c.tableName, columns...).ToCql() + return c.db.Query(stmt, names).BindStruct(vec).ExecRelease() } -func (c *client) SetMetas(ctx context.Context, metas ...*model.MetaVector) error { - ib := cassandra.Insert(c.metaTable, metaColumns...) +func (c *client) SetVectors(ctx context.Context, vecs ...*model.Vector) error { + ib := cassandra.Insert(c.tableName, columns...) bt := cassandra.Batch() - entities := make(map[string]interface{}, len(metas)*4) - for i, mv := range metas { + entities := make(map[string]interface{}, len(vecs)*3) + for i, mv := range vecs { prefix := "p" + strconv.Itoa(i) bt = bt.AddWithPrefix(prefix, ib) entities[prefix+"."+uuidColumn] = mv.UUID entities[prefix+"."+vectorColumn] = mv.Vector - entities[prefix+"."+metaColumn] = mv.Meta entities[prefix+"."+ipsColumn] = mv.IPs } return c.db.Query(bt.ToCql()).BindMap(entities).ExecRelease() } -func (c *client) DeleteMeta(ctx context.Context, uuid string) error { - return c.db.Query(cassandra.Delete(c.metaTable, +func (c *client) DeleteVector(ctx context.Context, uuid string) error { + return c.db.Query(cassandra.Delete(c.tableName, cassandra.Eq(uuidColumn)).ToCql()). BindMap(map[string]interface{}{uuidColumn: uuid}). ExecRelease() } -func (c *client) DeleteMetas(ctx context.Context, uuids ...string) error { - deleteBuilder := cassandra.Delete(c.metaTable, cassandra.Eq(uuidColumn)) +func (c *client) DeleteVectors(ctx context.Context, uuids ...string) error { + deleteBuilder := cassandra.Delete(c.tableName, cassandra.Eq(uuidColumn)) bt := cassandra.Batch() bindUUIDs := make(map[string]interface{}, len(uuids)) for i, uuid := range uuids { @@ -143,7 +139,7 @@ func (c *client) DeleteMetas(ctx context.Context, uuids ...string) error { } func (c *client) SetIPs(ctx context.Context, uuid string, ips ...string) error { - return c.db.Query(cassandra.Update(c.metaTable). + return c.db.Query(cassandra.Update(c.tableName). AddNamed(ipsColumn, ipsColumn). Where(cassandra.Eq(uuidColumn)).ToCql()). BindMap(map[string]interface{}{ @@ -153,19 +149,19 @@ func (c *client) SetIPs(ctx context.Context, uuid string, ips ...string) error { } func (c *client) RemoveIPs(ctx context.Context, ips ...string) error { - var metaVectors []model.MetaVector + var vectors []model.Vector for _, ip := range ips { - err := c.db.Query(cassandra.Select(c.metaTable, + err := c.db.Query(cassandra.Select(c.tableName, []string{uuidColumn, ipsColumn}, cassandra.Contains(ipsColumn))). 
BindMap(map[string]interface{}{ipsColumn: ip}). - SelectRelease(&metaVectors) + SelectRelease(&vectors) if err != nil { return err } - for _, mv := range metaVectors { + for _, mv := range vectors { currentIPs := mv.IPs newIPs := make([]string, 0, len(currentIPs)-1) for i, cIP := range currentIPs { @@ -178,7 +174,7 @@ func (c *client) RemoveIPs(ctx context.Context, ips ...string) error { newIPs = append(newIPs, cIP) } - err = c.db.Query(cassandra.Update(c.metaTable).Set(ipsColumn). + err = c.db.Query(cassandra.Update(c.tableName).Set(ipsColumn). Where(cassandra.Eq(uuidColumn)).ToCql()). BindMap(map[string]interface{}{ uuidColumn: mv.UUID, diff --git a/pkg/manager/backup/cassandra/service/cassandra_test.go b/pkg/manager/backup/cassandra/service/cassandra_test.go index 92f6804691..72e78aae32 100644 --- a/pkg/manager/backup/cassandra/service/cassandra_test.go +++ b/pkg/manager/backup/cassandra/service/cassandra_test.go @@ -28,6 +28,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -80,8 +81,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -97,18 +100,18 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Connect(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } type fields struct { db cassandra.Cassandra - metaTable string + tableName string } type want struct { err error @@ -138,7 +141,7 @@ func Test_client_Connect(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -155,7 +158,7 @@ func Test_client_Connect(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -164,8 +167,10 @@ func Test_client_Connect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -178,25 +183,25 @@ func Test_client_Connect(t *testing.T) { } c := &client{ db: test.fields.db, - metaTable: test.fields.metaTable, + tableName: test.fields.tableName, } err := c.Connect(test.args.ctx) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Close(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } type fields struct { db cassandra.Cassandra - metaTable string + tableName string } type want struct { err error @@ -226,7 +231,7 @@ func Test_client_Close(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -243,7 +248,7 @@ func Test_client_Close(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -252,8 +257,10 @@ func Test_client_Close(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -266,29 +273,29 @@ func Test_client_Close(t *testing.T) { } c := &client{ db: test.fields.db, - metaTable: test.fields.metaTable, + tableName: 
test.fields.tableName, } err := c.Close(test.args.ctx) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_client_getMetaVector(t *testing.T) { +func Test_client_getVector(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string } type fields struct { db cassandra.Cassandra - metaTable string + tableName string } type want struct { - want *model.MetaVector + want *model.Vector err error } type test struct { @@ -296,11 +303,11 @@ func Test_client_getMetaVector(t *testing.T) { args args fields fields want want - checkFunc func(want, *model.MetaVector, error) error + checkFunc func(want, *model.Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *model.MetaVector, err error) error { + defaultCheckFunc := func(w want, got *model.Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -320,7 +327,7 @@ func Test_client_getMetaVector(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -338,7 +345,7 @@ func Test_client_getMetaVector(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -347,8 +354,10 @@ func Test_client_getMetaVector(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -361,29 +370,29 @@ func Test_client_getMetaVector(t *testing.T) { } c := &client{ db: test.fields.db, - metaTable: test.fields.metaTable, + tableName: test.fields.tableName, } - got, err := c.getMetaVector(test.args.ctx, test.args.uuid) + got, err := c.getVector(test.args.ctx, test.args.uuid) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_client_GetMeta(t *testing.T) { +func Test_client_GetVector(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string } type fields struct { db cassandra.Cassandra - metaTable string + tableName string } type want struct { - want *model.MetaVector + want *model.Vector err error } type test struct { @@ -391,11 +400,11 @@ func Test_client_GetMeta(t *testing.T) { args args fields fields want want - checkFunc func(want, *model.MetaVector, error) error + checkFunc func(want, *model.Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *model.MetaVector, err error) error { + defaultCheckFunc := func(w want, got *model.Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -415,7 +424,7 @@ func Test_client_GetMeta(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -433,7 +442,7 @@ func Test_client_GetMeta(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -442,8 +451,10 @@ func Test_client_GetMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -456,26 +467,26 @@ func Test_client_GetMeta(t 
*testing.T) { } c := &client{ db: test.fields.db, - metaTable: test.fields.metaTable, + tableName: test.fields.tableName, } - got, err := c.GetMeta(test.args.ctx, test.args.uuid) + got, err := c.GetVector(test.args.ctx, test.args.uuid) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_GetIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string } type fields struct { db cassandra.Cassandra - metaTable string + tableName string } type want struct { want []string @@ -510,7 +521,7 @@ func Test_client_GetIPs(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -528,7 +539,7 @@ func Test_client_GetIPs(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -537,8 +548,10 @@ func Test_client_GetIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -551,26 +564,26 @@ func Test_client_GetIPs(t *testing.T) { } c := &client{ db: test.fields.db, - metaTable: test.fields.metaTable, + tableName: test.fields.tableName, } got, err := c.GetIPs(test.args.ctx, test.args.uuid) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_client_SetMeta(t *testing.T) { +func Test_client_SetVector(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - meta *model.MetaVector + ctx context.Context + vec *model.Vector } type fields struct { db cassandra.Cassandra - metaTable string + tableName string } type want struct { err error @@ -597,11 +610,11 @@ func Test_client_SetMeta(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - meta: nil, + vec: nil, }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -615,11 +628,11 @@ func Test_client_SetMeta(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - meta: nil, + vec: nil, }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -628,8 +641,10 @@ func Test_client_SetMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -642,26 +657,26 @@ func Test_client_SetMeta(t *testing.T) { } c := &client{ db: test.fields.db, - metaTable: test.fields.metaTable, + tableName: test.fields.tableName, } - err := c.SetMeta(test.args.ctx, test.args.meta) + err := c.SetVector(test.args.ctx, test.args.vec) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_client_SetMetas(t *testing.T) { +func Test_client_SetVectors(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - metas []*model.MetaVector + ctx context.Context + vecs []*model.Vector } type fields struct { db cassandra.Cassandra - metaTable string + tableName string } type want struct { err error @@ -688,11 +703,11 @@ func Test_client_SetMetas(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - metas: nil, + vecs: nil, }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -706,11 +721,11 @@ 
func Test_client_SetMetas(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - metas: nil, + vecs: nil, }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -719,8 +734,10 @@ func Test_client_SetMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -733,26 +750,26 @@ func Test_client_SetMetas(t *testing.T) { } c := &client{ db: test.fields.db, - metaTable: test.fields.metaTable, + tableName: test.fields.tableName, } - err := c.SetMetas(test.args.ctx, test.args.metas...) + err := c.SetVectors(test.args.ctx, test.args.vecs...) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_client_DeleteMeta(t *testing.T) { +func Test_client_DeleteVector(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string } type fields struct { db cassandra.Cassandra - metaTable string + tableName string } type want struct { err error @@ -783,7 +800,7 @@ func Test_client_DeleteMeta(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -801,7 +818,7 @@ func Test_client_DeleteMeta(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -810,8 +827,10 @@ func Test_client_DeleteMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -824,26 +843,26 @@ func Test_client_DeleteMeta(t *testing.T) { } c := &client{ db: test.fields.db, - metaTable: test.fields.metaTable, + tableName: test.fields.tableName, } - err := c.DeleteMeta(test.args.ctx, test.args.uuid) + err := c.DeleteVector(test.args.ctx, test.args.uuid) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_client_DeleteMetas(t *testing.T) { +func Test_client_DeleteVectors(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuids []string } type fields struct { db cassandra.Cassandra - metaTable string + tableName string } type want struct { err error @@ -874,7 +893,7 @@ func Test_client_DeleteMetas(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -892,7 +911,7 @@ func Test_client_DeleteMetas(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -901,8 +920,10 @@ func Test_client_DeleteMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -915,19 +936,19 @@ func Test_client_DeleteMetas(t *testing.T) { } c := &client{ db: test.fields.db, - metaTable: test.fields.metaTable, + tableName: test.fields.tableName, } - err := c.DeleteMetas(test.args.ctx, test.args.uuids...) + err := c.DeleteVectors(test.args.ctx, test.args.uuids...) 
if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_SetIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -935,7 +956,7 @@ func Test_client_SetIPs(t *testing.T) { } type fields struct { db cassandra.Cassandra - metaTable string + tableName string } type want struct { err error @@ -967,7 +988,7 @@ func Test_client_SetIPs(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -986,7 +1007,7 @@ func Test_client_SetIPs(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -995,8 +1016,10 @@ func Test_client_SetIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1009,26 +1032,26 @@ func Test_client_SetIPs(t *testing.T) { } c := &client{ db: test.fields.db, - metaTable: test.fields.metaTable, + tableName: test.fields.tableName, } err := c.SetIPs(test.args.ctx, test.args.uuid, test.args.ips...) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_RemoveIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context ips []string } type fields struct { db cassandra.Cassandra - metaTable string + tableName string } type want struct { err error @@ -1059,7 +1082,7 @@ func Test_client_RemoveIPs(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -1077,7 +1100,7 @@ func Test_client_RemoveIPs(t *testing.T) { }, fields: fields { db: nil, - metaTable: "", + tableName: "", }, want: want{}, checkFunc: defaultCheckFunc, @@ -1086,8 +1109,10 @@ func Test_client_RemoveIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1100,14 +1125,13 @@ func Test_client_RemoveIPs(t *testing.T) { } c := &client{ db: test.fields.db, - metaTable: test.fields.metaTable, + tableName: test.fields.tableName, } err := c.RemoveIPs(test.args.ctx, test.args.ips...) 
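Editor's note: the test hunks above all apply one pattern: mark the top-level test and each subtest with Parallel, copy the range variable (test := tc) so the closure does not share a single loop variable across parallel subtests, and verify goroutine leaks against the subtest's own *testing.T (goleak.VerifyNone(tt) rather than the parent t). Below is a minimal self-contained sketch of that pattern; the sum helper and its cases are invented for illustration and are not part of this patch.

package example

import (
	"testing"

	"go.uber.org/goleak"
)

func sum(a, b int) int { return a + b }

func TestSum(t *testing.T) {
	t.Parallel() // the top-level test may run alongside other parallel tests
	tests := []struct {
		name string
		a, b int
		want int
	}{
		{name: "zero values", a: 0, b: 0, want: 0},
		{name: "positive values", a: 1, b: 2, want: 3},
	}
	for _, tc := range tests {
		test := tc // copy the loop variable so each parallel subtest captures its own case
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			defer goleak.VerifyNone(tt) // report leaked goroutines against the subtest, not the parent t
			if got := sum(test.a, test.b); got != test.want {
				tt.Errorf("got %d, want %d", got, test.want)
			}
		})
	}
}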
if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/backup/cassandra/service/option.go b/pkg/manager/backup/cassandra/service/option.go index 59a8763dec..654a70b75b 100644 --- a/pkg/manager/backup/cassandra/service/option.go +++ b/pkg/manager/backup/cassandra/service/option.go @@ -23,11 +23,9 @@ import ( type Option func(*client) error -var ( - defaultOpts = []Option{ - WithMetaTable("meta_vector"), - } -) +var defaultOpts = []Option{ + WithTableName("backup_vector"), +} func WithCassandra(db cassandra.Cassandra) Option { return func(c *client) error { @@ -39,10 +37,10 @@ func WithCassandra(db cassandra.Cassandra) Option { } } -func WithMetaTable(name string) Option { +func WithTableName(name string) Option { return func(c *client) error { if name != "" { - c.metaTable = name + c.tableName = name } return nil diff --git a/pkg/manager/backup/cassandra/service/option_test.go b/pkg/manager/backup/cassandra/service/option_test.go new file mode 100644 index 0000000000..78f9012d41 --- /dev/null +++ b/pkg/manager/backup/cassandra/service/option_test.go @@ -0,0 +1,259 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package service manages the main logic of server. +package service + +import ( + "testing" + + "github.com/vdaas/vald/internal/db/nosql/cassandra" + "go.uber.org/goleak" +) + +func TestWithCassandra(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + db cassandra.Cassandra + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + db: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + db: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithCassandra(test.args.db) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithCassandra(test.args.db) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithTableName(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + name string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + name: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + name: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithTableName(test.args.name) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithTableName(test.args.name) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/manager/backup/cassandra/usecase/backupd.go b/pkg/manager/backup/cassandra/usecase/backupd.go index b1788f9ada..dedb69501b 100644 --- a/pkg/manager/backup/cassandra/usecase/backupd.go +++ b/pkg/manager/backup/cassandra/usecase/backupd.go @@ -19,7 +19,7 @@ package usecase import ( "context" - "github.com/vdaas/vald/apis/grpc/manager/backup" + "github.com/vdaas/vald/apis/grpc/v1/manager/backup" iconf "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/db/nosql/cassandra" "github.com/vdaas/vald/internal/errgroup" @@ -72,7 +72,7 @@ func New(cfg *config.Data) (r runner.Runner, err error) { c, err := service.New( service.WithCassandra(db), - service.WithMetaTable(cfg.Cassandra.MetaTable), + service.WithTableName(cfg.Cassandra.VectorBackupTable), ) if err != nil { return nil, err @@ -136,7 +136,6 @@ func New(cfg *config.Data) (r runner.Runner, err error) { }), // TODO add GraphQL handler ) - if err != nil { return nil, err } diff --git a/pkg/manager/backup/cassandra/usecase/backupd_test.go b/pkg/manager/backup/cassandra/usecase/backupd_test.go index 6c368521b0..bceb3f09f2 100644 --- a/pkg/manager/backup/cassandra/usecase/backupd_test.go +++ b/pkg/manager/backup/cassandra/usecase/backupd_test.go @@ -28,11 +28,11 @@ import ( "github.com/vdaas/vald/internal/servers/starter" "github.com/vdaas/vald/pkg/manager/backup/cassandra/config" "github.com/vdaas/vald/pkg/manager/backup/cassandra/service" - "go.uber.org/goleak" ) func 
TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -85,9 +85,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -102,12 +104,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -178,9 +180,11 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -202,12 +206,12 @@ func Test_run_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -282,9 +286,11 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -306,12 +312,12 @@ func Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -382,9 +388,11 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -406,12 +414,12 @@ func Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -482,9 +490,11 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -506,12 +516,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -582,9 +592,11 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -606,7 +618,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/backup/mysql/config/config_test.go b/pkg/manager/backup/mysql/config/config_test.go index eba18af56a..e90809127a 100644 --- a/pkg/manager/backup/mysql/config/config_test.go +++ 
b/pkg/manager/backup/mysql/config/config_test.go @@ -26,6 +26,7 @@ import ( ) func TestNewConfig(t *testing.T) { + t.Parallel() type args struct { path string } @@ -78,9 +79,11 @@ func TestNewConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -95,7 +98,6 @@ func TestNewConfig(t *testing.T) { if err := test.checkFunc(test.want, gotCfg, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/backup/mysql/handler/grpc/handler.go b/pkg/manager/backup/mysql/handler/grpc/handler.go index e60df634b1..ffa742ca2c 100644 --- a/pkg/manager/backup/mysql/handler/grpc/handler.go +++ b/pkg/manager/backup/mysql/handler/grpc/handler.go @@ -21,8 +21,8 @@ import ( "context" "fmt" - "github.com/vdaas/vald/apis/grpc/manager/backup" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/backup" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" @@ -47,7 +47,7 @@ func New(opts ...Option) Server { return s } -func (s *server) GetVector(ctx context.Context, req *payload.Backup_GetVector_Request) (res *payload.Backup_Compressed_MetaVector, err error) { +func (s *server) GetVector(ctx context.Context, req *payload.Backup_GetVector_Request) (res *payload.Backup_Compressed_Vector, err error) { ctx, span := trace.StartSpan(ctx, "vald/manager-backup-mysql.GetVector") defer func() { if span != nil { @@ -55,7 +55,7 @@ func (s *server) GetVector(ctx context.Context, req *payload.Backup_GetVector_Re } }() uuid := req.GetUuid() - meta, err := s.mysql.GetMeta(ctx, uuid) + vector, err := s.mysql.GetVector(ctx, uuid) if err != nil { if errors.IsErrMySQLNotFound(err) { log.Warnf("[GetVector]\tnot found\t%v\t%s", req.Uuid, err.Error()) @@ -71,7 +71,7 @@ func (s *server) GetVector(ctx context.Context, req *payload.Backup_GetVector_Re return nil, status.WrapWithUnknown(fmt.Sprintf("GetVector API mysql uuid %s's unknown error occurred", uuid), err, info.Get()) } - return toBackupMetaVector(meta) + return toBackupVector(vector) } func (s *server) Locations(ctx context.Context, req *payload.Backup_Locations_Request) (res *payload.Info_IPs, err error) { @@ -96,63 +96,63 @@ func (s *server) Locations(ctx context.Context, req *payload.Backup_Locations_Re }, nil } -func (s *server) Register(ctx context.Context, meta *payload.Backup_Compressed_MetaVector) (res *payload.Empty, err error) { +func (s *server) Register(ctx context.Context, vector *payload.Backup_Compressed_Vector) (res *payload.Empty, err error) { ctx, span := trace.StartSpan(ctx, "vald/manager-backup-mysql.Register") defer func() { if span != nil { span.End() } }() - uuid := meta.GetUuid() - m, err := toModelMetaVector(meta) + uuid := vector.GetUuid() + m, err := toModelVector(vector) if err != nil { log.Errorf("[Register]\tunknown error\t%+v", err) if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) } - return nil, status.WrapWithInternal(fmt.Sprintf("Register API uuid %s's could not convert vector to meta_vector", uuid), err, info.Get()) + return nil, status.WrapWithInternal(fmt.Sprintf("Register API uuid %s's could not convert vector to backup format", uuid), err, info.Get()) } - err = s.mysql.SetMeta(ctx, m) + err = s.mysql.SetVector(ctx, m) if err != nil { 
log.Errorf("[Register]\tunknown error\t%+v", err) if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) } - return nil, status.WrapWithInternal(fmt.Sprintf("Register API uuid %s's failed to backup metadata", uuid), err, info.Get()) + return nil, status.WrapWithInternal(fmt.Sprintf("Register API uuid %s's failed to backup vector", uuid), err, info.Get()) } return new(payload.Empty), nil } -func (s *server) RegisterMulti(ctx context.Context, metas *payload.Backup_Compressed_MetaVectors) (res *payload.Empty, err error) { +func (s *server) RegisterMulti(ctx context.Context, vectors *payload.Backup_Compressed_Vectors) (res *payload.Empty, err error) { ctx, span := trace.StartSpan(ctx, "vald/manager-backup-mysql.RegisterMulti") defer func() { if span != nil { span.End() } }() - ms := make([]*model.MetaVector, 0, len(metas.GetVectors())) - for _, meta := range metas.Vectors { - var m *model.MetaVector - m, err = toModelMetaVector(meta) + ms := make([]*model.Vector, 0, len(vectors.GetVectors())) + for _, vector := range vectors.Vectors { + var m *model.Vector + m, err = toModelVector(vector) if err != nil { log.Errorf("[RegisterMulti]\tunknown error\t%+v", err) if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) } - return nil, status.WrapWithInternal(fmt.Sprintf("RegisterMulti API uuids %s's could not convert vector to meta_vector", meta.GetUuid()), err, info.Get()) + return nil, status.WrapWithInternal(fmt.Sprintf("RegisterMulti API uuids %s's could not convert vector to backup format", vector.GetUuid()), err, info.Get()) } ms = append(ms, m) } - err = s.mysql.SetMetas(ctx, ms...) + err = s.mysql.SetVectors(ctx, ms...) if err != nil { log.Errorf("[RegisterMulti]\tunknown error\t%+v", err) if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) } - return nil, status.WrapWithInternal(fmt.Sprintf("RegisterMulti API failed to backup metadatas %#v", ms), err, info.Get()) + return nil, status.WrapWithInternal(fmt.Sprintf("RegisterMulti API failed to backup vectors %#v", ms), err, info.Get()) } return new(payload.Empty), nil @@ -166,13 +166,13 @@ func (s *server) Remove(ctx context.Context, req *payload.Backup_Remove_Request) } }() uuid := req.GetUuid() - err = s.mysql.DeleteMeta(ctx, uuid) + err = s.mysql.DeleteVector(ctx, uuid) if err != nil { log.Errorf("[Remove]\tunknown error\t%+v", err) if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) } - return nil, status.WrapWithInternal(fmt.Sprintf("Remove API uuid %s's could not DeleteMeta", uuid), err, info.Get()) + return nil, status.WrapWithInternal(fmt.Sprintf("Remove API uuid %s's could not DeleteVector", uuid), err, info.Get()) } return new(payload.Empty), nil @@ -186,13 +186,13 @@ func (s *server) RemoveMulti(ctx context.Context, req *payload.Backup_Remove_Req } }() uuids := req.GetUuids() - err = s.mysql.DeleteMetas(ctx, uuids...) + err = s.mysql.DeleteVectors(ctx, uuids...) 
if err != nil { log.Errorf("[RemoveMulti]\tunknown error\t%+v", err) if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) } - return nil, status.WrapWithInternal(fmt.Sprintf("RemoveMulti API uuids %#v could not DeleteMetas", uuids), err, info.Get()) + return nil, status.WrapWithInternal(fmt.Sprintf("RemoveMulti API uuids %#v could not DeleteVectors", uuids), err, info.Get()) } return new(payload.Empty), nil @@ -238,20 +238,18 @@ func (s *server) RemoveIPs(ctx context.Context, req *payload.Backup_IP_Remove_Re return new(payload.Empty), nil } -func toBackupMetaVector(meta *model.MetaVector) (res *payload.Backup_Compressed_MetaVector, err error) { - return &payload.Backup_Compressed_MetaVector{ - Uuid: meta.GetUUID(), - Meta: meta.GetMeta(), - Vector: meta.GetVector(), - Ips: meta.GetIPs(), +func toBackupVector(vector *model.Vector) (res *payload.Backup_Compressed_Vector, err error) { + return &payload.Backup_Compressed_Vector{ + Uuid: vector.GetUUID(), + Vector: vector.GetVector(), + Ips: vector.GetIPs(), }, nil } -func toModelMetaVector(obj *payload.Backup_Compressed_MetaVector) (res *model.MetaVector, err error) { - return &model.MetaVector{ +func toModelVector(obj *payload.Backup_Compressed_Vector) (res *model.Vector, err error) { + return &model.Vector{ UUID: obj.Uuid, Vector: obj.Vector, - Meta: obj.Meta, IPs: obj.Ips, }, nil } diff --git a/pkg/manager/backup/mysql/handler/grpc/handler_test.go b/pkg/manager/backup/mysql/handler/grpc/handler_test.go index d8e1fb9284..db4f2bd36a 100644 --- a/pkg/manager/backup/mysql/handler/grpc/handler_test.go +++ b/pkg/manager/backup/mysql/handler/grpc/handler_test.go @@ -22,15 +22,15 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/pkg/manager/backup/mysql/model" "github.com/vdaas/vald/pkg/manager/backup/mysql/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -79,9 +79,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -96,12 +98,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_GetVector(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_GetVector_Request @@ -110,7 +112,7 @@ func Test_server_GetVector(t *testing.T) { mysql service.MySQL } type want struct { - wantRes *payload.Backup_Compressed_MetaVector + wantRes *payload.Backup_Compressed_Vector err error } type test struct { @@ -118,11 +120,11 @@ func Test_server_GetVector(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Backup_Compressed_MetaVector, error) error + checkFunc func(want, *payload.Backup_Compressed_Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Backup_Compressed_MetaVector, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Backup_Compressed_Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -167,9 +169,11 @@ func Test_server_GetVector(t *testing.T) { */ } - for _, test := range tests { + for _, 
tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -187,12 +191,12 @@ func Test_server_GetVector(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Locations(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_Locations_Request @@ -258,9 +262,11 @@ func Test_server_Locations(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -278,15 +284,15 @@ func Test_server_Locations(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Register(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - meta *payload.Backup_Compressed_MetaVector + ctx context.Context + vector *payload.Backup_Compressed_Vector } type fields struct { mysql service.MySQL @@ -320,7 +326,7 @@ func Test_server_Register(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - meta: nil, + vector: nil, }, fields: fields { mysql: nil, @@ -337,7 +343,7 @@ func Test_server_Register(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - meta: nil, + vector: nil, }, fields: fields { mysql: nil, @@ -349,9 +355,11 @@ func Test_server_Register(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -365,19 +373,19 @@ func Test_server_Register(t *testing.T) { mysql: test.fields.mysql, } - gotRes, err := s.Register(test.args.ctx, test.args.meta) + gotRes, err := s.Register(test.args.ctx, test.args.vector) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_RegisterMulti(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - metas *payload.Backup_Compressed_MetaVectors + ctx context.Context + vectors *payload.Backup_Compressed_Vectors } type fields struct { mysql service.MySQL @@ -411,7 +419,7 @@ func Test_server_RegisterMulti(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - metas: nil, + vectors: nil, }, fields: fields { mysql: nil, @@ -428,7 +436,7 @@ func Test_server_RegisterMulti(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - metas: nil, + vectors: nil, }, fields: fields { mysql: nil, @@ -440,9 +448,11 @@ func Test_server_RegisterMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -456,16 +466,16 @@ func Test_server_RegisterMulti(t *testing.T) { mysql: test.fields.mysql, } - gotRes, err := s.RegisterMulti(test.args.ctx, test.args.metas) + gotRes, err := s.RegisterMulti(test.args.ctx, test.args.vectors) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Remove(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req 
*payload.Backup_Remove_Request @@ -531,9 +541,11 @@ func Test_server_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -551,12 +563,12 @@ func Test_server_Remove(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_RemoveMulti(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_Remove_RequestMulti @@ -622,9 +634,11 @@ func Test_server_RemoveMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -642,12 +656,12 @@ func Test_server_RemoveMulti(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_RegisterIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_IP_Register_Request @@ -713,9 +727,11 @@ func Test_server_RegisterIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -733,12 +749,12 @@ func Test_server_RegisterIPs(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_RemoveIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_IP_Remove_Request @@ -804,9 +820,11 @@ func Test_server_RemoveIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -824,28 +842,28 @@ func Test_server_RemoveIPs(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_toBackupMetaVector(t *testing.T) { +func Test_toBackupVector(t *testing.T) { + t.Parallel() type args struct { - meta *model.MetaVector + vector *model.Vector } type want struct { - wantRes *payload.Backup_Compressed_MetaVector + wantRes *payload.Backup_Compressed_Vector err error } type test struct { name string args args want want - checkFunc func(want, *payload.Backup_Compressed_MetaVector, error) error + checkFunc func(want, *payload.Backup_Compressed_Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Backup_Compressed_MetaVector, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Backup_Compressed_Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -860,7 +878,7 @@ func Test_toBackupMetaVector(t *testing.T) { { name: "test_case_1", args: args { - meta: nil, + vector: nil, }, want: want{}, checkFunc: defaultCheckFunc, @@ -873,7 +891,7 @@ func Test_toBackupMetaVector(t *testing.T) { return test { name: "test_case_2", args: args { - meta: nil, + vector: nil, }, want: want{}, 
checkFunc: defaultCheckFunc, @@ -882,9 +900,11 @@ func Test_toBackupMetaVector(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -895,32 +915,32 @@ func Test_toBackupMetaVector(t *testing.T) { test.checkFunc = defaultCheckFunc } - gotRes, err := toBackupMetaVector(test.args.meta) + gotRes, err := toBackupVector(test.args.vector) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_toModelMetaVector(t *testing.T) { +func Test_toModelVector(t *testing.T) { + t.Parallel() type args struct { - obj *payload.Backup_Compressed_MetaVector + obj *payload.Backup_Compressed_Vector } type want struct { - wantRes *model.MetaVector + wantRes *model.Vector err error } type test struct { name string args args want want - checkFunc func(want, *model.MetaVector, error) error + checkFunc func(want, *model.Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *model.MetaVector, err error) error { + defaultCheckFunc := func(w want, gotRes *model.Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -957,9 +977,11 @@ func Test_toModelMetaVector(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -970,11 +992,10 @@ func Test_toModelMetaVector(t *testing.T) { test.checkFunc = defaultCheckFunc } - gotRes, err := toModelMetaVector(test.args.obj) + gotRes, err := toModelVector(test.args.obj) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/backup/mysql/handler/grpc/option.go b/pkg/manager/backup/mysql/handler/grpc/option.go index 3e44863555..4976d59ef2 100644 --- a/pkg/manager/backup/mysql/handler/grpc/option.go +++ b/pkg/manager/backup/mysql/handler/grpc/option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/pkg/manager/backup/mysql/service" type Option func(*server) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithMySQL(m service.MySQL) Option { return func(s *server) { diff --git a/pkg/manager/backup/mysql/handler/grpc/option_test.go b/pkg/manager/backup/mysql/handler/grpc/option_test.go index b312e0e7c4..b4fdd97240 100644 --- a/pkg/manager/backup/mysql/handler/grpc/option_test.go +++ b/pkg/manager/backup/mysql/handler/grpc/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/pkg/manager/backup/mysql/service" - "go.uber.org/goleak" ) func TestWithMySQL(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { m service.MySQL @@ -63,7 +64,7 @@ func TestWithMySQL(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithMySQL(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt 
*testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithMySQL(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithMySQL(t *testing.T) { got := WithMySQL(test.args.m) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/backup/mysql/handler/rest/handler.go b/pkg/manager/backup/mysql/handler/rest/handler.go index c1ba0ecfe2..a10c4ec099 100644 --- a/pkg/manager/backup/mysql/handler/rest/handler.go +++ b/pkg/manager/backup/mysql/handler/rest/handler.go @@ -20,8 +20,8 @@ package rest import ( "net/http" - "github.com/vdaas/vald/apis/grpc/manager/backup" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/backup" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/net/http/json" ) @@ -64,14 +64,14 @@ func (h *handler) Locations(w http.ResponseWriter, r *http.Request) (int, error) } func (h *handler) Register(w http.ResponseWriter, r *http.Request) (int, error) { - var req *payload.Backup_Compressed_MetaVector + var req *payload.Backup_Compressed_Vector return json.Handler(w, r, &req, func() (interface{}, error) { return h.backup.Register(r.Context(), req) }) } func (h *handler) RegisterMulti(w http.ResponseWriter, r *http.Request) (int, error) { - var req *payload.Backup_Compressed_MetaVectors + var req *payload.Backup_Compressed_Vectors return json.Handler(w, r, &req, func() (interface{}, error) { return h.backup.RegisterMulti(r.Context(), req) }) diff --git a/pkg/manager/backup/mysql/handler/rest/handler_test.go b/pkg/manager/backup/mysql/handler/rest/handler_test.go index 37d902b265..2faaa990c8 100644 --- a/pkg/manager/backup/mysql/handler/rest/handler_test.go +++ b/pkg/manager/backup/mysql/handler/rest/handler_test.go @@ -22,13 +22,13 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/manager/backup" + "github.com/vdaas/vald/apis/grpc/v1/manager/backup" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -77,9 +77,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -94,12 +96,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_GetVector(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -165,9 +167,11 @@ func Test_handler_GetVector(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -185,12 +189,12 @@ func Test_handler_GetVector(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func 
Test_handler_Locations(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -256,9 +260,11 @@ func Test_handler_Locations(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -276,12 +282,12 @@ func Test_handler_Locations(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Register(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -347,9 +353,11 @@ func Test_handler_Register(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,12 +375,12 @@ func Test_handler_Register(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_RegisterMulti(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -438,9 +446,11 @@ func Test_handler_RegisterMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -458,12 +468,12 @@ func Test_handler_RegisterMulti(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Remove(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -529,9 +539,11 @@ func Test_handler_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -549,12 +561,12 @@ func Test_handler_Remove(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_RemoveMulti(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -620,9 +632,11 @@ func Test_handler_RemoveMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -640,12 +654,12 @@ func Test_handler_RemoveMulti(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_RegisterIPs(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -711,9 +725,11 @@ func Test_handler_RegisterIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -731,12 +747,12 @@ func Test_handler_RegisterIPs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { 
tt.Errorf("error = %v", err) } - }) } } func Test_handler_RemoveIPs(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -802,9 +818,11 @@ func Test_handler_RemoveIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -822,7 +840,6 @@ func Test_handler_RemoveIPs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/backup/mysql/handler/rest/option.go b/pkg/manager/backup/mysql/handler/rest/option.go index 7aff12c199..e1d09c440e 100644 --- a/pkg/manager/backup/mysql/handler/rest/option.go +++ b/pkg/manager/backup/mysql/handler/rest/option.go @@ -17,13 +17,11 @@ // Package rest provides rest api logic package rest -import "github.com/vdaas/vald/apis/grpc/manager/backup" +import "github.com/vdaas/vald/apis/grpc/v1/manager/backup" type Option func(*handler) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithBackup(b backup.BackupServer) Option { return func(h *handler) { diff --git a/pkg/manager/backup/mysql/handler/rest/option_test.go b/pkg/manager/backup/mysql/handler/rest/option_test.go index 9535622a92..b62608c5fa 100644 --- a/pkg/manager/backup/mysql/handler/rest/option_test.go +++ b/pkg/manager/backup/mysql/handler/rest/option_test.go @@ -20,12 +20,13 @@ package rest import ( "testing" - "github.com/vdaas/vald/apis/grpc/manager/backup" - + "github.com/vdaas/vald/apis/grpc/v1/manager/backup" "go.uber.org/goleak" ) func TestWithBackup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { b backup.BackupServer @@ -63,7 +64,7 @@ func TestWithBackup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithBackup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithBackup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithBackup(t *testing.T) { got := WithBackup(test.args.b) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/backup/mysql/model/model.go b/pkg/manager/backup/mysql/model/model.go index 9526895021..15e022aaac 100644 --- a/pkg/manager/backup/mysql/model/model.go +++ b/pkg/manager/backup/mysql/model/model.go @@ -17,14 +17,12 @@ // Package grpc provides grpc server logic package model -type MetaVector struct { +type Vector struct { UUID string Vector []byte - Meta string IPs []string } -func (m *MetaVector) GetUUID() string { return m.UUID } -func (m *MetaVector) GetVector() []byte { return m.Vector 
} -func (m *MetaVector) GetMeta() string { return m.Meta } -func (m *MetaVector) GetIPs() []string { return m.IPs } +func (m *Vector) GetUUID() string { return m.UUID } +func (m *Vector) GetVector() []byte { return m.Vector } +func (m *Vector) GetIPs() []string { return m.IPs } diff --git a/pkg/manager/backup/mysql/model/model_test.go b/pkg/manager/backup/mysql/model/model_test.go index c710f3362a..fafb05cfb4 100644 --- a/pkg/manager/backup/mysql/model/model_test.go +++ b/pkg/manager/backup/mysql/model/model_test.go @@ -22,13 +22,14 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) -func TestMetaVector_GetUUID(t *testing.T) { +func TestVector_GetUUID(t *testing.T) { + t.Parallel() type fields struct { UUID string Vector []byte - Meta string IPs []string } type want struct { @@ -56,7 +57,6 @@ func TestMetaVector_GetUUID(t *testing.T) { fields: fields { UUID: "", Vector: nil, - Meta: "", IPs: nil, }, want: want{}, @@ -72,7 +72,6 @@ func TestMetaVector_GetUUID(t *testing.T) { fields: fields { UUID: "", Vector: nil, - Meta: "", IPs: nil, }, want: want{}, @@ -82,8 +81,11 @@ func TestMetaVector_GetUUID(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -93,10 +95,9 @@ func TestMetaVector_GetUUID(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &MetaVector{ + m := &Vector{ UUID: test.fields.UUID, Vector: test.fields.Vector, - Meta: test.fields.Meta, IPs: test.fields.IPs, } @@ -104,16 +105,15 @@ func TestMetaVector_GetUUID(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } -func TestMetaVector_GetVector(t *testing.T) { +func TestVector_GetVector(t *testing.T) { + t.Parallel() type fields struct { UUID string Vector []byte - Meta string IPs []string } type want struct { @@ -141,7 +141,6 @@ func TestMetaVector_GetVector(t *testing.T) { fields: fields { UUID: "", Vector: nil, - Meta: "", IPs: nil, }, want: want{}, @@ -157,7 +156,6 @@ func TestMetaVector_GetVector(t *testing.T) { fields: fields { UUID: "", Vector: nil, - Meta: "", IPs: nil, }, want: want{}, @@ -167,8 +165,11 @@ func TestMetaVector_GetVector(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -178,10 +179,9 @@ func TestMetaVector_GetVector(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &MetaVector{ + m := &Vector{ UUID: test.fields.UUID, Vector: test.fields.Vector, - Meta: test.fields.Meta, IPs: test.fields.IPs, } @@ -189,101 +189,15 @@ func TestMetaVector_GetVector(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - - }) - } -} - -func TestMetaVector_GetMeta(t *testing.T) { - type fields struct { - UUID string - Vector []byte - Meta string - IPs []string - } - type want struct { - want string - } - type test struct { - name string - fields fields - want want - checkFunc func(want, string) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got string) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases 
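Editor's note: the generated table-driven tests in this patch route every assertion through a replaceable checkFunc, and the default implementation compares the returned error with errors.Is and the returned value with reflect.DeepEqual. A compact, compilable version of that skeleton follows; it uses the standard library errors and fmt packages where the real tests use the repository's internal wrappers, and the head helper is invented for illustration.

package example

import (
	"errors"
	"fmt"
	"reflect"
	"testing"
)

func head(s []string) (string, error) {
	if len(s) == 0 {
		return "", errors.New("empty slice")
	}
	return s[0], nil
}

func TestHead(t *testing.T) {
	type want struct {
		want string
		err  error
	}
	// defaultCheckFunc mirrors the generated pattern: error first, value second.
	defaultCheckFunc := func(w want, got string, err error) error {
		if !errors.Is(err, w.err) {
			return fmt.Errorf("got_error: %v, want: %v", err, w.err)
		}
		if !reflect.DeepEqual(got, w.want) {
			return fmt.Errorf("got: %v, want: %v", got, w.want)
		}
		return nil
	}
	got, err := head([]string{"a", "b"})
	if e := defaultCheckFunc(want{want: "a", err: nil}, got, err); e != nil {
		t.Errorf("error = %v", e)
	}
}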
- /* - { - name: "test_case_1", - fields: fields { - UUID: "", - Vector: nil, - Meta: "", - IPs: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - fields: fields { - UUID: "", - Vector: nil, - Meta: "", - IPs: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - m := &MetaVector{ - UUID: test.fields.UUID, - Vector: test.fields.Vector, - Meta: test.fields.Meta, - IPs: test.fields.IPs, - } - - got := m.GetMeta() - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) } } -func TestMetaVector_GetIPs(t *testing.T) { +func TestVector_GetIPs(t *testing.T) { + t.Parallel() type fields struct { UUID string Vector []byte - Meta string IPs []string } type want struct { @@ -311,7 +225,6 @@ func TestMetaVector_GetIPs(t *testing.T) { fields: fields { UUID: "", Vector: nil, - Meta: "", IPs: nil, }, want: want{}, @@ -327,7 +240,6 @@ func TestMetaVector_GetIPs(t *testing.T) { fields: fields { UUID: "", Vector: nil, - Meta: "", IPs: nil, }, want: want{}, @@ -337,8 +249,11 @@ func TestMetaVector_GetIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -348,10 +263,9 @@ func TestMetaVector_GetIPs(t *testing.T) { if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - m := &MetaVector{ + m := &Vector{ UUID: test.fields.UUID, Vector: test.fields.Vector, - Meta: test.fields.Meta, IPs: test.fields.IPs, } @@ -359,7 +273,6 @@ func TestMetaVector_GetIPs(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/backup/mysql/router/option.go b/pkg/manager/backup/mysql/router/option.go index b6780cf1f7..9c107b73ad 100644 --- a/pkg/manager/backup/mysql/router/option.go +++ b/pkg/manager/backup/mysql/router/option.go @@ -24,11 +24,9 @@ import ( type Option func(*router) -var ( - defaultOpts = []Option{ - WithTimeout("3s"), - } -) +var defaultOpts = []Option{ + WithTimeout("3s"), +} func WithHandler(h rest.Handler) Option { return func(r *router) { diff --git a/pkg/manager/backup/mysql/router/option_test.go b/pkg/manager/backup/mysql/router/option_test.go index a03aad88fc..b9c65d69c9 100644 --- a/pkg/manager/backup/mysql/router/option_test.go +++ b/pkg/manager/backup/mysql/router/option_test.go @@ -22,11 +22,12 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/pkg/manager/backup/mysql/handler/rest" - "go.uber.org/goleak" ) func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { h rest.Handler @@ -64,7 +65,7 @@ func TestWithHandler(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -100,9 +101,11 @@ func TestWithHandler(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + 
test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -123,7 +126,7 @@ func TestWithHandler(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -131,7 +134,7 @@ func TestWithHandler(t *testing.T) { got := WithHandler(test.args.h) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -140,6 +143,8 @@ func TestWithHandler(t *testing.T) { } func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { timeout string @@ -177,7 +182,7 @@ func TestWithTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -213,9 +218,11 @@ func TestWithTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -236,7 +243,7 @@ func TestWithTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -244,7 +251,7 @@ func TestWithTimeout(t *testing.T) { got := WithTimeout(test.args.timeout) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -253,6 +260,8 @@ func TestWithTimeout(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -290,7 +299,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -326,9 +335,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -349,7 +360,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -357,7 +368,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git 
a/pkg/manager/backup/mysql/router/router.go b/pkg/manager/backup/mysql/router/router.go index c7a0698635..a69a6e270a 100644 --- a/pkg/manager/backup/mysql/router/router.go +++ b/pkg/manager/backup/mysql/router/router.go @@ -32,7 +32,7 @@ type router struct { timeout string } -// New returns REST route&method information from handler interface +// New returns REST route&method information from handler interface. func New(opts ...Option) http.Handler { r := new(router) @@ -48,14 +48,15 @@ func New(opts ...Option) http.Handler { middleware.WithTimeout(r.timeout), middleware.WithErrorGroup(r.eg), )), - routing.WithRoutes([]routing.Route{{ - "GetVector", - []string{ - http.MethodGet, + routing.WithRoutes([]routing.Route{ + { + "GetVector", + []string{ + http.MethodGet, + }, + "/vector/{uuid}", + h.GetVector, }, - "/vector/{uuid}", - h.GetVector, - }, { "Locations", []string{ diff --git a/pkg/manager/backup/mysql/router/router_test.go b/pkg/manager/backup/mysql/router/router_test.go index 25eaec0bef..97248787ae 100644 --- a/pkg/manager/backup/mysql/router/router_test.go +++ b/pkg/manager/backup/mysql/router/router_test.go @@ -23,9 +23,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -74,8 +76,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -90,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/backup/mysql/service/mysql.go b/pkg/manager/backup/mysql/service/mysql.go index 7df54b3b03..22daa80027 100644 --- a/pkg/manager/backup/mysql/service/mysql.go +++ b/pkg/manager/backup/mysql/service/mysql.go @@ -28,12 +28,12 @@ import ( type MySQL interface { Connect(ctx context.Context) error Close(ctx context.Context) error - GetMeta(ctx context.Context, uuid string) (*model.MetaVector, error) + GetVector(ctx context.Context, uuid string) (*model.Vector, error) GetIPs(ctx context.Context, uuid string) ([]string, error) - SetMeta(ctx context.Context, meta *model.MetaVector) error - SetMetas(ctx context.Context, metas ...*model.MetaVector) error - DeleteMeta(ctx context.Context, uuid string) error - DeleteMetas(ctx context.Context, uuids ...string) error + SetVector(ctx context.Context, vector *model.Vector) error + SetVectors(ctx context.Context, vectors ...*model.Vector) error + DeleteVector(ctx context.Context, uuid string) error + DeleteVectors(ctx context.Context, uuids ...string) error SetIPs(ctx context.Context, uuid string, ips ...string) error RemoveIPs(ctx context.Context, ips ...string) error } @@ -61,16 +61,15 @@ func (c *client) Close(ctx context.Context) error { return c.db.Close(ctx) } -func (c *client) GetMeta(ctx context.Context, uuid string) (*model.MetaVector, error) { - res, err := c.db.GetMeta(ctx, uuid) +func (c *client) GetVector(ctx context.Context, uuid string) (*model.Vector, error) { + res, err := c.db.GetVector(ctx, uuid) if err != nil { return nil, err } - return &model.MetaVector{ + return &model.Vector{ UUID: res.GetUUID(), Vector: res.GetVector(), - Meta: res.GetMeta(), IPs: res.GetIPs(), }, err } @@ -79,25 +78,25 @@ func (c *client) GetIPs(ctx context.Context, uuid string) ([]string, error) { return c.db.GetIPs(ctx, uuid) } -func (c *client) 
SetMeta(ctx context.Context, meta *model.MetaVector) error { - return c.db.SetMeta(ctx, meta) +func (c *client) SetVector(ctx context.Context, vector *model.Vector) error { + return c.db.SetVector(ctx, vector) } -func (c *client) SetMetas(ctx context.Context, metas ...*model.MetaVector) error { - ms := make([]mysql.MetaVector, 0, len(metas)) - for _, meta := range metas { - m := meta +func (c *client) SetVectors(ctx context.Context, vectors ...*model.Vector) error { + ms := make([]mysql.Vector, 0, len(vectors)) + for _, vector := range vectors { + m := vector ms = append(ms, m) } - return c.db.SetMetas(ctx, ms...) + return c.db.SetVectors(ctx, ms...) } -func (c *client) DeleteMeta(ctx context.Context, uuid string) error { - return c.db.DeleteMeta(ctx, uuid) +func (c *client) DeleteVector(ctx context.Context, uuid string) error { + return c.db.DeleteVector(ctx, uuid) } -func (c *client) DeleteMetas(ctx context.Context, uuids ...string) error { - return c.db.DeleteMetas(ctx, uuids...) +func (c *client) DeleteVectors(ctx context.Context, uuids ...string) error { + return c.db.DeleteVectors(ctx, uuids...) } func (c *client) SetIPs(ctx context.Context, uuid string, ips ...string) error { diff --git a/pkg/manager/backup/mysql/service/mysql_test.go b/pkg/manager/backup/mysql/service/mysql_test.go index 7aff23e687..1f9b4b7efd 100644 --- a/pkg/manager/backup/mysql/service/mysql_test.go +++ b/pkg/manager/backup/mysql/service/mysql_test.go @@ -28,6 +28,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -80,8 +81,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -97,12 +100,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotMs, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Connect(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -161,8 +164,10 @@ func Test_client_Connect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -181,12 +186,12 @@ func Test_client_Connect(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Close(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -245,8 +250,10 @@ func Test_client_Close(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -265,12 +272,12 @@ func Test_client_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_client_GetMeta(t *testing.T) { +func Test_client_GetVector(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -279,7 +286,7 @@ func Test_client_GetMeta(t *testing.T) { db mysql.MySQL } type want struct { - want *model.MetaVector + want *model.Vector err error } type test struct { @@ -287,11 +294,11 @@ func Test_client_GetMeta(t *testing.T) { args args fields fields want want - checkFunc func(want, *model.MetaVector, error) error + checkFunc func(want, *model.Vector, 
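Editor's note: for context on the interface rename above, a hedged sketch of how a caller of the renamed service.MySQL interface stores one record after the MetaVector-to-Vector migration; storeVector is an illustrative helper and not part of the patch:

package example

import (
	"context"

	"github.com/vdaas/vald/pkg/manager/backup/mysql/model"
	"github.com/vdaas/vald/pkg/manager/backup/mysql/service"
)

// storeVector connects, writes a single model.Vector (which no longer carries
// a Meta field), and closes the connection.
func storeVector(ctx context.Context, ms service.MySQL, uuid string, vec []byte, ips []string) error {
	if err := ms.Connect(ctx); err != nil {
		return err
	}
	defer ms.Close(ctx)
	return ms.SetVector(ctx, &model.Vector{
		UUID:   uuid,
		Vector: vec,
		IPs:    ips,
	})
}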
error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got *model.MetaVector, err error) error { + defaultCheckFunc := func(w want, got *model.Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -336,8 +343,10 @@ func Test_client_GetMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -352,16 +361,16 @@ func Test_client_GetMeta(t *testing.T) { db: test.fields.db, } - got, err := c.GetMeta(test.args.ctx, test.args.uuid) + got, err := c.GetVector(test.args.ctx, test.args.uuid) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_GetIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -427,8 +436,10 @@ func Test_client_GetIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -447,15 +458,15 @@ func Test_client_GetIPs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_client_SetMeta(t *testing.T) { +func Test_client_SetVector(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - meta *model.MetaVector + ctx context.Context + vector *model.Vector } type fields struct { db mysql.MySQL @@ -485,7 +496,7 @@ func Test_client_SetMeta(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - meta: nil, + vector: nil, }, fields: fields { db: nil, @@ -502,7 +513,7 @@ func Test_client_SetMeta(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - meta: nil, + vector: nil, }, fields: fields { db: nil, @@ -514,8 +525,10 @@ func Test_client_SetMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -530,19 +543,19 @@ func Test_client_SetMeta(t *testing.T) { db: test.fields.db, } - err := c.SetMeta(test.args.ctx, test.args.meta) + err := c.SetVector(test.args.ctx, test.args.vector) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_client_SetMetas(t *testing.T) { +func Test_client_SetVectors(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - metas []*model.MetaVector + ctx context.Context + vectors []*model.Vector } type fields struct { db mysql.MySQL @@ -572,7 +585,7 @@ func Test_client_SetMetas(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - metas: nil, + vectors: nil, }, fields: fields { db: nil, @@ -589,7 +602,7 @@ func Test_client_SetMetas(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - metas: nil, + vectors: nil, }, fields: fields { db: nil, @@ -601,8 +614,10 @@ func Test_client_SetMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -617,16 +632,16 @@ func Test_client_SetMetas(t *testing.T) { db: test.fields.db, } - err := 
c.SetMetas(test.args.ctx, test.args.metas...) + err := c.SetVectors(test.args.ctx, test.args.vectors...) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_client_DeleteMeta(t *testing.T) { +func Test_client_DeleteVector(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -688,8 +703,10 @@ func Test_client_DeleteMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -704,16 +721,16 @@ func Test_client_DeleteMeta(t *testing.T) { db: test.fields.db, } - err := c.DeleteMeta(test.args.ctx, test.args.uuid) + err := c.DeleteVector(test.args.ctx, test.args.uuid) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } -func Test_client_DeleteMetas(t *testing.T) { +func Test_client_DeleteVectors(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuids []string @@ -775,8 +792,10 @@ func Test_client_DeleteMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -791,16 +810,16 @@ func Test_client_DeleteMetas(t *testing.T) { db: test.fields.db, } - err := c.DeleteMetas(test.args.ctx, test.args.uuids...) + err := c.DeleteVectors(test.args.ctx, test.args.uuids...) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_SetIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -865,8 +884,10 @@ func Test_client_SetIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -885,12 +906,12 @@ func Test_client_SetIPs(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_RemoveIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context ips []string @@ -952,8 +973,10 @@ func Test_client_RemoveIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -972,7 +995,6 @@ func Test_client_RemoveIPs(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/backup/mysql/service/option.go b/pkg/manager/backup/mysql/service/option.go index e87c9df9ec..7fac4116dc 100644 --- a/pkg/manager/backup/mysql/service/option.go +++ b/pkg/manager/backup/mysql/service/option.go @@ -23,9 +23,7 @@ import ( type Option func(*client) error -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithMySQLClient(m mysql.MySQL) Option { return func(c *client) error { diff --git a/pkg/manager/backup/mysql/service/option_test.go b/pkg/manager/backup/mysql/service/option_test.go new file mode 100644 index 0000000000..ed5132131d --- /dev/null +++ b/pkg/manager/backup/mysql/service/option_test.go @@ -0,0 +1,142 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package service manages the main logic of server. +package service + +import ( + "testing" + + "github.com/vdaas/vald/internal/db/rdb/mysql" + "go.uber.org/goleak" +) + +func TestWithMySQLClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + m mysql.MySQL + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + m: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + m: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithMySQLClient(test.args.m) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithMySQLClient(test.args.m) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/manager/backup/mysql/usecase/backupd.go b/pkg/manager/backup/mysql/usecase/backupd.go index f6a10fb1d7..73f241881f 100644 --- a/pkg/manager/backup/mysql/usecase/backupd.go +++ b/pkg/manager/backup/mysql/usecase/backupd.go @@ -19,7 +19,7 @@ package usecase import ( "context" - 
"github.com/vdaas/vald/apis/grpc/manager/backup" + "github.com/vdaas/vald/apis/grpc/v1/manager/backup" iconf "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/db/rdb/mysql" "github.com/vdaas/vald/internal/errgroup" @@ -131,7 +131,6 @@ func New(cfg *config.Data) (r runner.Runner, err error) { }), // TODO add GraphQL handler ) - if err != nil { return nil, err } diff --git a/pkg/manager/backup/mysql/usecase/backupd_test.go b/pkg/manager/backup/mysql/usecase/backupd_test.go index a27434769b..f369283d3b 100644 --- a/pkg/manager/backup/mysql/usecase/backupd_test.go +++ b/pkg/manager/backup/mysql/usecase/backupd_test.go @@ -28,11 +28,11 @@ import ( "github.com/vdaas/vald/internal/servers/starter" "github.com/vdaas/vald/pkg/manager/backup/mysql/config" "github.com/vdaas/vald/pkg/manager/backup/mysql/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -85,9 +85,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -102,12 +104,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -178,9 +180,11 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -202,12 +206,12 @@ func Test_run_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -282,9 +286,11 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -306,12 +312,12 @@ func Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -382,9 +388,11 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -406,12 +414,12 @@ func Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -482,9 +490,11 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -506,12 +516,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, 
err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -582,9 +592,11 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -606,7 +618,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/compressor/config/config_test.go b/pkg/manager/compressor/config/config_test.go index eba18af56a..e90809127a 100644 --- a/pkg/manager/compressor/config/config_test.go +++ b/pkg/manager/compressor/config/config_test.go @@ -26,6 +26,7 @@ import ( ) func TestNewConfig(t *testing.T) { + t.Parallel() type args struct { path string } @@ -78,9 +79,11 @@ func TestNewConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -95,7 +98,6 @@ func TestNewConfig(t *testing.T) { if err := test.checkFunc(test.want, gotCfg, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/compressor/handler/grpc/handler.go b/pkg/manager/compressor/handler/grpc/handler.go index afff701d3b..4557c4191b 100644 --- a/pkg/manager/compressor/handler/grpc/handler.go +++ b/pkg/manager/compressor/handler/grpc/handler.go @@ -21,8 +21,8 @@ import ( "context" "fmt" - "github.com/vdaas/vald/apis/grpc/manager/compressor" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/compressor" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc/status" @@ -47,7 +47,7 @@ func New(opts ...Option) Server { return s } -func (s *server) GetVector(ctx context.Context, req *payload.Backup_GetVector_Request) (res *payload.Backup_MetaVector, err error) { +func (s *server) GetVector(ctx context.Context, req *payload.Backup_GetVector_Request) (res *payload.Backup_Vector, err error) { ctx, span := trace.StartSpan(ctx, "vald/manager-compressor.GetVector") defer func() { if span != nil { @@ -73,9 +73,8 @@ func (s *server) GetVector(ctx context.Context, req *payload.Backup_GetVector_Re return nil, status.WrapWithInternal(fmt.Sprintf("GetVector API uuid %s's object failed to decompress %#v", uuid, r), err, info.Get()) } - return &payload.Backup_MetaVector{ + return &payload.Backup_Vector{ Uuid: r.GetUuid(), - Meta: r.GetMeta(), Vector: vector, Ips: r.GetIps(), }, nil @@ -103,7 +102,7 @@ func (s *server) Locations(ctx context.Context, req *payload.Backup_Locations_Re }, nil } -func (s *server) Register(ctx context.Context, meta *payload.Backup_MetaVector) (res *payload.Empty, err error) { +func (s *server) Register(ctx context.Context, vec *payload.Backup_Vector) (res *payload.Empty, err error) { ctx, span := trace.StartSpan(ctx, "vald/manager-compressor.Register") defer func() { if span != nil { @@ -111,20 +110,20 @@ func (s *server) Register(ctx context.Context, meta *payload.Backup_MetaVector) } }() - err = s.registerer.Register(ctx, meta) + err = s.registerer.Register(ctx, vec) if err != nil { log.Errorf("[Register]\tregisterer returns 
error\t%+v", err) if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) } return nil, status.WrapWithInternal( - fmt.Sprintf("Register API uuid %s could not processed", meta.GetUuid()), err, info.Get()) + fmt.Sprintf("Register API uuid %s could not processed", vec.GetUuid()), err, info.Get()) } return new(payload.Empty), nil } -func (s *server) RegisterMulti(ctx context.Context, metas *payload.Backup_MetaVectors) (res *payload.Empty, err error) { +func (s *server) RegisterMulti(ctx context.Context, vecs *payload.Backup_Vectors) (res *payload.Empty, err error) { ctx, span := trace.StartSpan(ctx, "vald/manager-compressor.RegisterMulti") defer func() { if span != nil { @@ -132,7 +131,7 @@ func (s *server) RegisterMulti(ctx context.Context, metas *payload.Backup_MetaVe } }() - err = s.registerer.RegisterMulti(ctx, metas) + err = s.registerer.RegisterMulti(ctx, vecs) if err != nil { log.Errorf("[RegisterMulti]\tregisterer returns error\t%+v", err) if span != nil { diff --git a/pkg/manager/compressor/handler/grpc/handler_test.go b/pkg/manager/compressor/handler/grpc/handler_test.go index e33785e2fe..4fadf0afc6 100644 --- a/pkg/manager/compressor/handler/grpc/handler_test.go +++ b/pkg/manager/compressor/handler/grpc/handler_test.go @@ -22,14 +22,14 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/pkg/manager/compressor/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -78,9 +78,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -95,12 +97,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_GetVector(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_GetVector_Request @@ -111,7 +113,7 @@ func Test_server_GetVector(t *testing.T) { registerer service.Registerer } type want struct { - wantRes *payload.Backup_MetaVector + wantRes *payload.Backup_Vector err error } type test struct { @@ -119,11 +121,11 @@ func Test_server_GetVector(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Backup_MetaVector, error) error + checkFunc func(want, *payload.Backup_Vector, error) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotRes *payload.Backup_MetaVector, err error) error { + defaultCheckFunc := func(w want, gotRes *payload.Backup_Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -172,9 +174,11 @@ func Test_server_GetVector(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -194,12 +198,12 @@ func Test_server_GetVector(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Locations(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req 
*payload.Backup_Locations_Request @@ -271,9 +275,11 @@ func Test_server_Locations(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -293,15 +299,15 @@ func Test_server_Locations(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Register(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - meta *payload.Backup_MetaVector + ctx context.Context + vec *payload.Backup_Vector } type fields struct { backup service.Backup @@ -337,7 +343,7 @@ func Test_server_Register(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - meta: nil, + vec: nil, }, fields: fields { backup: nil, @@ -356,7 +362,7 @@ func Test_server_Register(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - meta: nil, + vec: nil, }, fields: fields { backup: nil, @@ -370,9 +376,11 @@ func Test_server_Register(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -388,19 +396,19 @@ func Test_server_Register(t *testing.T) { registerer: test.fields.registerer, } - gotRes, err := s.Register(test.args.ctx, test.args.meta) + gotRes, err := s.Register(test.args.ctx, test.args.vec) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_RegisterMulti(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - metas *payload.Backup_MetaVectors + ctx context.Context + vecs *payload.Backup_Vectors } type fields struct { backup service.Backup @@ -436,7 +444,7 @@ func Test_server_RegisterMulti(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - metas: nil, + vecs: nil, }, fields: fields { backup: nil, @@ -455,7 +463,7 @@ func Test_server_RegisterMulti(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - metas: nil, + vecs: nil, }, fields: fields { backup: nil, @@ -469,9 +477,11 @@ func Test_server_RegisterMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -487,16 +497,16 @@ func Test_server_RegisterMulti(t *testing.T) { registerer: test.fields.registerer, } - gotRes, err := s.RegisterMulti(test.args.ctx, test.args.metas) + gotRes, err := s.RegisterMulti(test.args.ctx, test.args.vecs) if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Remove(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_Remove_Request @@ -568,9 +578,11 @@ func Test_server_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -590,12 +602,12 @@ func Test_server_Remove(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func 
Test_server_RemoveMulti(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_Remove_RequestMulti @@ -667,9 +679,11 @@ func Test_server_RemoveMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -689,12 +703,12 @@ func Test_server_RemoveMulti(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_RegisterIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_IP_Register_Request @@ -766,9 +780,11 @@ func Test_server_RegisterIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -788,12 +804,12 @@ func Test_server_RegisterIPs(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_RemoveIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Backup_IP_Remove_Request @@ -865,9 +881,11 @@ func Test_server_RemoveIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -887,7 +905,6 @@ func Test_server_RemoveIPs(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/compressor/handler/grpc/option.go b/pkg/manager/compressor/handler/grpc/option.go index 6f0f471d36..00a26fcc87 100644 --- a/pkg/manager/compressor/handler/grpc/option.go +++ b/pkg/manager/compressor/handler/grpc/option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/pkg/manager/compressor/service" type Option func(*server) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithCompressor(c service.Compressor) Option { return func(s *server) { diff --git a/pkg/manager/compressor/handler/grpc/option_test.go b/pkg/manager/compressor/handler/grpc/option_test.go index 94fafa6d68..bc7050ffb8 100644 --- a/pkg/manager/compressor/handler/grpc/option_test.go +++ b/pkg/manager/compressor/handler/grpc/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/pkg/manager/compressor/service" - "go.uber.org/goleak" ) func TestWithCompressor(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { c service.Compressor @@ -63,7 +64,7 @@ func TestWithCompressor(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithCompressor(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 
@@ func TestWithCompressor(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithCompressor(t *testing.T) { got := WithCompressor(test.args.c) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -139,6 +142,8 @@ func TestWithCompressor(t *testing.T) { } func TestWithBackup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { b service.Backup @@ -176,7 +181,7 @@ func TestWithBackup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -212,9 +217,11 @@ func TestWithBackup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -235,7 +242,7 @@ func TestWithBackup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -243,7 +250,7 @@ func TestWithBackup(t *testing.T) { got := WithBackup(test.args.b) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -252,6 +259,8 @@ func TestWithBackup(t *testing.T) { } func TestWithRegisterer(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { r service.Registerer @@ -289,7 +298,7 @@ func TestWithRegisterer(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -325,9 +334,11 @@ func TestWithRegisterer(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -348,7 +359,7 @@ func TestWithRegisterer(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -356,7 +367,7 @@ func TestWithRegisterer(t *testing.T) { got := WithRegisterer(test.args.r) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/compressor/handler/rest/handler.go b/pkg/manager/compressor/handler/rest/handler.go index 09422f7ecf..c308509e87 100644 --- a/pkg/manager/compressor/handler/rest/handler.go +++ 
b/pkg/manager/compressor/handler/rest/handler.go @@ -20,8 +20,8 @@ package rest import ( "net/http" - "github.com/vdaas/vald/apis/grpc/manager/compressor" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/compressor" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/net/http/json" ) @@ -64,14 +64,14 @@ func (h *handler) Locations(w http.ResponseWriter, r *http.Request) (int, error) } func (h *handler) Register(w http.ResponseWriter, r *http.Request) (int, error) { - var req *payload.Backup_MetaVector + var req *payload.Backup_Vector return json.Handler(w, r, &req, func() (interface{}, error) { return h.backup.Register(r.Context(), req) }) } func (h *handler) RegisterMulti(w http.ResponseWriter, r *http.Request) (int, error) { - var req *payload.Backup_MetaVectors + var req *payload.Backup_Vectors return json.Handler(w, r, &req, func() (interface{}, error) { return h.backup.RegisterMulti(r.Context(), req) }) diff --git a/pkg/manager/compressor/handler/rest/handler_test.go b/pkg/manager/compressor/handler/rest/handler_test.go index 728d524800..721310e154 100644 --- a/pkg/manager/compressor/handler/rest/handler_test.go +++ b/pkg/manager/compressor/handler/rest/handler_test.go @@ -22,13 +22,13 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/manager/compressor" + "github.com/vdaas/vald/apis/grpc/v1/manager/compressor" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -77,9 +77,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -94,12 +96,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_GetVector(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -165,9 +167,11 @@ func Test_handler_GetVector(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -185,12 +189,12 @@ func Test_handler_GetVector(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Locations(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -256,9 +260,11 @@ func Test_handler_Locations(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -276,12 +282,12 @@ func Test_handler_Locations(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Register(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -347,9 +353,11 @@ func Test_handler_Register(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer 
goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,12 +375,12 @@ func Test_handler_Register(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_RegisterMulti(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -438,9 +446,11 @@ func Test_handler_RegisterMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -458,12 +468,12 @@ func Test_handler_RegisterMulti(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Remove(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -529,9 +539,11 @@ func Test_handler_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -549,12 +561,12 @@ func Test_handler_Remove(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_RemoveMulti(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -620,9 +632,11 @@ func Test_handler_RemoveMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -640,12 +654,12 @@ func Test_handler_RemoveMulti(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_RegisterIPs(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -711,9 +725,11 @@ func Test_handler_RegisterIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -731,12 +747,12 @@ func Test_handler_RegisterIPs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_RemoveIPs(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -802,9 +818,11 @@ func Test_handler_RemoveIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -822,7 +840,6 @@ func Test_handler_RemoveIPs(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/compressor/handler/rest/option.go b/pkg/manager/compressor/handler/rest/option.go index 5b9907ec31..bdbf0794b3 100644 --- a/pkg/manager/compressor/handler/rest/option.go +++ b/pkg/manager/compressor/handler/rest/option.go @@ -17,13 +17,11 @@ // Package rest provides rest api logic package 
rest -import "github.com/vdaas/vald/apis/grpc/manager/compressor" +import "github.com/vdaas/vald/apis/grpc/v1/manager/compressor" type Option func(*handler) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithBackup(b compressor.BackupServer) Option { return func(h *handler) { diff --git a/pkg/manager/compressor/handler/rest/option_test.go b/pkg/manager/compressor/handler/rest/option_test.go index 5a68acefcc..d33a693c3e 100644 --- a/pkg/manager/compressor/handler/rest/option_test.go +++ b/pkg/manager/compressor/handler/rest/option_test.go @@ -20,12 +20,13 @@ package rest import ( "testing" - "github.com/vdaas/vald/apis/grpc/manager/compressor" - + "github.com/vdaas/vald/apis/grpc/v1/manager/compressor" "go.uber.org/goleak" ) func TestWithBackup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { b compressor.BackupServer @@ -63,7 +64,7 @@ func TestWithBackup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithBackup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithBackup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithBackup(t *testing.T) { got := WithBackup(test.args.b) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/compressor/router/option.go b/pkg/manager/compressor/router/option.go index 9a4af7b6d6..c4e3411d2c 100644 --- a/pkg/manager/compressor/router/option.go +++ b/pkg/manager/compressor/router/option.go @@ -24,11 +24,9 @@ import ( type Option func(*router) -var ( - defaultOpts = []Option{ - WithTimeout("3s"), - } -) +var defaultOpts = []Option{ + WithTimeout("3s"), +} func WithHandler(h rest.Handler) Option { return func(r *router) { diff --git a/pkg/manager/compressor/router/option_test.go b/pkg/manager/compressor/router/option_test.go index e77d54ce42..2458b205a9 100644 --- a/pkg/manager/compressor/router/option_test.go +++ b/pkg/manager/compressor/router/option_test.go @@ -22,11 +22,12 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/pkg/manager/compressor/handler/rest" - "go.uber.org/goleak" ) func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { h rest.Handler @@ -64,7 +65,7 @@ func TestWithHandler(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -100,9 +101,11 @@ func TestWithHandler(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + 
test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -123,7 +126,7 @@ func TestWithHandler(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -131,7 +134,7 @@ func TestWithHandler(t *testing.T) { got := WithHandler(test.args.h) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -140,6 +143,8 @@ func TestWithHandler(t *testing.T) { } func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { timeout string @@ -177,7 +182,7 @@ func TestWithTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -213,9 +218,11 @@ func TestWithTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -236,7 +243,7 @@ func TestWithTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -244,7 +251,7 @@ func TestWithTimeout(t *testing.T) { got := WithTimeout(test.args.timeout) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -253,6 +260,8 @@ func TestWithTimeout(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -290,7 +299,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -326,9 +335,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -349,7 +360,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -357,7 +368,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git 
a/pkg/manager/compressor/router/router.go b/pkg/manager/compressor/router/router.go index 0f1eefbd20..95d75a7113 100644 --- a/pkg/manager/compressor/router/router.go +++ b/pkg/manager/compressor/router/router.go @@ -32,7 +32,7 @@ type router struct { timeout string } -// New returns REST route&method information from handler interface +// New returns REST route&method information from handler interface. func New(opts ...Option) http.Handler { r := new(router) @@ -48,14 +48,15 @@ func New(opts ...Option) http.Handler { middleware.WithTimeout(r.timeout), middleware.WithErrorGroup(r.eg), )), - routing.WithRoutes([]routing.Route{{ - "GetVector", - []string{ - http.MethodGet, + routing.WithRoutes([]routing.Route{ + { + "GetVector", + []string{ + http.MethodGet, + }, + "/vector/{uuid}", + h.GetVector, }, - "/vector/{uuid}", - h.GetVector, - }, { "Locations", []string{ diff --git a/pkg/manager/compressor/router/router_test.go b/pkg/manager/compressor/router/router_test.go index 25eaec0bef..97248787ae 100644 --- a/pkg/manager/compressor/router/router_test.go +++ b/pkg/manager/compressor/router/router_test.go @@ -23,9 +23,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -74,8 +76,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -90,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/compressor/service/backup.go b/pkg/manager/compressor/service/backup.go index ea757e6309..8905fbaa0d 100644 --- a/pkg/manager/compressor/service/backup.go +++ b/pkg/manager/compressor/service/backup.go @@ -20,18 +20,18 @@ import ( "context" "reflect" - gback "github.com/vdaas/vald/apis/grpc/manager/backup" - "github.com/vdaas/vald/apis/grpc/payload" + gback "github.com/vdaas/vald/apis/grpc/v1/manager/backup" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" ) type Backup interface { Start(ctx context.Context) (<-chan error, error) - GetObject(ctx context.Context, uuid string) (*payload.Backup_Compressed_MetaVector, error) + GetObject(ctx context.Context, uuid string) (*payload.Backup_Compressed_Vector, error) GetLocation(ctx context.Context, uuid string) ([]string, error) - Register(ctx context.Context, vec *payload.Backup_Compressed_MetaVector) error - RegisterMultiple(ctx context.Context, vecs *payload.Backup_Compressed_MetaVectors) error + Register(ctx context.Context, vec *payload.Backup_Compressed_Vector) error + RegisterMultiple(ctx context.Context, vecs *payload.Backup_Compressed_Vectors) error Remove(ctx context.Context, uuid string) error RemoveMultiple(ctx context.Context, uuids ...string) error RegisterIPs(ctx context.Context, uuid string, ips []string) error @@ -58,7 +58,7 @@ func (b *backup) Start(ctx context.Context) (<-chan error, error) { return b.client.StartConnectionMonitor(ctx) } -func (b *backup) GetObject(ctx context.Context, uuid string) (vec *payload.Backup_Compressed_MetaVector, err error) { +func (b *backup) GetObject(ctx context.Context, uuid string) (vec *payload.Backup_Compressed_Vector, err error) { _, err = b.client.Do(ctx, b.addr, func(ctx context.Context, conn *grpc.ClientConn, 
copts ...grpc.CallOption) (i interface{}, err error) { vec, err = gback.NewBackupClient(conn).GetVector(ctx, &payload.Backup_GetVector_Request{ @@ -87,7 +87,7 @@ func (b *backup) GetLocation(ctx context.Context, uuid string) (ipList []string, return } -func (b *backup) Register(ctx context.Context, vec *payload.Backup_Compressed_MetaVector) (err error) { +func (b *backup) Register(ctx context.Context, vec *payload.Backup_Compressed_Vector) (err error) { _, err = b.client.Do(ctx, b.addr, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err error) { _, err = gback.NewBackupClient(conn).Register(ctx, vec, copts...) @@ -99,7 +99,7 @@ func (b *backup) Register(ctx context.Context, vec *payload.Backup_Compressed_Me return } -func (b *backup) RegisterMultiple(ctx context.Context, vecs *payload.Backup_Compressed_MetaVectors) (err error) { +func (b *backup) RegisterMultiple(ctx context.Context, vecs *payload.Backup_Compressed_Vectors) (err error) { _, err = b.client.Do(ctx, b.addr, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (i interface{}, err error) { _, err = gback.NewBackupClient(conn).RegisterMulti(ctx, vecs, copts...) diff --git a/pkg/manager/compressor/service/backup_option.go b/pkg/manager/compressor/service/backup_option.go index 3647333b07..cb930eb267 100644 --- a/pkg/manager/compressor/service/backup_option.go +++ b/pkg/manager/compressor/service/backup_option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/internal/net/grpc" type BackupOption func(b *backup) error -var ( - defaultBackupOpts = []BackupOption{} -) +var defaultBackupOpts = []BackupOption{} func WithBackupAddr(addr string) BackupOption { return func(b *backup) error { diff --git a/pkg/manager/compressor/service/backup_option_test.go b/pkg/manager/compressor/service/backup_option_test.go index f4ae710854..366cc18fff 100644 --- a/pkg/manager/compressor/service/backup_option_test.go +++ b/pkg/manager/compressor/service/backup_option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestWithBackupAddr(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { addr string @@ -63,7 +64,7 @@ func TestWithBackupAddr(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithBackupAddr(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithBackupAddr(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithBackupAddr(t *testing.T) { got := WithBackupAddr(test.args.addr) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -139,6 +142,8 @@ func TestWithBackupAddr(t *testing.T) { } func 
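The hunks above move `backup.go` to the versioned `apis/grpc/v1` packages and rename the compressed payload from `Backup_Compressed_MetaVector` to `Backup_Compressed_Vector`, but the call shape is untouched: every RPC still goes through `b.client.Do`, which hands the callback a live `*grpc.ClientConn`. A condensed sketch of the `Register` path — not a drop-in replacement for `backup.go` — using only identifiers visible in these hunks; error branching and tracing are trimmed, and the struct fields match those exercised in `backup_test.go`:

```go
package service

import (
	"context"

	gback "github.com/vdaas/vald/apis/grpc/v1/manager/backup"
	"github.com/vdaas/vald/apis/grpc/v1/payload"
	"github.com/vdaas/vald/internal/net/grpc"
)

// Fields as exercised by backup_test.go: the backup manager address and the
// managed gRPC client used to reach it.
type backup struct {
	addr   string
	client grpc.Client
}

// Condensed from the Register path in the hunk: the service never dials on its
// own; it asks client.Do to run the callback against b.addr over a live connection.
func (b *backup) Register(ctx context.Context, vec *payload.Backup_Compressed_Vector) error {
	_, err := b.client.Do(ctx, b.addr, func(ctx context.Context,
		conn *grpc.ClientConn, copts ...grpc.CallOption) (interface{}, error) {
		// The generated v1 backup client is built on the connection handed in
		// by the managed client; the response body is discarded, only the error matters.
		return gback.NewBackupClient(conn).Register(ctx, vec, copts...)
	})
	return err
}
```

Because connection handling stays behind `client.Do`, the payload rename remains a mechanical type substitution throughout this file.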
TestWithBackupClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { client grpc.Client @@ -176,7 +181,7 @@ func TestWithBackupClient(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -212,9 +217,11 @@ func TestWithBackupClient(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -235,7 +242,7 @@ func TestWithBackupClient(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -243,7 +250,7 @@ func TestWithBackupClient(t *testing.T) { got := WithBackupClient(test.args.client) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/compressor/service/backup_test.go b/pkg/manager/compressor/service/backup_test.go index 8363798d42..f5ddfd0868 100644 --- a/pkg/manager/compressor/service/backup_test.go +++ b/pkg/manager/compressor/service/backup_test.go @@ -21,14 +21,14 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestNewBackup(t *testing.T) { + t.Parallel() type args struct { opts []BackupOption } @@ -81,9 +81,11 @@ func TestNewBackup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -98,12 +100,12 @@ func TestNewBackup(t *testing.T) { if err := test.checkFunc(test.want, gotBu, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -169,9 +171,11 @@ func Test_backup_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -190,12 +194,12 @@ func Test_backup_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_GetObject(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -205,7 +209,7 @@ func Test_backup_GetObject(t *testing.T) { client grpc.Client } type want struct { - wantVec *payload.Backup_Compressed_MetaVector + wantVec *payload.Backup_Compressed_Vector err error } type test struct { @@ -213,11 +217,11 @@ func Test_backup_GetObject(t *testing.T) { args args fields fields want want - checkFunc func(want, *payload.Backup_Compressed_MetaVector, error) error + checkFunc func(want, *payload.Backup_Compressed_Vector, error) 
error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, gotVec *payload.Backup_Compressed_MetaVector, err error) error { + defaultCheckFunc := func(w want, gotVec *payload.Backup_Compressed_Vector, err error) error { if !errors.Is(err, w.err) { return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } @@ -264,9 +268,11 @@ func Test_backup_GetObject(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -285,12 +291,12 @@ func Test_backup_GetObject(t *testing.T) { if err := test.checkFunc(test.want, gotVec, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_GetLocation(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -359,9 +365,11 @@ func Test_backup_GetLocation(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -380,15 +388,15 @@ func Test_backup_GetLocation(t *testing.T) { if err := test.checkFunc(test.want, gotIpList, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_Register(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - vec *payload.Backup_Compressed_MetaVector + vec *payload.Backup_Compressed_Vector } type fields struct { addr string @@ -450,9 +458,11 @@ func Test_backup_Register(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -471,15 +481,15 @@ func Test_backup_Register(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_RegisterMultiple(t *testing.T) { + t.Parallel() type args struct { ctx context.Context - vecs *payload.Backup_Compressed_MetaVectors + vecs *payload.Backup_Compressed_Vectors } type fields struct { addr string @@ -541,9 +551,11 @@ func Test_backup_RegisterMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -562,12 +574,12 @@ func Test_backup_RegisterMultiple(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_Remove(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -632,9 +644,11 @@ func Test_backup_Remove(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -653,12 +667,12 @@ func Test_backup_Remove(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_RemoveMultiple(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuids []string @@ -723,9 +737,11 @@ func 
Test_backup_RemoveMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -744,12 +760,12 @@ func Test_backup_RemoveMultiple(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_RegisterIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context uuid string @@ -817,9 +833,11 @@ func Test_backup_RegisterIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -838,12 +856,12 @@ func Test_backup_RegisterIPs(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_backup_RemoveIPs(t *testing.T) { + t.Parallel() type args struct { ctx context.Context ips []string @@ -908,9 +926,11 @@ func Test_backup_RemoveIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -929,7 +949,6 @@ func Test_backup_RemoveIPs(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/compressor/service/compress_option.go b/pkg/manager/compressor/service/compress_option.go index d1b706e60a..2844aabe89 100644 --- a/pkg/manager/compressor/service/compress_option.go +++ b/pkg/manager/compressor/service/compress_option.go @@ -24,13 +24,11 @@ import ( type CompressorOption func(c *compressor) error -var ( - defaultCompressorOpts = []CompressorOption{ - WithCompressorWorker(), - WithCompressAlgorithm("gob"), - WithCompressorErrGroup(errgroup.Get()), - } -) +var defaultCompressorOpts = []CompressorOption{ + WithCompressorWorker(), + WithCompressAlgorithm("gob"), + WithCompressorErrGroup(errgroup.Get()), +} func WithCompressAlgorithm(name string) CompressorOption { return func(c *compressor) error { diff --git a/pkg/manager/compressor/service/compress_option_test.go b/pkg/manager/compressor/service/compress_option_test.go index 697609a691..65daa6512a 100644 --- a/pkg/manager/compressor/service/compress_option_test.go +++ b/pkg/manager/compressor/service/compress_option_test.go @@ -22,11 +22,12 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/worker" - "go.uber.org/goleak" ) func TestWithCompressAlgorithm(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { name string @@ -64,7 +65,7 @@ func TestWithCompressAlgorithm(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -100,9 +101,11 @@ func TestWithCompressAlgorithm(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if 
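Nearly every test hunk in this section applies the same three-step change: copy the range variable (`test := tc`) before registering the closure, opt the subtest into `tt.Parallel()`, and point `goleak.VerifyNone` at the subtest's `tt` rather than the parent `t`. A minimal, self-contained illustration of the pattern — the package name, test name, and table contents here are invented for the example, not taken from the repository:

```go
package service_test

import (
	"testing"

	"go.uber.org/goleak"
)

func TestParallelTablePattern(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name string
		in   int
		want int
	}{
		{name: "doubles one", in: 1, want: 2},
		{name: "doubles two", in: 2, want: 4},
	}

	for _, tc := range tests {
		// Copy the range variable: before Go 1.22 every iteration reuses tc,
		// so parallel closures registered here would otherwise all observe
		// the last element by the time they actually run.
		test := tc
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			// Check for leaked goroutines against the subtest's *testing.T,
			// so a leak is reported on the specific case that produced it.
			defer goleak.VerifyNone(tt)

			if got := test.in * 2; got != test.want {
				tt.Errorf("got = %d, want %d", got, test.want)
			}
		})
	}
}
```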
test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -123,7 +126,7 @@ func TestWithCompressAlgorithm(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -131,7 +134,7 @@ func TestWithCompressAlgorithm(t *testing.T) { got := WithCompressAlgorithm(test.args.name) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -140,6 +143,8 @@ func TestWithCompressAlgorithm(t *testing.T) { } func TestWithCompressionLevel(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { level int @@ -177,7 +182,7 @@ func TestWithCompressionLevel(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -213,9 +218,11 @@ func TestWithCompressionLevel(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -236,7 +243,7 @@ func TestWithCompressionLevel(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -244,7 +251,7 @@ func TestWithCompressionLevel(t *testing.T) { got := WithCompressionLevel(test.args.level) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -253,6 +260,8 @@ func TestWithCompressionLevel(t *testing.T) { } func TestWithCompressorWorker(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { opts []worker.WorkerOption @@ -290,7 +299,7 @@ func TestWithCompressorWorker(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -326,9 +335,11 @@ func TestWithCompressorWorker(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -349,7 +360,7 @@ func TestWithCompressorWorker(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -357,7 +368,7 @@ func TestWithCompressorWorker(t *testing.T) { got := WithCompressorWorker(test.args.opts...) 
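The `compress_option.go` hunk only collapses the `var ( … )` block into a single `var` declaration; the defaults themselves — a worker, the `gob` algorithm, and the shared errgroup — are unchanged. For context, a self-contained sketch of how this functional-option style typically composes defaults with caller options; the types below are local stand-ins and the constructor body is an assumption about the conventional shape, not code copied from `compress.go`:

```go
package main

import "fmt"

// Local mirrors of the shapes in compress_option.go so the sketch compiles on
// its own; only the field and option names are taken from the hunks above.
type compressor struct {
	algorithm        string
	compressionLevel int
}

type CompressorOption func(c *compressor) error

func WithCompressAlgorithm(name string) CompressorOption {
	return func(c *compressor) error {
		c.algorithm = name
		return nil
	}
}

func WithCompressionLevel(level int) CompressorOption {
	return func(c *compressor) error {
		c.compressionLevel = level
		return nil
	}
}

// Defaults are applied first, then caller options, so the caller always wins.
var defaultCompressorOpts = []CompressorOption{
	WithCompressAlgorithm("gob"),
}

// NewCompressor is assumed to follow the usual constructor shape for this
// option style; the real compress.go is not reproduced here.
func NewCompressor(opts ...CompressorOption) (*compressor, error) {
	c := new(compressor)
	for _, opt := range append(defaultCompressorOpts, opts...) {
		if err := opt(c); err != nil {
			return nil, err
		}
	}
	return c, nil
}

func main() {
	c, err := NewCompressor(WithCompressionLevel(5))
	if err != nil {
		panic(err)
	}
	fmt.Println(c.algorithm, c.compressionLevel) // prints: gob 5
}
```

Applying `defaultCompressorOpts` before the caller's options is what lets `WithCompressAlgorithm("gob")` act as a default that any explicit option can override.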
obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -366,6 +377,8 @@ func TestWithCompressorWorker(t *testing.T) { } func TestWithCompressorErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -403,7 +416,7 @@ func TestWithCompressorErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -439,9 +452,11 @@ func TestWithCompressorErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -462,7 +477,7 @@ func TestWithCompressorErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -470,7 +485,7 @@ func TestWithCompressorErrGroup(t *testing.T) { got := WithCompressorErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/compressor/service/compress_test.go b/pkg/manager/compressor/service/compress_test.go index aad9875de3..da0bba16e5 100644 --- a/pkg/manager/compressor/service/compress_test.go +++ b/pkg/manager/compressor/service/compress_test.go @@ -25,11 +25,11 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/worker" - "go.uber.org/goleak" ) func TestNewCompressor(t *testing.T) { + t.Parallel() type args struct { opts []CompressorOption } @@ -82,9 +82,11 @@ func TestNewCompressor(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -99,12 +101,12 @@ func TestNewCompressor(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressor_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -178,9 +180,11 @@ func Test_compressor_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -203,12 +207,12 @@ func Test_compressor_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressor_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -286,9 +290,11 @@ func Test_compressor_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer 
goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -311,12 +317,12 @@ func Test_compressor_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressor_dispatchCompress(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vectors [][]float32 @@ -397,9 +403,11 @@ func Test_compressor_dispatchCompress(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -422,12 +430,12 @@ func Test_compressor_dispatchCompress(t *testing.T) { if err := test.checkFunc(test.want, gotResults, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressor_dispatchDecompress(t *testing.T) { + t.Parallel() type args struct { ctx context.Context bytess [][]byte @@ -508,9 +516,11 @@ func Test_compressor_dispatchDecompress(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -533,12 +543,12 @@ func Test_compressor_dispatchDecompress(t *testing.T) { if err := test.checkFunc(test.want, gotResults, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressor_Compress(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vector []float32 @@ -619,9 +629,11 @@ func Test_compressor_Compress(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -644,12 +656,12 @@ func Test_compressor_Compress(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressor_Decompress(t *testing.T) { + t.Parallel() type args struct { ctx context.Context bytes []byte @@ -730,9 +742,11 @@ func Test_compressor_Decompress(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -755,12 +769,12 @@ func Test_compressor_Decompress(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressor_MultiCompress(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vectors [][]float32 @@ -841,9 +855,11 @@ func Test_compressor_MultiCompress(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -866,12 +882,12 @@ func Test_compressor_MultiCompress(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressor_MultiDecompress(t *testing.T) { + t.Parallel() type args struct { ctx context.Context bytess [][]byte @@ -952,9 +968,11 @@ func Test_compressor_MultiDecompress(t *testing.T) { */ } - for _, test 
:= range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -977,12 +995,12 @@ func Test_compressor_MultiDecompress(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressor_Len(t *testing.T) { + t.Parallel() type fields struct { algorithm string compressionLevel int @@ -1046,9 +1064,11 @@ func Test_compressor_Len(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1071,12 +1091,12 @@ func Test_compressor_Len(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressor_TotalRequested(t *testing.T) { + t.Parallel() type fields struct { algorithm string compressionLevel int @@ -1140,9 +1160,11 @@ func Test_compressor_TotalRequested(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1165,12 +1187,12 @@ func Test_compressor_TotalRequested(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_compressor_TotalCompleted(t *testing.T) { + t.Parallel() type fields struct { algorithm string compressionLevel int @@ -1234,9 +1256,11 @@ func Test_compressor_TotalCompleted(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1259,7 +1283,6 @@ func Test_compressor_TotalCompleted(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/compressor/service/registerer.go b/pkg/manager/compressor/service/registerer.go index 5bcdfc14fb..81d5ca5aea 100644 --- a/pkg/manager/compressor/service/registerer.go +++ b/pkg/manager/compressor/service/registerer.go @@ -21,8 +21,8 @@ import ( "reflect" "sync" - "github.com/vdaas/vald/apis/grpc/payload" - client "github.com/vdaas/vald/internal/client/compressor" + "github.com/vdaas/vald/apis/grpc/v1/payload" + client "github.com/vdaas/vald/internal/client/v1/client/compressor" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" @@ -35,8 +35,8 @@ type Registerer interface { PreStart(ctx context.Context) error Start(ctx context.Context) (<-chan error, error) PostStop(ctx context.Context) error - Register(ctx context.Context, meta *payload.Backup_MetaVector) error - RegisterMulti(ctx context.Context, metas *payload.Backup_MetaVectors) error + Register(ctx context.Context, vec *payload.Backup_Vector) error + RegisterMulti(ctx context.Context, vecs *payload.Backup_Vectors) error Len() uint64 TotalRequested() uint64 TotalCompleted() uint64 @@ -49,8 +49,8 @@ type registerer struct { backup Backup compressor Compressor client client.Client - metas map[string]*payload.Backup_MetaVector - metasMux sync.Mutex + vecs 
map[string]*payload.Backup_Vector + vecsMu sync.Mutex } func NewRegisterer(opts ...RegistererOption) (Registerer, error) { @@ -61,7 +61,7 @@ func NewRegisterer(opts ...RegistererOption) (Registerer, error) { } } - r.metas = make(map[string]*payload.Backup_MetaVector, 0) + r.vecs = make(map[string]*payload.Backup_Vector, 0) return r, nil } @@ -113,7 +113,7 @@ func (r *registerer) PostStop(ctx context.Context) error { return nil } -func (r *registerer) Register(ctx context.Context, meta *payload.Backup_MetaVector) error { +func (r *registerer) Register(ctx context.Context, vec *payload.Backup_Vector) error { ctx, span := trace.StartSpan(ctx, "vald/manager-compressor/service/Registerer.Register") defer func() { if span != nil { @@ -121,7 +121,7 @@ func (r *registerer) Register(ctx context.Context, meta *payload.Backup_MetaVect } }() - err := r.dispatch(ctx, meta) + err := r.dispatch(ctx, vec) if err != nil && span != nil { span.SetStatus(trace.StatusCodeUnavailable(err.Error())) } @@ -129,7 +129,7 @@ func (r *registerer) Register(ctx context.Context, meta *payload.Backup_MetaVect return err } -func (r *registerer) RegisterMulti(ctx context.Context, metas *payload.Backup_MetaVectors) error { +func (r *registerer) RegisterMulti(ctx context.Context, vecs *payload.Backup_Vectors) error { ctx, span := trace.StartSpan(ctx, "vald/manager-compressor/service/Registerer.RegisterMulti") defer func() { if span != nil { @@ -138,8 +138,8 @@ func (r *registerer) RegisterMulti(ctx context.Context, metas *payload.Backup_Me }() var err, errs error - for _, meta := range metas.GetVectors() { - err = r.Register(ctx, meta) + for _, vec := range vecs.GetVectors() { + err = r.Register(ctx, vec) if err != nil { errs = errors.Wrap(errs, err.Error()) } @@ -164,21 +164,21 @@ func (r *registerer) TotalCompleted() uint64 { return r.worker.TotalCompleted() } -func (r *registerer) dispatch(ctx context.Context, meta *payload.Backup_MetaVector) error { - r.metasMux.Lock() - r.metas[meta.GetUuid()] = meta - r.metasMux.Unlock() +func (r *registerer) dispatch(ctx context.Context, vec *payload.Backup_Vector) error { + r.vecsMu.Lock() + r.vecs[vec.GetUuid()] = vec + r.vecsMu.Unlock() - return r.worker.Dispatch(ctx, r.registerProcessFunc(meta)) + return r.worker.Dispatch(ctx, r.registerProcessFunc(vec)) } -func (r *registerer) registerProcessFunc(meta *payload.Backup_MetaVector) worker.JobFunc { +func (r *registerer) registerProcessFunc(vec *payload.Backup_Vector) worker.JobFunc { return func(ctx context.Context) (err error) { ctx, span := trace.StartSpan(ctx, "vald/manager-compressor/service/Registerer.Register.DispatchedJob") defer func() { - r.metasMux.Lock() - delete(r.metas, meta.GetUuid()) - r.metasMux.Unlock() + r.vecsMu.Lock() + delete(r.vecs, vec.GetUuid()) + r.vecsMu.Unlock() if span != nil { span.End() @@ -187,7 +187,7 @@ func (r *registerer) registerProcessFunc(meta *payload.Backup_MetaVector) worker var vector []byte - vector, err = r.compressor.Compress(ctx, meta.GetVector()) + vector, err = r.compressor.Compress(ctx, vec.GetVector()) if err != nil { if span != nil { span.SetStatus(trace.StatusCodeInternal(err.Error())) @@ -198,11 +198,10 @@ func (r *registerer) registerProcessFunc(meta *payload.Backup_MetaVector) worker err = r.backup.Register( ctx, - &payload.Backup_Compressed_MetaVector{ - Uuid: meta.GetUuid(), - Meta: meta.GetMeta(), + &payload.Backup_Compressed_Vector{ + Uuid: vec.GetUuid(), Vector: vector, - Ips: meta.GetIps(), + Ips: vec.GetIps(), }, ) if err != nil && span != nil { @@ -216,21 +215,21 @@ func 
(r *registerer) registerProcessFunc(meta *payload.Backup_MetaVector) worker func (r *registerer) forwardMetas(ctx context.Context) (errs error) { var err error - r.metasMux.Lock() + r.vecsMu.Lock() - log.Debugf("compressor registerer queued meta-vector count: %d", len(r.metas)) + log.Debugf("compressor registerer queued vec-vector count: %d", len(r.vecs)) - for uuid, meta := range r.metas { + for uuid, vec := range r.vecs { log.Debugf("forwarding uuid %s", uuid) - err = r.client.Register(ctx, meta) + err = r.client.Register(ctx, vec) if err != nil { log.Errorf("compressor registerer failed to backup uuid %s: %v", uuid, err) errs = errors.Wrap(errs, err.Error()) } } - r.metasMux.Unlock() + r.vecsMu.Unlock() return errs } diff --git a/pkg/manager/compressor/service/registerer_option.go b/pkg/manager/compressor/service/registerer_option.go index df3c546604..4ab48621e7 100644 --- a/pkg/manager/compressor/service/registerer_option.go +++ b/pkg/manager/compressor/service/registerer_option.go @@ -18,19 +18,17 @@ package service import ( - client "github.com/vdaas/vald/internal/client/compressor" + client "github.com/vdaas/vald/internal/client/v1/client/compressor" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/worker" ) type RegistererOption func(r *registerer) error -var ( - defaultRegistererOpts = []RegistererOption{ - WithRegistererWorker(), - WithRegistererErrGroup(errgroup.Get()), - } -) +var defaultRegistererOpts = []RegistererOption{ + WithRegistererWorker(), + WithRegistererErrGroup(errgroup.Get()), +} func WithRegistererWorker(opts ...worker.WorkerOption) RegistererOption { return func(r *registerer) error { diff --git a/pkg/manager/compressor/service/registerer_option_test.go b/pkg/manager/compressor/service/registerer_option_test.go index c1a8141592..aa6e5e99bf 100644 --- a/pkg/manager/compressor/service/registerer_option_test.go +++ b/pkg/manager/compressor/service/registerer_option_test.go @@ -20,14 +20,15 @@ package service import ( "testing" - client "github.com/vdaas/vald/internal/client/compressor" + client "github.com/vdaas/vald/internal/client/v1/client/compressor" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/worker" - "go.uber.org/goleak" ) func TestWithRegistererWorker(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { opts []worker.WorkerOption @@ -65,7 +66,7 @@ func TestWithRegistererWorker(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -101,9 +102,11 @@ func TestWithRegistererWorker(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -124,7 +127,7 @@ func TestWithRegistererWorker(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -132,7 +135,7 @@ func TestWithRegistererWorker(t *testing.T) { got := WithRegistererWorker(test.args.opts...) 
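In `registerer.go` the rename from `metas`/`metasMux` to `vecs`/`vecsMu` is mechanical, but the flow it guards is worth keeping in view: `dispatch` parks the raw vector in the mutex-protected map before queueing a worker job, the job deletes the entry once compression and backup registration finish, and `forwardMetas` drains whatever is still parked to the compressor client. A stripped-down sketch of the first two steps, shown as a fragment of the same `service` package and using only identifiers from the hunks above (tracing spans and per-branch error reporting are omitted):

```go
// dispatch parks the raw vector under vecsMu, then queues a job on the worker.
func (r *registerer) dispatch(ctx context.Context, vec *payload.Backup_Vector) error {
	r.vecsMu.Lock()
	r.vecs[vec.GetUuid()] = vec
	r.vecsMu.Unlock()

	return r.worker.Dispatch(ctx, r.registerProcessFunc(vec))
}

// registerProcessFunc compresses the vector and registers the compressed form
// with the backup manager; the pending entry is removed whether or not that succeeds.
func (r *registerer) registerProcessFunc(vec *payload.Backup_Vector) worker.JobFunc {
	return func(ctx context.Context) error {
		defer func() {
			r.vecsMu.Lock()
			delete(r.vecs, vec.GetUuid())
			r.vecsMu.Unlock()
		}()

		compressed, err := r.compressor.Compress(ctx, vec.GetVector())
		if err != nil {
			return err
		}

		return r.backup.Register(ctx, &payload.Backup_Compressed_Vector{
			Uuid:   vec.GetUuid(),
			Vector: compressed,
			Ips:    vec.GetIps(),
		})
	}
}
```

Because the deferred delete runs on success and failure alike, `forwardMetas` only ever sees vectors whose jobs have not yet completed.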
obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -141,6 +144,8 @@ func TestWithRegistererWorker(t *testing.T) { } func TestWithRegistererErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -178,7 +183,7 @@ func TestWithRegistererErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -214,9 +219,11 @@ func TestWithRegistererErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -237,7 +244,7 @@ func TestWithRegistererErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -245,7 +252,7 @@ func TestWithRegistererErrGroup(t *testing.T) { got := WithRegistererErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -254,6 +261,8 @@ func TestWithRegistererErrGroup(t *testing.T) { } func TestWithRegistererBackup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { b Backup @@ -291,7 +300,7 @@ func TestWithRegistererBackup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -327,9 +336,11 @@ func TestWithRegistererBackup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -350,7 +361,7 @@ func TestWithRegistererBackup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -358,7 +369,7 @@ func TestWithRegistererBackup(t *testing.T) { got := WithRegistererBackup(test.args.b) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -367,6 +378,8 @@ func TestWithRegistererBackup(t *testing.T) { } func TestWithRegistererCompressor(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { c Compressor @@ -404,7 +417,7 @@ func TestWithRegistererCompressor(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: 
\"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -440,9 +453,11 @@ func TestWithRegistererCompressor(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -463,7 +478,7 @@ func TestWithRegistererCompressor(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -471,7 +486,7 @@ func TestWithRegistererCompressor(t *testing.T) { got := WithRegistererCompressor(test.args.c) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -480,6 +495,8 @@ func TestWithRegistererCompressor(t *testing.T) { } func TestWithRegistererClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { c client.Client @@ -517,7 +534,7 @@ func TestWithRegistererClient(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -553,9 +570,11 @@ func TestWithRegistererClient(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -576,7 +595,7 @@ func TestWithRegistererClient(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -584,7 +603,7 @@ func TestWithRegistererClient(t *testing.T) { got := WithRegistererClient(test.args.c) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/compressor/service/registerer_test.go b/pkg/manager/compressor/service/registerer_test.go index 4b94923516..82ae18883b 100644 --- a/pkg/manager/compressor/service/registerer_test.go +++ b/pkg/manager/compressor/service/registerer_test.go @@ -22,16 +22,16 @@ import ( "sync" "testing" - "github.com/vdaas/vald/apis/grpc/payload" - client "github.com/vdaas/vald/internal/client/compressor" + "github.com/vdaas/vald/apis/grpc/v1/payload" + client "github.com/vdaas/vald/internal/client/v1/client/compressor" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/worker" - "go.uber.org/goleak" ) func TestNewRegisterer(t *testing.T) { + t.Parallel() type args struct { opts []RegistererOption } @@ -84,9 +84,11 @@ func TestNewRegisterer(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
test.beforeFunc(test.args) } @@ -101,12 +103,12 @@ func TestNewRegisterer(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_registerer_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -117,8 +119,8 @@ func Test_registerer_PreStart(t *testing.T) { backup Backup compressor Compressor client client.Client - metas map[string]*payload.Backup_MetaVector - metasMux sync.Mutex + vecs map[string]*payload.Backup_Vector + vecsMu sync.Mutex } type want struct { err error @@ -153,8 +155,8 @@ func Test_registerer_PreStart(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -176,8 +178,8 @@ func Test_registerer_PreStart(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -186,9 +188,11 @@ func Test_registerer_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -205,20 +209,20 @@ func Test_registerer_PreStart(t *testing.T) { backup: test.fields.backup, compressor: test.fields.compressor, client: test.fields.client, - metas: test.fields.metas, - metasMux: test.fields.metasMux, + vecs: test.fields.vecs, + vecsMu: test.fields.vecsMu, } err := r.PreStart(test.args.ctx) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_registerer_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -229,8 +233,8 @@ func Test_registerer_Start(t *testing.T) { backup Backup compressor Compressor client client.Client - metas map[string]*payload.Backup_MetaVector - metasMux sync.Mutex + vecs map[string]*payload.Backup_Vector + vecsMu sync.Mutex } type want struct { want <-chan error @@ -269,8 +273,8 @@ func Test_registerer_Start(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -292,8 +296,8 @@ func Test_registerer_Start(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -302,9 +306,11 @@ func Test_registerer_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -321,20 +327,20 @@ func Test_registerer_Start(t *testing.T) { backup: test.fields.backup, compressor: test.fields.compressor, client: test.fields.client, - metas: test.fields.metas, - metasMux: test.fields.metasMux, + vecs: test.fields.vecs, + vecsMu: test.fields.vecsMu, } got, err := r.Start(test.args.ctx) if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_registerer_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -345,8 +351,8 @@ func Test_registerer_PostStop(t *testing.T) { backup Backup compressor Compressor client 
client.Client - metas map[string]*payload.Backup_MetaVector - metasMux sync.Mutex + vecs map[string]*payload.Backup_Vector + vecsMu sync.Mutex } type want struct { err error @@ -381,8 +387,8 @@ func Test_registerer_PostStop(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -404,8 +410,8 @@ func Test_registerer_PostStop(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -414,9 +420,11 @@ func Test_registerer_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -433,23 +441,23 @@ func Test_registerer_PostStop(t *testing.T) { backup: test.fields.backup, compressor: test.fields.compressor, client: test.fields.client, - metas: test.fields.metas, - metasMux: test.fields.metasMux, + vecs: test.fields.vecs, + vecsMu: test.fields.vecsMu, } err := r.PostStop(test.args.ctx) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_registerer_Register(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - meta *payload.Backup_MetaVector + ctx context.Context + vec *payload.Backup_Vector } type fields struct { worker worker.Worker @@ -458,8 +466,8 @@ func Test_registerer_Register(t *testing.T) { backup Backup compressor Compressor client client.Client - metas map[string]*payload.Backup_MetaVector - metasMux sync.Mutex + vecs map[string]*payload.Backup_Vector + vecsMu sync.Mutex } type want struct { err error @@ -486,7 +494,7 @@ func Test_registerer_Register(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - meta: nil, + vec: nil, }, fields: fields { worker: nil, @@ -495,8 +503,8 @@ func Test_registerer_Register(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -510,7 +518,7 @@ func Test_registerer_Register(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - meta: nil, + vec: nil, }, fields: fields { worker: nil, @@ -519,8 +527,8 @@ func Test_registerer_Register(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -529,9 +537,11 @@ func Test_registerer_Register(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -548,23 +558,23 @@ func Test_registerer_Register(t *testing.T) { backup: test.fields.backup, compressor: test.fields.compressor, client: test.fields.client, - metas: test.fields.metas, - metasMux: test.fields.metasMux, + vecs: test.fields.vecs, + vecsMu: test.fields.vecsMu, } - err := r.Register(test.args.ctx, test.args.meta) + err := r.Register(test.args.ctx, test.args.vec) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_registerer_RegisterMulti(t *testing.T) { + t.Parallel() type args struct { - 
ctx context.Context - metas *payload.Backup_MetaVectors + ctx context.Context + vecs *payload.Backup_Vectors } type fields struct { worker worker.Worker @@ -573,8 +583,8 @@ func Test_registerer_RegisterMulti(t *testing.T) { backup Backup compressor Compressor client client.Client - metas map[string]*payload.Backup_MetaVector - metasMux sync.Mutex + vecs map[string]*payload.Backup_Vector + vecsMu sync.Mutex } type want struct { err error @@ -601,7 +611,7 @@ func Test_registerer_RegisterMulti(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - metas: nil, + vecs: nil, }, fields: fields { worker: nil, @@ -610,8 +620,8 @@ func Test_registerer_RegisterMulti(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -625,7 +635,7 @@ func Test_registerer_RegisterMulti(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - metas: nil, + vecs: nil, }, fields: fields { worker: nil, @@ -634,8 +644,8 @@ func Test_registerer_RegisterMulti(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -644,9 +654,11 @@ func Test_registerer_RegisterMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -663,20 +675,20 @@ func Test_registerer_RegisterMulti(t *testing.T) { backup: test.fields.backup, compressor: test.fields.compressor, client: test.fields.client, - metas: test.fields.metas, - metasMux: test.fields.metasMux, + vecs: test.fields.vecs, + vecsMu: test.fields.vecsMu, } - err := r.RegisterMulti(test.args.ctx, test.args.metas) + err := r.RegisterMulti(test.args.ctx, test.args.vecs) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_registerer_Len(t *testing.T) { + t.Parallel() type fields struct { worker worker.Worker workerOpts []worker.WorkerOption @@ -684,8 +696,8 @@ func Test_registerer_Len(t *testing.T) { backup Backup compressor Compressor client client.Client - metas map[string]*payload.Backup_MetaVector - metasMux sync.Mutex + vecs map[string]*payload.Backup_Vector + vecsMu sync.Mutex } type want struct { want uint64 @@ -716,8 +728,8 @@ func Test_registerer_Len(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -736,8 +748,8 @@ func Test_registerer_Len(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -746,9 +758,11 @@ func Test_registerer_Len(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -765,20 +779,20 @@ func Test_registerer_Len(t *testing.T) { backup: test.fields.backup, compressor: test.fields.compressor, client: test.fields.client, - metas: test.fields.metas, - metasMux: test.fields.metasMux, + vecs: test.fields.vecs, + vecsMu: test.fields.vecsMu, } got := r.Len() if err := 
test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_registerer_TotalRequested(t *testing.T) { + t.Parallel() type fields struct { worker worker.Worker workerOpts []worker.WorkerOption @@ -786,8 +800,8 @@ func Test_registerer_TotalRequested(t *testing.T) { backup Backup compressor Compressor client client.Client - metas map[string]*payload.Backup_MetaVector - metasMux sync.Mutex + vecs map[string]*payload.Backup_Vector + vecsMu sync.Mutex } type want struct { want uint64 @@ -818,8 +832,8 @@ func Test_registerer_TotalRequested(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -838,8 +852,8 @@ func Test_registerer_TotalRequested(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -848,9 +862,11 @@ func Test_registerer_TotalRequested(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -867,20 +883,20 @@ func Test_registerer_TotalRequested(t *testing.T) { backup: test.fields.backup, compressor: test.fields.compressor, client: test.fields.client, - metas: test.fields.metas, - metasMux: test.fields.metasMux, + vecs: test.fields.vecs, + vecsMu: test.fields.vecsMu, } got := r.TotalRequested() if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_registerer_TotalCompleted(t *testing.T) { + t.Parallel() type fields struct { worker worker.Worker workerOpts []worker.WorkerOption @@ -888,8 +904,8 @@ func Test_registerer_TotalCompleted(t *testing.T) { backup Backup compressor Compressor client client.Client - metas map[string]*payload.Backup_MetaVector - metasMux sync.Mutex + vecs map[string]*payload.Backup_Vector + vecsMu sync.Mutex } type want struct { want uint64 @@ -920,8 +936,8 @@ func Test_registerer_TotalCompleted(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -940,8 +956,8 @@ func Test_registerer_TotalCompleted(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -950,9 +966,11 @@ func Test_registerer_TotalCompleted(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -969,23 +987,23 @@ func Test_registerer_TotalCompleted(t *testing.T) { backup: test.fields.backup, compressor: test.fields.compressor, client: test.fields.client, - metas: test.fields.metas, - metasMux: test.fields.metasMux, + vecs: test.fields.vecs, + vecsMu: test.fields.vecsMu, } got := r.TotalCompleted() if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_registerer_dispatch(t *testing.T) { + t.Parallel() type args struct { - ctx context.Context - meta *payload.Backup_MetaVector + ctx context.Context + vec *payload.Backup_Vector } type fields struct { 
worker worker.Worker @@ -994,8 +1012,8 @@ func Test_registerer_dispatch(t *testing.T) { backup Backup compressor Compressor client client.Client - metas map[string]*payload.Backup_MetaVector - metasMux sync.Mutex + vecs map[string]*payload.Backup_Vector + vecsMu sync.Mutex } type want struct { err error @@ -1022,7 +1040,7 @@ func Test_registerer_dispatch(t *testing.T) { name: "test_case_1", args: args { ctx: nil, - meta: nil, + vec: nil, }, fields: fields { worker: nil, @@ -1031,8 +1049,8 @@ func Test_registerer_dispatch(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1046,7 +1064,7 @@ func Test_registerer_dispatch(t *testing.T) { name: "test_case_2", args: args { ctx: nil, - meta: nil, + vec: nil, }, fields: fields { worker: nil, @@ -1055,8 +1073,8 @@ func Test_registerer_dispatch(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1065,9 +1083,11 @@ func Test_registerer_dispatch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1084,22 +1104,22 @@ func Test_registerer_dispatch(t *testing.T) { backup: test.fields.backup, compressor: test.fields.compressor, client: test.fields.client, - metas: test.fields.metas, - metasMux: test.fields.metasMux, + vecs: test.fields.vecs, + vecsMu: test.fields.vecsMu, } - err := r.dispatch(test.args.ctx, test.args.meta) + err := r.dispatch(test.args.ctx, test.args.vec) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_registerer_registerProcessFunc(t *testing.T) { + t.Parallel() type args struct { - meta *payload.Backup_MetaVector + vec *payload.Backup_Vector } type fields struct { worker worker.Worker @@ -1108,8 +1128,8 @@ func Test_registerer_registerProcessFunc(t *testing.T) { backup Backup compressor Compressor client client.Client - metas map[string]*payload.Backup_MetaVector - metasMux sync.Mutex + vecs map[string]*payload.Backup_Vector + vecsMu sync.Mutex } type want struct { want worker.JobFunc @@ -1135,7 +1155,7 @@ func Test_registerer_registerProcessFunc(t *testing.T) { { name: "test_case_1", args: args { - meta: nil, + vec: nil, }, fields: fields { worker: nil, @@ -1144,8 +1164,8 @@ func Test_registerer_registerProcessFunc(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1158,7 +1178,7 @@ func Test_registerer_registerProcessFunc(t *testing.T) { return test { name: "test_case_2", args: args { - meta: nil, + vec: nil, }, fields: fields { worker: nil, @@ -1167,8 +1187,8 @@ func Test_registerer_registerProcessFunc(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1177,9 +1197,11 @@ func Test_registerer_registerProcessFunc(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != 
nil { test.beforeFunc(test.args) } @@ -1196,20 +1218,20 @@ func Test_registerer_registerProcessFunc(t *testing.T) { backup: test.fields.backup, compressor: test.fields.compressor, client: test.fields.client, - metas: test.fields.metas, - metasMux: test.fields.metasMux, + vecs: test.fields.vecs, + vecsMu: test.fields.vecsMu, } - got := r.registerProcessFunc(test.args.meta) + got := r.registerProcessFunc(test.args.vec) if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_registerer_forwardMetas(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -1220,8 +1242,8 @@ func Test_registerer_forwardMetas(t *testing.T) { backup Backup compressor Compressor client client.Client - metas map[string]*payload.Backup_MetaVector - metasMux sync.Mutex + vecs map[string]*payload.Backup_Vector + vecsMu sync.Mutex } type want struct { err error @@ -1256,8 +1278,8 @@ func Test_registerer_forwardMetas(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1279,8 +1301,8 @@ func Test_registerer_forwardMetas(t *testing.T) { backup: nil, compressor: nil, client: nil, - metas: nil, - metasMux: sync.Mutex{}, + vecs: nil, + vecsMu: sync.Mutex{}, }, want: want{}, checkFunc: defaultCheckFunc, @@ -1289,9 +1311,11 @@ func Test_registerer_forwardMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1308,15 +1332,14 @@ func Test_registerer_forwardMetas(t *testing.T) { backup: test.fields.backup, compressor: test.fields.compressor, client: test.fields.client, - metas: test.fields.metas, - metasMux: test.fields.metasMux, + vecs: test.fields.vecs, + vecsMu: test.fields.vecsMu, } err := r.forwardMetas(test.args.ctx) if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/compressor/usecase/compressord.go b/pkg/manager/compressor/usecase/compressord.go index 2879f20b9b..9e9a91e334 100644 --- a/pkg/manager/compressor/usecase/compressord.go +++ b/pkg/manager/compressor/usecase/compressord.go @@ -19,8 +19,8 @@ package usecase import ( "context" - "github.com/vdaas/vald/apis/grpc/manager/compressor" - cclient "github.com/vdaas/vald/internal/client/compressor" + "github.com/vdaas/vald/apis/grpc/v1/manager/compressor" + cclient "github.com/vdaas/vald/internal/client/v1/client/compressor" iconf "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" @@ -53,9 +53,7 @@ type run struct { func New(cfg *config.Data) (r runner.Runner, err error) { eg := errgroup.Get() - var ( - b service.Backup - ) + var b service.Backup if addrs := cfg.BackupManager.Client.Addrs; len(addrs) == 0 { return nil, errors.ErrInvalidBackupConfig @@ -206,7 +204,6 @@ func New(cfg *config.Data) (r runner.Runner, err error) { }), // TODO add GraphQL handler ) - if err != nil { return nil, err } diff --git a/pkg/manager/compressor/usecase/compressord_test.go b/pkg/manager/compressor/usecase/compressord_test.go index 289af33051..7aad4a4356 100644 --- a/pkg/manager/compressor/usecase/compressord_test.go +++ b/pkg/manager/compressor/usecase/compressord_test.go @@ -28,11 +28,11 @@ import ( 
"github.com/vdaas/vald/internal/servers/starter" "github.com/vdaas/vald/pkg/manager/compressor/config" "github.com/vdaas/vald/pkg/manager/compressor/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -85,9 +85,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -102,12 +104,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -184,9 +186,11 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -210,12 +214,12 @@ func Test_run_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -296,9 +300,11 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -322,12 +328,12 @@ func Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -404,9 +410,11 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -430,12 +438,12 @@ func Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -512,9 +520,11 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -538,12 +548,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -620,9 +630,11 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -646,7 +658,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/index/config/config_test.go 
b/pkg/manager/index/config/config_test.go index eba18af56a..e90809127a 100644 --- a/pkg/manager/index/config/config_test.go +++ b/pkg/manager/index/config/config_test.go @@ -26,6 +26,7 @@ import ( ) func TestNewConfig(t *testing.T) { + t.Parallel() type args struct { path string } @@ -78,9 +79,11 @@ func TestNewConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -95,7 +98,6 @@ func TestNewConfig(t *testing.T) { if err := test.checkFunc(test.want, gotCfg, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/index/handler/grpc/checklist.go b/pkg/manager/index/handler/grpc/checklist.go deleted file mode 100644 index 1d3cdc4b47..0000000000 --- a/pkg/manager/index/handler/grpc/checklist.go +++ /dev/null @@ -1,141 +0,0 @@ -// -// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package grpc - -import ( - "sync" - "sync/atomic" - "unsafe" -) - -type checkList struct { - mu sync.Mutex - read atomic.Value - dirty map[string]*entryCheckList - misses int -} - -type readOnlyCheckList struct { - m map[string]*entryCheckList - amended bool -} - -var expungedCheckList = unsafe.Pointer(new(struct{})) - -type entryCheckList struct { - p unsafe.Pointer -} - -func (m *checkList) Exists(key string) bool { - read, _ := m.read.Load().(readOnlyCheckList) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read, _ = m.read.Load().(readOnlyCheckList) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - m.missLocked() - } - m.mu.Unlock() - } - if !ok { - return false - } - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedCheckList { - return false - } - return true -} - -func (m *checkList) Check(key string) { - value := struct{}{} - read, _ := m.read.Load().(readOnlyCheckList) - if e, ok := read.m[key]; ok && e.tryStore(&value) { - return - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyCheckList) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - m.dirty[key] = e - } - atomic.StorePointer(&e.p, unsafe.Pointer(&value)) - } else if e, ok := m.dirty[key]; ok { - atomic.StorePointer(&e.p, unsafe.Pointer(&value)) - } else { - if !read.amended { - m.dirtyLocked() - m.read.Store(readOnlyCheckList{m: read.m, amended: true}) - } - m.dirty[key] = &entryCheckList{p: unsafe.Pointer(&value)} - } - m.mu.Unlock() -} - -func (e *entryCheckList) tryStore(i *struct{}) bool { - for { - p := atomic.LoadPointer(&e.p) - if p == expungedCheckList { - return false - } - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return true - } - } -} - -func (e *entryCheckList) unexpungeLocked() (wasExpunged bool) { - return atomic.CompareAndSwapPointer(&e.p, expungedCheckList, nil) -} - -func (m *checkList) missLocked() { - m.misses++ - if 
m.misses < len(m.dirty) { - return - } - m.read.Store(readOnlyCheckList{m: m.dirty}) - m.dirty = nil - m.misses = 0 -} - -func (m *checkList) dirtyLocked() { - if m.dirty != nil { - return - } - - read, _ := m.read.Load().(readOnlyCheckList) - m.dirty = make(map[string]*entryCheckList, len(read.m)) - for k, e := range read.m { - if !e.tryExpungeLocked() { - m.dirty[k] = e - } - } -} - -func (e *entryCheckList) tryExpungeLocked() (isExpunged bool) { - p := atomic.LoadPointer(&e.p) - for p == nil { - if atomic.CompareAndSwapPointer(&e.p, nil, expungedCheckList) { - return true - } - p = atomic.LoadPointer(&e.p) - } - return p == expungedCheckList -} diff --git a/pkg/manager/index/handler/grpc/handler.go b/pkg/manager/index/handler/grpc/handler.go index 9c60ad4283..89ab5a077b 100644 --- a/pkg/manager/index/handler/grpc/handler.go +++ b/pkg/manager/index/handler/grpc/handler.go @@ -20,8 +20,8 @@ package grpc import ( "context" - "github.com/vdaas/vald/apis/grpc/manager/index" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/index" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/observability/trace" "github.com/vdaas/vald/pkg/manager/index/service" ) diff --git a/pkg/manager/index/handler/grpc/handler_test.go b/pkg/manager/index/handler/grpc/handler_test.go index 661d81e043..0f09f30a5e 100644 --- a/pkg/manager/index/handler/grpc/handler_test.go +++ b/pkg/manager/index/handler/grpc/handler_test.go @@ -22,15 +22,15 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/manager/index" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/index" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/pkg/manager/index/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -79,9 +79,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -96,12 +98,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_IndexInfo(t *testing.T) { + t.Parallel() type args struct { ctx context.Context in1 *payload.Empty @@ -167,9 +169,11 @@ func Test_server_IndexInfo(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -187,7 +191,6 @@ func Test_server_IndexInfo(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/index/handler/grpc/option.go b/pkg/manager/index/handler/grpc/option.go index 24179149c9..312c1040a8 100644 --- a/pkg/manager/index/handler/grpc/option.go +++ b/pkg/manager/index/handler/grpc/option.go @@ -23,9 +23,7 @@ import ( type Option func(*server) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithIndexer(i service.Indexer) Option { return func(s *server) { diff --git a/pkg/manager/index/handler/grpc/option_test.go b/pkg/manager/index/handler/grpc/option_test.go index 02f2222988..e4d998102c 100644 --- 
a/pkg/manager/index/handler/grpc/option_test.go +++ b/pkg/manager/index/handler/grpc/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/pkg/manager/index/service" - "go.uber.org/goleak" ) func TestWithIndexer(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { i service.Indexer @@ -63,7 +64,7 @@ func TestWithIndexer(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithIndexer(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithIndexer(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithIndexer(t *testing.T) { got := WithIndexer(test.args.i) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/index/handler/rest/handler.go b/pkg/manager/index/handler/rest/handler.go index ac4c5d2b64..f764ccfa59 100644 --- a/pkg/manager/index/handler/rest/handler.go +++ b/pkg/manager/index/handler/rest/handler.go @@ -20,8 +20,8 @@ package rest import ( "net/http" - "github.com/vdaas/vald/apis/grpc/manager/index" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/index" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/net/http/dump" "github.com/vdaas/vald/internal/net/http/json" ) diff --git a/pkg/manager/index/handler/rest/handler_test.go b/pkg/manager/index/handler/rest/handler_test.go index 57916bdcb2..08f86bcf21 100644 --- a/pkg/manager/index/handler/rest/handler_test.go +++ b/pkg/manager/index/handler/rest/handler_test.go @@ -22,13 +22,13 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/manager/index" + "github.com/vdaas/vald/apis/grpc/v1/manager/index" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -77,9 +77,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -94,12 +96,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Index(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -165,9 +167,11 @@ func Test_handler_Index(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -185,12 +189,12 @@ 
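
Throughout these test hunks the generated table-driven tests are switched to parallel subtests: the loop variable is re-captured as test := tc, each subtest calls tt.Parallel(), and goleak.VerifyNone receives the subtest's *testing.T (tt) rather than the parent's t, so leak failures are attributed to the right subtest. A minimal, self-contained sketch of that loop shape follows; the sum function and table entries are hypothetical and only the structure mirrors the patch.

    // Illustrative example; sum and the table values are hypothetical.
    package example

    import (
        "testing"

        "go.uber.org/goleak"
    )

    func sum(a, b int) int { return a + b }

    func TestSum(t *testing.T) {
        t.Parallel()
        tests := []struct {
            name string
            a, b int
            want int
        }{
            {name: "zero values", a: 0, b: 0, want: 0},
            {name: "positive values", a: 1, b: 2, want: 3},
        }
        for _, tc := range tests {
            // Re-capture the loop variable so each parallel subtest keeps its own copy.
            test := tc
            t.Run(test.name, func(tt *testing.T) {
                tt.Parallel()
                // Pass the subtest's *testing.T so goleak reports against this subtest.
                defer goleak.VerifyNone(tt)
                if got := sum(test.a, test.b); got != test.want {
                    tt.Errorf("got = %d, want %d", got, test.want)
                }
            })
        }
    }
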
func Test_handler_Index(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_IndexInfo(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -256,9 +260,11 @@ func Test_handler_IndexInfo(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -276,7 +282,6 @@ func Test_handler_IndexInfo(t *testing.T) { if err := test.checkFunc(test.want, gotCode, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/index/handler/rest/option.go b/pkg/manager/index/handler/rest/option.go index 1ad42b4913..b3725c043d 100644 --- a/pkg/manager/index/handler/rest/option.go +++ b/pkg/manager/index/handler/rest/option.go @@ -17,13 +17,11 @@ // Package rest provides rest api logic package rest -import "github.com/vdaas/vald/apis/grpc/manager/index" +import "github.com/vdaas/vald/apis/grpc/v1/manager/index" type Option func(*handler) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithIndexer(i index.IndexServer) Option { return func(h *handler) { diff --git a/pkg/manager/index/handler/rest/option_test.go b/pkg/manager/index/handler/rest/option_test.go index 03adc6bdc2..54c2fa38b7 100644 --- a/pkg/manager/index/handler/rest/option_test.go +++ b/pkg/manager/index/handler/rest/option_test.go @@ -20,12 +20,13 @@ package rest import ( "testing" - "github.com/vdaas/vald/apis/grpc/manager/index" - + "github.com/vdaas/vald/apis/grpc/v1/manager/index" "go.uber.org/goleak" ) func TestWithIndexer(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { i index.IndexServer @@ -63,7 +64,7 @@ func TestWithIndexer(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithIndexer(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithIndexer(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithIndexer(t *testing.T) { got := WithIndexer(test.args.i) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/index/router/option.go b/pkg/manager/index/router/option.go index d202453309..b21de9d366 100644 --- a/pkg/manager/index/router/option.go +++ b/pkg/manager/index/router/option.go @@ -23,11 +23,9 @@ import ( type Option func(*router) -var ( - defaultOpts = []Option{ - WithTimeout("3s"), - } -) +var defaultOpts = []Option{ + WithTimeout("3s"), +} func WithHandler(h rest.Handler) Option { return func(r *router) { diff --git 
a/pkg/manager/index/router/option_test.go b/pkg/manager/index/router/option_test.go index 6fbccbab73..c0addb46b2 100644 --- a/pkg/manager/index/router/option_test.go +++ b/pkg/manager/index/router/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/pkg/manager/index/handler/rest" - "go.uber.org/goleak" ) func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { h rest.Handler @@ -63,7 +64,7 @@ func TestWithHandler(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithHandler(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithHandler(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithHandler(t *testing.T) { got := WithHandler(test.args.h) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -139,6 +142,8 @@ func TestWithHandler(t *testing.T) { } func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { timeout string @@ -176,7 +181,7 @@ func TestWithTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -212,9 +217,11 @@ func TestWithTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -235,7 +242,7 @@ func TestWithTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -243,7 +250,7 @@ func TestWithTimeout(t *testing.T) { got := WithTimeout(test.args.timeout) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/index/router/router.go b/pkg/manager/index/router/router.go index d050d464ec..ee19948b05 100644 --- a/pkg/manager/index/router/router.go +++ b/pkg/manager/index/router/router.go @@ -29,7 +29,7 @@ type router struct { timeout string } -// New returns REST route&method information from handler interface +// New returns REST route&method information from handler interface. 
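
The option.go hunks in this area flatten single-element var blocks into var defaultOpts = []Option{...} while keeping the functional-options pattern unchanged. A rough sketch of how such options are usually wired together is shown below; newRouter is a simplified stand-in, since the real New in router.go also registers the REST routes.

    // Sketch of the functional-options pattern; newRouter is hypothetical.
    package router

    type router struct {
        timeout string
    }

    type Option func(*router)

    var defaultOpts = []Option{
        WithTimeout("3s"),
    }

    func WithTimeout(timeout string) Option {
        return func(r *router) {
            if timeout != "" {
                r.timeout = timeout
            }
        }
    }

    func newRouter(opts ...Option) *router {
        r := new(router)
        // Apply defaults first, then caller options, so callers can override defaults.
        for _, opt := range append(defaultOpts, opts...) {
            opt(r)
        }
        return r
    }
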
func New(opts ...Option) http.Handler { r := new(router) diff --git a/pkg/manager/index/router/router_test.go b/pkg/manager/index/router/router_test.go index 25eaec0bef..97248787ae 100644 --- a/pkg/manager/index/router/router_test.go +++ b/pkg/manager/index/router/router_test.go @@ -23,9 +23,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -74,8 +76,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -90,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/index/service/indexer.go b/pkg/manager/index/service/indexer.go index 7071d38c83..baf0e6e698 100644 --- a/pkg/manager/index/service/indexer.go +++ b/pkg/manager/index/service/indexer.go @@ -23,9 +23,9 @@ import ( "sync/atomic" "time" - agent "github.com/vdaas/vald/apis/grpc/agent/core" - "github.com/vdaas/vald/apis/grpc/payload" - "github.com/vdaas/vald/internal/client/discoverer" + agent "github.com/vdaas/vald/apis/grpc/v1/agent/core" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/internal/client/v1/client/discoverer" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" @@ -232,6 +232,7 @@ func (idx *index) IsIndexing() bool { func (idx *index) NumberOfUUIDs() uint32 { return atomic.LoadUint32(&idx.uuidsCount) } + func (idx *index) NumberOfUncommittedUUIDs() uint32 { return atomic.LoadUint32(&idx.uncommittedUUIDsCount) } diff --git a/pkg/manager/index/service/indexer_test.go b/pkg/manager/index/service/indexer_test.go index 888b580060..3679b89226 100644 --- a/pkg/manager/index/service/indexer_test.go +++ b/pkg/manager/index/service/indexer_test.go @@ -24,14 +24,14 @@ import ( "testing" "time" - "github.com/vdaas/vald/internal/client/discoverer" + "github.com/vdaas/vald/internal/client/v1/client/discoverer" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -84,9 +84,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -101,12 +103,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotIdx, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_index_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -199,9 +201,11 @@ func Test_index_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -229,12 +233,12 @@ func Test_index_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_index_execute(t *testing.T) { + t.Parallel() type args struct { ctx context.Context enableLowIndexSkip bool @@ -326,9 +330,11 @@ 
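
The indexer.go hunk above only inserts a blank line between the two counter accessors; both read their values with atomic loads so callers can poll the counts while indexing goroutines update them. A minimal sketch of that accessor pattern, with the struct reduced to just the two counters:

    // Sketch of the lock-free counter accessors; the real index type has many more fields.
    package service

    import "sync/atomic"

    type index struct {
        uuidsCount            uint32
        uncommittedUUIDsCount uint32
    }

    func (idx *index) NumberOfUUIDs() uint32 {
        return atomic.LoadUint32(&idx.uuidsCount)
    }

    func (idx *index) NumberOfUncommittedUUIDs() uint32 {
        return atomic.LoadUint32(&idx.uncommittedUUIDsCount)
    }

    // Indexing goroutines publish new values with atomic.StoreUint32 on the same fields.
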
func Test_index_execute(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -356,12 +362,12 @@ func Test_index_execute(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_index_loadInfos(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -450,9 +456,11 @@ func Test_index_loadInfos(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -480,12 +488,12 @@ func Test_index_loadInfos(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_index_IsIndexing(t *testing.T) { + t.Parallel() type fields struct { client discoverer.Client eg errgroup.Group @@ -564,9 +572,11 @@ func Test_index_IsIndexing(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -594,12 +604,12 @@ func Test_index_IsIndexing(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_index_NumberOfUUIDs(t *testing.T) { + t.Parallel() type fields struct { client discoverer.Client eg errgroup.Group @@ -678,9 +688,11 @@ func Test_index_NumberOfUUIDs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -708,12 +720,12 @@ func Test_index_NumberOfUUIDs(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_index_NumberOfUncommittedUUIDs(t *testing.T) { + t.Parallel() type fields struct { client discoverer.Client eg errgroup.Group @@ -792,9 +804,11 @@ func Test_index_NumberOfUncommittedUUIDs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -822,7 +836,6 @@ func Test_index_NumberOfUncommittedUUIDs(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/index/service/indexinfos.go b/pkg/manager/index/service/indexinfos.go index 8066e7a07a..fc8b851d99 100644 --- a/pkg/manager/index/service/indexinfos.go +++ b/pkg/manager/index/service/indexinfos.go @@ -21,7 +21,7 @@ import ( "sync/atomic" "unsafe" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" ) type indexInfos struct { diff --git a/pkg/manager/index/service/indexinfos_test.go b/pkg/manager/index/service/indexinfos_test.go index f36e4edf5e..ba203807f3 100644 --- a/pkg/manager/index/service/indexinfos_test.go +++ b/pkg/manager/index/service/indexinfos_test.go @@ -23,11 +23,13 @@ import ( "testing" "unsafe" - "github.com/vdaas/vald/apis/grpc/payload" + 
"github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func Test_newEntryIndexInfos(t *testing.T) { + t.Parallel() type args struct { i *payload.Info_Index_Count } @@ -76,8 +78,11 @@ func Test_newEntryIndexInfos(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -92,12 +97,12 @@ func Test_newEntryIndexInfos(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_indexInfos_Load(t *testing.T) { + t.Parallel() type args struct { key string } @@ -169,8 +174,11 @@ func Test_indexInfos_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -191,12 +199,12 @@ func Test_indexInfos_Load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryIndexInfos_load(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -249,8 +257,11 @@ func Test_entryIndexInfos_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -268,12 +279,12 @@ func Test_entryIndexInfos_load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_indexInfos_Store(t *testing.T) { + t.Parallel() type args struct { key string value *payload.Info_Index_Count @@ -340,8 +351,11 @@ func Test_indexInfos_Store(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,6 +381,7 @@ func Test_indexInfos_Store(t *testing.T) { } func Test_entryIndexInfos_tryStore(t *testing.T) { + t.Parallel() type args struct { i **payload.Info_Index_Count } @@ -425,8 +440,11 @@ func Test_entryIndexInfos_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -444,12 +462,12 @@ func Test_entryIndexInfos_tryStore(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryIndexInfos_unexpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -498,8 +516,11 @@ func Test_entryIndexInfos_unexpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -517,12 +538,12 @@ func Test_entryIndexInfos_unexpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotWasExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryIndexInfos_storeLocked(t *testing.T) { + t.Parallel() type args struct { i **payload.Info_Index_Count } @@ -577,8 +598,11 @@ func 
Test_entryIndexInfos_storeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -601,6 +625,7 @@ func Test_entryIndexInfos_storeLocked(t *testing.T) { } func Test_indexInfos_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -664,8 +689,11 @@ func Test_indexInfos_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -691,6 +719,7 @@ func Test_indexInfos_Delete(t *testing.T) { } func Test_entryIndexInfos_delete(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -739,8 +768,11 @@ func Test_entryIndexInfos_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -758,12 +790,12 @@ func Test_entryIndexInfos_delete(t *testing.T) { if err := test.checkFunc(test.want, gotHadValue); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_indexInfos_Range(t *testing.T) { + t.Parallel() type args struct { f func(key string, value *payload.Info_Index_Count) bool } @@ -827,8 +859,11 @@ func Test_indexInfos_Range(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -854,6 +889,7 @@ func Test_indexInfos_Range(t *testing.T) { } func Test_indexInfos_missLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -907,8 +943,11 @@ func Test_indexInfos_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -934,6 +973,7 @@ func Test_indexInfos_missLocked(t *testing.T) { } func Test_indexInfos_dirtyLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -987,8 +1027,11 @@ func Test_indexInfos_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1014,6 +1057,7 @@ func Test_indexInfos_dirtyLocked(t *testing.T) { } func Test_entryIndexInfos_tryExpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -1062,8 +1106,11 @@ func Test_entryIndexInfos_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1081,7 +1128,6 @@ func Test_entryIndexInfos_tryExpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotIsExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/index/service/option.go b/pkg/manager/index/service/option.go index 88dcd74d37..e2bcbc1e40 100644 --- a/pkg/manager/index/service/option.go +++ 
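
The indexinfos tests above, like the checklist.go file deleted earlier in this patch, exercise a sync.Map-style structure: a read-only map held in an atomic.Value for lock-free lookups, a mutex-guarded dirty map for writes, and an expunged sentinel pointer that marks deleted entries. A condensed sketch of the read path, adapted from the removed checkList code (the write and expunge paths are omitted):

    // Condensed sketch of the expunged-pointer pattern from the removed checklist.go.
    package grpc

    import (
        "sync"
        "sync/atomic"
        "unsafe"
    )

    var expunged = unsafe.Pointer(new(struct{}))

    type entry struct{ p unsafe.Pointer }

    type readOnly struct {
        m       map[string]*entry
        amended bool // true if dirty holds keys missing from m
    }

    type checkList struct {
        mu     sync.Mutex
        read   atomic.Value // readOnly
        dirty  map[string]*entry
        misses int
    }

    func (c *checkList) Exists(key string) bool {
        read, _ := c.read.Load().(readOnly)
        e, ok := read.m[key]
        if !ok && read.amended {
            c.mu.Lock()
            // Re-check under the lock, then fall back to the dirty map.
            read, _ = c.read.Load().(readOnly)
            e, ok = read.m[key]
            if !ok && read.amended {
                e, ok = c.dirty[key]
                c.missLocked()
            }
            c.mu.Unlock()
        }
        if !ok {
            return false
        }
        p := atomic.LoadPointer(&e.p)
        return p != nil && p != expunged
    }

    // missLocked promotes the dirty map to the read map once enough lookups
    // have had to take the slow path.
    func (c *checkList) missLocked() {
        c.misses++
        if c.misses < len(c.dirty) {
            return
        }
        c.read.Store(readOnly{m: c.dirty})
        c.dirty = nil
        c.misses = 0
    }
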
b/pkg/manager/index/service/option.go @@ -20,23 +20,21 @@ package service import ( "time" - "github.com/vdaas/vald/internal/client/discoverer" + "github.com/vdaas/vald/internal/client/v1/client/discoverer" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/timeutil" ) type Option func(i *index) error -var ( - defaultOpts = []Option{ - WithErrGroup(errgroup.Get()), - WithIndexingConcurrency(1), - WithIndexingDuration("1m"), - WithIndexingDurationLimit("30m"), - WithMinUncommitted(100), - WithCreationPoolSize(10000), - } -) +var defaultOpts = []Option{ + WithErrGroup(errgroup.Get()), + WithIndexingConcurrency(1), + WithIndexingDuration("1m"), + WithIndexingDurationLimit("30m"), + WithMinUncommitted(100), + WithCreationPoolSize(10000), +} func WithIndexingConcurrency(c int) Option { return func(idx *index) error { diff --git a/pkg/manager/index/service/option_test.go b/pkg/manager/index/service/option_test.go index d75395c55a..e44e16f807 100644 --- a/pkg/manager/index/service/option_test.go +++ b/pkg/manager/index/service/option_test.go @@ -20,13 +20,14 @@ package service import ( "testing" - "github.com/vdaas/vald/internal/client/discoverer" + "github.com/vdaas/vald/internal/client/v1/client/discoverer" "github.com/vdaas/vald/internal/errgroup" - "go.uber.org/goleak" ) func TestWithIndexingConcurrency(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { c int @@ -64,7 +65,7 @@ func TestWithIndexingConcurrency(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -100,9 +101,11 @@ func TestWithIndexingConcurrency(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -123,7 +126,7 @@ func TestWithIndexingConcurrency(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -131,7 +134,7 @@ func TestWithIndexingConcurrency(t *testing.T) { got := WithIndexingConcurrency(test.args.c) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -140,6 +143,8 @@ func TestWithIndexingConcurrency(t *testing.T) { } func TestWithIndexingDuration(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -177,7 +182,7 @@ func TestWithIndexingDuration(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -213,9 +218,11 @@ func TestWithIndexingDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil 
{ test.beforeFunc(test.args) } @@ -236,7 +243,7 @@ func TestWithIndexingDuration(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -244,7 +251,7 @@ func TestWithIndexingDuration(t *testing.T) { got := WithIndexingDuration(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -253,6 +260,8 @@ func TestWithIndexingDuration(t *testing.T) { } func TestWithIndexingDurationLimit(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -290,7 +299,7 @@ func TestWithIndexingDurationLimit(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -326,9 +335,11 @@ func TestWithIndexingDurationLimit(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -349,7 +360,7 @@ func TestWithIndexingDurationLimit(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -357,7 +368,7 @@ func TestWithIndexingDurationLimit(t *testing.T) { got := WithIndexingDurationLimit(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -366,6 +377,8 @@ func TestWithIndexingDurationLimit(t *testing.T) { } func TestWithMinUncommitted(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { n uint32 @@ -403,7 +416,7 @@ func TestWithMinUncommitted(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -439,9 +452,11 @@ func TestWithMinUncommitted(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -462,7 +477,7 @@ func TestWithMinUncommitted(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -470,7 +485,7 @@ func TestWithMinUncommitted(t *testing.T) { got := WithMinUncommitted(test.args.n) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -479,6 +494,8 @@ 
func TestWithMinUncommitted(t *testing.T) { } func TestWithCreationPoolSize(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { size uint32 @@ -516,7 +533,7 @@ func TestWithCreationPoolSize(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -552,9 +569,11 @@ func TestWithCreationPoolSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -575,7 +594,7 @@ func TestWithCreationPoolSize(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -583,7 +602,7 @@ func TestWithCreationPoolSize(t *testing.T) { got := WithCreationPoolSize(test.args.size) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -592,6 +611,8 @@ func TestWithCreationPoolSize(t *testing.T) { } func TestWithDiscoverer(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { c discoverer.Client @@ -629,7 +650,7 @@ func TestWithDiscoverer(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -665,9 +686,11 @@ func TestWithDiscoverer(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -688,7 +711,7 @@ func TestWithDiscoverer(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -696,7 +719,7 @@ func TestWithDiscoverer(t *testing.T) { got := WithDiscoverer(test.args.c) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -705,6 +728,8 @@ func TestWithDiscoverer(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -742,7 +767,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -778,9 +803,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc 
t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -801,7 +828,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -809,7 +836,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/index/usecase/indexer.go b/pkg/manager/index/usecase/indexer.go index 2a05c197a0..97854e7ac6 100644 --- a/pkg/manager/index/usecase/indexer.go +++ b/pkg/manager/index/usecase/indexer.go @@ -19,8 +19,8 @@ package usecase import ( "context" - "github.com/vdaas/vald/apis/grpc/manager/index" - "github.com/vdaas/vald/internal/client/discoverer" + "github.com/vdaas/vald/apis/grpc/v1/manager/index" + "github.com/vdaas/vald/internal/client/v1/client/discoverer" iconf "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/net/grpc" @@ -49,9 +49,7 @@ type run struct { func New(cfg *config.Data) (r runner.Runner, err error) { eg := errgroup.Get() - var ( - indexer service.Indexer - ) + var indexer service.Indexer discovererClientOptions := append( cfg.Indexer.Discoverer.Client.Opts(), diff --git a/pkg/manager/index/usecase/indexer_test.go b/pkg/manager/index/usecase/indexer_test.go index 5666a13627..7267330fe7 100644 --- a/pkg/manager/index/usecase/indexer_test.go +++ b/pkg/manager/index/usecase/indexer_test.go @@ -28,11 +28,11 @@ import ( "github.com/vdaas/vald/internal/servers/starter" "github.com/vdaas/vald/pkg/manager/index/config" "github.com/vdaas/vald/pkg/manager/index/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -85,9 +85,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -102,12 +104,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -178,9 +180,11 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -202,12 +206,12 @@ func Test_run_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -282,9 +286,11 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -306,12 +312,12 @@ func 
Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -382,9 +388,11 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -406,12 +414,12 @@ func Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -482,9 +490,11 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -506,12 +516,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -582,9 +592,11 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -606,7 +618,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/agent/config/config_test.go b/pkg/manager/replication/agent/config/config_test.go index eba18af56a..e90809127a 100644 --- a/pkg/manager/replication/agent/config/config_test.go +++ b/pkg/manager/replication/agent/config/config_test.go @@ -26,6 +26,7 @@ import ( ) func TestNewConfig(t *testing.T) { + t.Parallel() type args struct { path string } @@ -78,9 +79,11 @@ func TestNewConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -95,7 +98,6 @@ func TestNewConfig(t *testing.T) { if err := test.checkFunc(test.want, gotCfg, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/agent/handler/grpc/handler.go b/pkg/manager/replication/agent/handler/grpc/handler.go index ae2c6eadb0..7364b85de7 100644 --- a/pkg/manager/replication/agent/handler/grpc/handler.go +++ b/pkg/manager/replication/agent/handler/grpc/handler.go @@ -21,8 +21,8 @@ import ( "context" "fmt" - "github.com/vdaas/vald/apis/grpc/manager/replication/agent" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/replication/agent" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/net/grpc/status" "github.com/vdaas/vald/internal/observability/trace" @@ -70,6 +70,7 @@ func (s *server) Rebalance(ctx context.Context, req *payload.Replication_Rebalan }() return new(payload.Empty), nil } + func (s *server) AgentInfo(ctx context.Context, req *payload.Empty) (*payload.Replication_Agents, 
error) { // TODO implement this later ctx, span := trace.StartSpan(ctx, "vald/manager-replication-agent.AgentInfo") diff --git a/pkg/manager/replication/agent/handler/grpc/handler_test.go b/pkg/manager/replication/agent/handler/grpc/handler_test.go index 8555e0c9f5..d3501fb4bf 100644 --- a/pkg/manager/replication/agent/handler/grpc/handler_test.go +++ b/pkg/manager/replication/agent/handler/grpc/handler_test.go @@ -22,13 +22,14 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/pkg/manager/replication/agent/service" "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -77,9 +78,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -94,12 +97,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Recover(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Replication_Recovery @@ -165,9 +168,11 @@ func Test_server_Recover(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -185,12 +190,12 @@ func Test_server_Recover(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_Rebalance(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Replication_Rebalance @@ -256,9 +261,11 @@ func Test_server_Rebalance(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -276,12 +283,12 @@ func Test_server_Rebalance(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_AgentInfo(t *testing.T) { + t.Parallel() type args struct { ctx context.Context req *payload.Empty @@ -347,9 +354,11 @@ func Test_server_AgentInfo(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,7 +376,6 @@ func Test_server_AgentInfo(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/agent/handler/grpc/option.go b/pkg/manager/replication/agent/handler/grpc/option.go index b221c098ab..dfb0b7dbf8 100644 --- a/pkg/manager/replication/agent/handler/grpc/option.go +++ b/pkg/manager/replication/agent/handler/grpc/option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/pkg/manager/replication/agent/service" type Option func(*server) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithReplicator(rep service.Replicator) Option { return func(s *server) { diff --git 
a/pkg/manager/replication/agent/handler/grpc/option_test.go b/pkg/manager/replication/agent/handler/grpc/option_test.go index 121c7d3b6e..a3c7d2d2cf 100644 --- a/pkg/manager/replication/agent/handler/grpc/option_test.go +++ b/pkg/manager/replication/agent/handler/grpc/option_test.go @@ -25,6 +25,8 @@ import ( ) func TestWithReplicator(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { rep service.Replicator @@ -62,7 +64,7 @@ func TestWithReplicator(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -98,9 +100,11 @@ func TestWithReplicator(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -121,7 +125,7 @@ func TestWithReplicator(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -129,7 +133,7 @@ func TestWithReplicator(t *testing.T) { got := WithReplicator(test.args.rep) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/replication/agent/handler/rest/handler.go b/pkg/manager/replication/agent/handler/rest/handler.go index a398937395..de519e1775 100644 --- a/pkg/manager/replication/agent/handler/rest/handler.go +++ b/pkg/manager/replication/agent/handler/rest/handler.go @@ -20,8 +20,8 @@ package rest import ( "net/http" - "github.com/vdaas/vald/apis/grpc/manager/replication/agent" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/replication/agent" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/net/http/json" ) diff --git a/pkg/manager/replication/agent/handler/rest/handler_test.go b/pkg/manager/replication/agent/handler/rest/handler_test.go index f7e42952a6..5c98c97338 100644 --- a/pkg/manager/replication/agent/handler/rest/handler_test.go +++ b/pkg/manager/replication/agent/handler/rest/handler_test.go @@ -22,13 +22,13 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/manager/replication/agent" + "github.com/vdaas/vald/apis/grpc/v1/manager/replication/agent" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -77,9 +77,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -94,12 +96,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Recover(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -165,9 +167,11 @@ func Test_handler_Recover(t *testing.T) { */ } - for _, test := range tests { + for _, tc 
:= range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -185,12 +189,12 @@ func Test_handler_Recover(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Rebalance(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -256,9 +260,11 @@ func Test_handler_Rebalance(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -276,12 +282,12 @@ func Test_handler_Rebalance(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_AgentInfo(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -347,9 +353,11 @@ func Test_handler_AgentInfo(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,7 +375,6 @@ func Test_handler_AgentInfo(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/agent/handler/rest/option.go b/pkg/manager/replication/agent/handler/rest/option.go index 69b8b72c3a..c62ac76f38 100644 --- a/pkg/manager/replication/agent/handler/rest/option.go +++ b/pkg/manager/replication/agent/handler/rest/option.go @@ -17,13 +17,11 @@ // Package rest provides rest api logic package rest -import "github.com/vdaas/vald/apis/grpc/manager/replication/agent" +import "github.com/vdaas/vald/apis/grpc/v1/manager/replication/agent" type Option func(*handler) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithReplicator(reps agent.ReplicationServer) Option { return func(h *handler) { diff --git a/pkg/manager/replication/agent/handler/rest/option_test.go b/pkg/manager/replication/agent/handler/rest/option_test.go index 5ac6fc4b50..691d259741 100644 --- a/pkg/manager/replication/agent/handler/rest/option_test.go +++ b/pkg/manager/replication/agent/handler/rest/option_test.go @@ -20,126 +20,13 @@ package rest import ( "testing" - "github.com/vdaas/vald/apis/grpc/manager/backup" - "github.com/vdaas/vald/apis/grpc/manager/replication/agent" - + "github.com/vdaas/vald/apis/grpc/v1/manager/replication/agent" "go.uber.org/goleak" ) -func TestWithBackup(t *testing.T) { - type T = interface{} - type args struct { - b backup.BackupServer - } - type want struct { - obj *T - // Uncomment this line if the option returns an error, otherwise delete it - // err error - } - type test struct { - name string - args args - want want - // Use the first line if the option returns an error. 
otherwise use the second line - // checkFunc func(want, *T, error) error - // checkFunc func(want, *T) error - beforeFunc func(args) - afterFunc func(args) - } - - // Uncomment this block if the option returns an error, otherwise delete it - /* - defaultCheckFunc := func(w want, obj *T, err error) error { - if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) - } - if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) - } - return nil - } - */ - - // Uncomment this block if the option do not returns an error, otherwise delete it - /* - defaultCheckFunc := func(w want, obj *T) error { - if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) - } - return nil - } - */ - - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - b: nil, - }, - want: want { - obj: new(T), - }, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - b: nil, - }, - want: want { - obj: new(T), - }, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - - // Uncomment this block if the option returns an error, otherwise delete it - /* - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := WithBackup(test.args.b) - obj := new(T) - if err := test.checkFunc(test.want, obj, got(obj)); err != nil { - tt.Errorf("error = %v", err) - } - */ - - // Uncomment this block if the option returns an error, otherwise delete it - /* - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - got := WithBackup(test.args.b) - obj := new(T) - got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { - tt.Errorf("error = %v", err) - } - */ - }) - } -} - func TestWithReplicator(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { reps agent.ReplicationServer @@ -177,7 +64,7 @@ func TestWithReplicator(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -213,9 +100,11 @@ func TestWithReplicator(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -236,7 +125,7 @@ func TestWithReplicator(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -244,7 +133,7 @@ func TestWithReplicator(t *testing.T) { got := WithReplicator(test.args.reps) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/replication/agent/router/option.go b/pkg/manager/replication/agent/router/option.go index 47384792dc..60a27fc63e 100644 --- 
a/pkg/manager/replication/agent/router/option.go +++ b/pkg/manager/replication/agent/router/option.go @@ -24,11 +24,9 @@ import ( type Option func(*router) -var ( - defaultOpts = []Option{ - WithTimeout("3s"), - } -) +var defaultOpts = []Option{ + WithTimeout("3s"), +} func WithHandler(h rest.Handler) Option { return func(r *router) { diff --git a/pkg/manager/replication/agent/router/option_test.go b/pkg/manager/replication/agent/router/option_test.go index 650e33f7b4..d1cd6de948 100644 --- a/pkg/manager/replication/agent/router/option_test.go +++ b/pkg/manager/replication/agent/router/option_test.go @@ -22,11 +22,12 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/pkg/manager/replication/agent/handler/rest" - "go.uber.org/goleak" ) func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { h rest.Handler @@ -64,7 +65,7 @@ func TestWithHandler(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -100,9 +101,11 @@ func TestWithHandler(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -123,7 +126,7 @@ func TestWithHandler(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -131,7 +134,7 @@ func TestWithHandler(t *testing.T) { got := WithHandler(test.args.h) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -140,6 +143,8 @@ func TestWithHandler(t *testing.T) { } func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { timeout string @@ -177,7 +182,7 @@ func TestWithTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -213,9 +218,11 @@ func TestWithTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -236,7 +243,7 @@ func TestWithTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -244,7 +251,7 @@ func TestWithTimeout(t *testing.T) { got := WithTimeout(test.args.timeout) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -253,6 +260,8 @@ func TestWithTimeout(t *testing.T) 
{ } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -290,7 +299,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -326,9 +335,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -349,7 +360,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -357,7 +368,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/replication/agent/router/router.go b/pkg/manager/replication/agent/router/router.go index 6a867967a6..e5c63e7c0b 100644 --- a/pkg/manager/replication/agent/router/router.go +++ b/pkg/manager/replication/agent/router/router.go @@ -32,7 +32,7 @@ type router struct { timeout string } -// New returns REST route&method information from handler interface +// New returns REST route&method information from handler interface. 
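The test-file hunks throughout this patch all apply one template: copy the range variable before t.Run so each parallel subtest closes over its own value, call Parallel on the inner *testing.T, and defer goleak.VerifyNone against that same inner value instead of the outer t. Below is a minimal, self-contained sketch of that shape; the sum helper, the TestSum name, and the two table cases are hypothetical stand-ins for the generated Vald fixtures, and only the loop structure, tt.Parallel(), and goleak usage mirror the actual change.

// sum_test.go — sketch of the table-driven parallel pattern used above (helper and cases are hypothetical).
package sum

import (
    "testing"

    "go.uber.org/goleak"
)

func sum(a, b int) int { return a + b }

func TestSum(t *testing.T) {
    t.Parallel()
    tests := []struct {
        name string
        a, b int
        want int
    }{
        {name: "adds positives", a: 1, b: 2, want: 3},
        {name: "adds negatives", a: -1, b: -2, want: -3},
    }
    for _, tc := range tests {
        test := tc // copy the range variable so each parallel closure sees its own value
        t.Run(test.name, func(tt *testing.T) {
            tt.Parallel()
            defer goleak.VerifyNone(tt) // attribute leak reports to the subtest, not the outer t
            if got := sum(test.a, test.b); got != test.want {
                tt.Errorf("got = %d, want %d", got, test.want)
            }
        })
    }
}

The extra test := tc copy is what keeps the closures safe under the shared loop-variable semantics of Go versions before 1.22, which is the toolchain range this code base targeted at the time.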
func New(opts ...Option) http.Handler { r := new(router) @@ -74,5 +74,4 @@ func New(opts ...Option) http.Handler { h.AgentInfo, }, }...)) - } diff --git a/pkg/manager/replication/agent/router/router_test.go b/pkg/manager/replication/agent/router/router_test.go index 25eaec0bef..97248787ae 100644 --- a/pkg/manager/replication/agent/router/router_test.go +++ b/pkg/manager/replication/agent/router/router_test.go @@ -23,9 +23,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -74,8 +76,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -90,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/agent/service/agent_test.go b/pkg/manager/replication/agent/service/agent_test.go index f4fa4e65f3..738e18a52b 100644 --- a/pkg/manager/replication/agent/service/agent_test.go +++ b/pkg/manager/replication/agent/service/agent_test.go @@ -25,6 +25,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type want struct { want Replicator } @@ -63,9 +64,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -80,7 +83,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/agent/usecase/backupd.go b/pkg/manager/replication/agent/usecase/backupd.go index 9abf62e3ae..5f320eb23e 100644 --- a/pkg/manager/replication/agent/usecase/backupd.go +++ b/pkg/manager/replication/agent/usecase/backupd.go @@ -19,7 +19,7 @@ package usecase import ( "context" - "github.com/vdaas/vald/apis/grpc/manager/replication/agent" + "github.com/vdaas/vald/apis/grpc/v1/manager/replication/agent" iconf "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/net/grpc" @@ -104,7 +104,6 @@ func New(cfg *config.Data) (r runner.Runner, err error) { }), // TODO add GraphQL handler ) - if err != nil { return nil, err } diff --git a/pkg/manager/replication/agent/usecase/backupd_test.go b/pkg/manager/replication/agent/usecase/backupd_test.go index 0c3b396622..a0fd90d93f 100644 --- a/pkg/manager/replication/agent/usecase/backupd_test.go +++ b/pkg/manager/replication/agent/usecase/backupd_test.go @@ -28,11 +28,11 @@ import ( "github.com/vdaas/vald/internal/servers/starter" "github.com/vdaas/vald/pkg/manager/replication/agent/config" "github.com/vdaas/vald/pkg/manager/replication/agent/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -85,9 +85,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -102,12 +104,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", 
err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -178,9 +180,11 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -202,12 +206,12 @@ func Test_run_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -282,9 +286,11 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -306,12 +312,12 @@ func Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -382,9 +388,11 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -406,12 +414,12 @@ func Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -482,9 +490,11 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -506,12 +516,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -582,9 +592,11 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -606,7 +618,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/controller/config/config_test.go b/pkg/manager/replication/controller/config/config_test.go index eba18af56a..e90809127a 100644 --- a/pkg/manager/replication/controller/config/config_test.go +++ b/pkg/manager/replication/controller/config/config_test.go @@ -26,6 +26,7 @@ import ( ) func TestNewConfig(t *testing.T) { + t.Parallel() type args struct { path string } @@ -78,9 +79,11 @@ func TestNewConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -95,7 +98,6 @@ func TestNewConfig(t *testing.T) { 
if err := test.checkFunc(test.want, gotCfg, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/controller/handler/grpc/handler.go b/pkg/manager/replication/controller/handler/grpc/handler.go index 5cbd4a8ac1..37f02fdbbf 100644 --- a/pkg/manager/replication/controller/handler/grpc/handler.go +++ b/pkg/manager/replication/controller/handler/grpc/handler.go @@ -20,8 +20,8 @@ package grpc import ( "context" - "github.com/vdaas/vald/apis/grpc/manager/replication/controller" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/replication/controller" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/pkg/manager/replication/controller/service" ) diff --git a/pkg/manager/replication/controller/handler/grpc/handler_test.go b/pkg/manager/replication/controller/handler/grpc/handler_test.go index 30e5dbe4c0..52d1f10c7f 100644 --- a/pkg/manager/replication/controller/handler/grpc/handler_test.go +++ b/pkg/manager/replication/controller/handler/grpc/handler_test.go @@ -22,15 +22,15 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/manager/replication/controller" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/replication/controller" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/pkg/manager/replication/controller/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -79,9 +79,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -96,12 +98,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_ReplicationInfo(t *testing.T) { + t.Parallel() type args struct { ctx context.Context in1 *payload.Empty @@ -167,9 +169,11 @@ func Test_server_ReplicationInfo(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -187,7 +191,6 @@ func Test_server_ReplicationInfo(t *testing.T) { if err := test.checkFunc(test.want, gotRes, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/controller/handler/grpc/option.go b/pkg/manager/replication/controller/handler/grpc/option.go index 4d9af19fb4..3da3fdd0e4 100644 --- a/pkg/manager/replication/controller/handler/grpc/option.go +++ b/pkg/manager/replication/controller/handler/grpc/option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/pkg/manager/replication/controller/service" type Option func(*server) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithReplicator(ctrl service.Replicator) Option { return func(s *server) { diff --git a/pkg/manager/replication/controller/handler/grpc/option_test.go b/pkg/manager/replication/controller/handler/grpc/option_test.go index 3087b39666..4a8a3062d2 100644 --- a/pkg/manager/replication/controller/handler/grpc/option_test.go +++ b/pkg/manager/replication/controller/handler/grpc/option_test.go @@ -21,11 +21,12 @@ import ( "testing" 
"github.com/vdaas/vald/pkg/manager/replication/controller/service" - "go.uber.org/goleak" ) func TestWithReplicator(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { ctrl service.Replicator @@ -63,7 +64,7 @@ func TestWithReplicator(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithReplicator(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithReplicator(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithReplicator(t *testing.T) { got := WithReplicator(test.args.ctrl) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/replication/controller/handler/rest/handler.go b/pkg/manager/replication/controller/handler/rest/handler.go index fb1abf481e..5d428dd12b 100644 --- a/pkg/manager/replication/controller/handler/rest/handler.go +++ b/pkg/manager/replication/controller/handler/rest/handler.go @@ -20,7 +20,7 @@ package rest import ( "net/http" - "github.com/vdaas/vald/apis/grpc/manager/replication/controller" + "github.com/vdaas/vald/apis/grpc/v1/manager/replication/controller" "github.com/vdaas/vald/internal/net/http/dump" "github.com/vdaas/vald/internal/net/http/json" ) diff --git a/pkg/manager/replication/controller/handler/rest/handler_test.go b/pkg/manager/replication/controller/handler/rest/handler_test.go index ca173ba6b4..99f97e0412 100644 --- a/pkg/manager/replication/controller/handler/rest/handler_test.go +++ b/pkg/manager/replication/controller/handler/rest/handler_test.go @@ -22,13 +22,13 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/manager/replication/controller" + "github.com/vdaas/vald/apis/grpc/v1/manager/replication/controller" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -77,9 +77,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -94,12 +96,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Index(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -165,9 +167,11 @@ func Test_handler_Index(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ 
-185,7 +189,6 @@ func Test_handler_Index(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/controller/handler/rest/option.go b/pkg/manager/replication/controller/handler/rest/option.go index c1996d13e3..e0c4ac957b 100644 --- a/pkg/manager/replication/controller/handler/rest/option.go +++ b/pkg/manager/replication/controller/handler/rest/option.go @@ -18,14 +18,12 @@ package rest import ( - "github.com/vdaas/vald/apis/grpc/manager/replication/controller" + "github.com/vdaas/vald/apis/grpc/v1/manager/replication/controller" ) type Option func(*handler) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithReplicator(rpl controller.ReplicationControllerServer) Option { return func(h *handler) { diff --git a/pkg/manager/replication/controller/handler/rest/option_test.go b/pkg/manager/replication/controller/handler/rest/option_test.go index 1af654f20b..2039459da9 100644 --- a/pkg/manager/replication/controller/handler/rest/option_test.go +++ b/pkg/manager/replication/controller/handler/rest/option_test.go @@ -20,12 +20,13 @@ package rest import ( "testing" - "github.com/vdaas/vald/apis/grpc/manager/replication/controller" - + "github.com/vdaas/vald/apis/grpc/v1/manager/replication/controller" "go.uber.org/goleak" ) func TestWithReplicator(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { rpl controller.ReplicationControllerServer @@ -63,7 +64,7 @@ func TestWithReplicator(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithReplicator(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithReplicator(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithReplicator(t *testing.T) { got := WithReplicator(test.args.rpl) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/replication/controller/router/option.go b/pkg/manager/replication/controller/router/option.go index 022776655c..8880ce5d14 100644 --- a/pkg/manager/replication/controller/router/option.go +++ b/pkg/manager/replication/controller/router/option.go @@ -24,11 +24,9 @@ import ( type Option func(*router) -var ( - defaultOpts = []Option{ - WithTimeout("3s"), - } -) +var defaultOpts = []Option{ + WithTimeout("3s"), +} func WithHandler(h rest.Handler) Option { return func(r *router) { diff --git a/pkg/manager/replication/controller/router/option_test.go b/pkg/manager/replication/controller/router/option_test.go index 41e4678693..f8713b58f6 100644 --- a/pkg/manager/replication/controller/router/option_test.go +++ b/pkg/manager/replication/controller/router/option_test.go @@ -22,11 +22,12 @@ 
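The option.go hunks above (and the matching ones in the router packages) all follow the same functional-options shape: an Option is a function that mutates the target struct, defaultOpts holds the baseline options, and the constructor applies defaults before caller-supplied overrides. The sketch below illustrates that shape under stated assumptions: the handler type, the WithTimeout option, and the New constructor body are simplified, hypothetical stand-ins rather than the real Vald implementations; only the Option/defaultOpts pattern itself comes from the patch.

// option_sketch.go — hypothetical illustration of the functional-options pattern used by these packages.
package rest

type handler struct {
    timeout string
}

// Option mutates a handler in place.
type Option func(*handler)

// defaultOpts is applied before caller-supplied options, so explicit options always win.
var defaultOpts = []Option{
    WithTimeout("3s"),
}

// WithTimeout sets the handler timeout when a non-empty value is given.
func WithTimeout(timeout string) Option {
    return func(h *handler) {
        if timeout != "" {
            h.timeout = timeout
        }
    }
}

// New builds a handler by applying defaults first, then overrides.
func New(opts ...Option) *handler {
    h := new(handler)
    for _, opt := range defaultOpts {
        opt(h)
    }
    for _, opt := range opts {
        opt(h)
    }
    return h
}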
import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/pkg/manager/replication/controller/handler/rest" - "go.uber.org/goleak" ) func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { h rest.Handler @@ -64,7 +65,7 @@ func TestWithHandler(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -100,9 +101,11 @@ func TestWithHandler(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -123,7 +126,7 @@ func TestWithHandler(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -131,7 +134,7 @@ func TestWithHandler(t *testing.T) { got := WithHandler(test.args.h) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -140,6 +143,8 @@ func TestWithHandler(t *testing.T) { } func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { timeout string @@ -177,7 +182,7 @@ func TestWithTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -213,9 +218,11 @@ func TestWithTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -236,7 +243,7 @@ func TestWithTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -244,7 +251,7 @@ func TestWithTimeout(t *testing.T) { got := WithTimeout(test.args.timeout) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -253,6 +260,8 @@ func TestWithTimeout(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -290,7 +299,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -326,9 +335,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := 
tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -349,7 +360,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -357,7 +368,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/replication/controller/router/router.go b/pkg/manager/replication/controller/router/router.go index 3aaa95735d..8974e7210a 100644 --- a/pkg/manager/replication/controller/router/router.go +++ b/pkg/manager/replication/controller/router/router.go @@ -32,7 +32,7 @@ type router struct { timeout string } -// New returns REST route&method information from handler interface +// New returns REST route&method information from handler interface. func New(opts ...Option) http.Handler { r := new(router) diff --git a/pkg/manager/replication/controller/router/router_test.go b/pkg/manager/replication/controller/router/router_test.go index 25eaec0bef..97248787ae 100644 --- a/pkg/manager/replication/controller/router/router_test.go +++ b/pkg/manager/replication/controller/router/router_test.go @@ -23,9 +23,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -74,8 +76,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -90,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/controller/service/discover.go b/pkg/manager/replication/controller/service/discover.go index 55ee8e2c85..b0793b4640 100644 --- a/pkg/manager/replication/controller/service/discover.go +++ b/pkg/manager/replication/controller/service/discover.go @@ -24,8 +24,8 @@ import ( "sync/atomic" "time" - "github.com/vdaas/vald/apis/grpc/manager/replication/agent" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/manager/replication/agent" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/k8s" @@ -138,7 +138,6 @@ func (r *replicator) Start(ctx context.Context) (<-chan error, error) { ech <- err } } - } })) return ech, nil diff --git a/pkg/manager/replication/controller/service/discover_test.go b/pkg/manager/replication/controller/service/discover_test.go index 814185abc3..faad858536 100644 --- a/pkg/manager/replication/controller/service/discover_test.go +++ b/pkg/manager/replication/controller/service/discover_test.go @@ -29,11 +29,11 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/k8s" "github.com/vdaas/vald/internal/net/grpc" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -86,9 +86,11 @@ func TestNew(t *testing.T) { */ } - for _, test := 
range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -103,12 +105,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotRp, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_replicator_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -192,9 +194,11 @@ func Test_replicator_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -219,12 +223,12 @@ func Test_replicator_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_replicator_GetCurrentPodIPs(t *testing.T) { + t.Parallel() type fields struct { pods atomic.Value ctrl k8s.Controller @@ -298,9 +302,11 @@ func Test_replicator_GetCurrentPodIPs(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -325,12 +331,12 @@ func Test_replicator_GetCurrentPodIPs(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_replicator_SendRecoveryRequest(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -410,9 +416,11 @@ func Test_replicator_SendRecoveryRequest(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -437,7 +445,6 @@ func Test_replicator_SendRecoveryRequest(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/controller/service/nodemap_test.go b/pkg/manager/replication/controller/service/nodemap_test.go index fc8a00c48c..3eee2a1ef5 100644 --- a/pkg/manager/replication/controller/service/nodemap_test.go +++ b/pkg/manager/replication/controller/service/nodemap_test.go @@ -25,9 +25,11 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/k8s/node" + "go.uber.org/goleak" ) func Test_newEntryNodeMap(t *testing.T) { + t.Parallel() type args struct { i node.Node } @@ -76,8 +78,11 @@ func Test_newEntryNodeMap(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -92,12 +97,12 @@ func Test_newEntryNodeMap(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMap_Load(t *testing.T) { + t.Parallel() type args struct { key string } @@ -169,8 +174,11 @@ func Test_nodeMap_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -191,12 +199,12 @@ func 
Test_nodeMap_Load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMap_load(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -249,8 +257,11 @@ func Test_entryNodeMap_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -268,12 +279,12 @@ func Test_entryNodeMap_load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMap_Store(t *testing.T) { + t.Parallel() type args struct { key string value node.Node @@ -340,8 +351,11 @@ func Test_nodeMap_Store(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,6 +381,7 @@ func Test_nodeMap_Store(t *testing.T) { } func Test_entryNodeMap_tryStore(t *testing.T) { + t.Parallel() type args struct { i *node.Node } @@ -425,8 +440,11 @@ func Test_entryNodeMap_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -444,12 +462,12 @@ func Test_entryNodeMap_tryStore(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMap_unexpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -498,8 +516,11 @@ func Test_entryNodeMap_unexpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -517,12 +538,12 @@ func Test_entryNodeMap_unexpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotWasExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMap_storeLocked(t *testing.T) { + t.Parallel() type args struct { i *node.Node } @@ -577,8 +598,11 @@ func Test_entryNodeMap_storeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -601,6 +625,7 @@ func Test_entryNodeMap_storeLocked(t *testing.T) { } func Test_nodeMap_LoadOrStore(t *testing.T) { + t.Parallel() type args struct { key string value node.Node @@ -675,8 +700,11 @@ func Test_nodeMap_LoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -697,12 +725,12 @@ func Test_nodeMap_LoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMap_tryLoadOrStore(t *testing.T) { + t.Parallel() type args struct { i node.Node } @@ -769,8 +797,11 @@ func Test_entryNodeMap_tryLoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := 
tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -788,12 +819,12 @@ func Test_entryNodeMap_tryLoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMap_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -857,8 +888,11 @@ func Test_nodeMap_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -884,6 +918,7 @@ func Test_nodeMap_Delete(t *testing.T) { } func Test_entryNodeMap_delete(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -932,8 +967,11 @@ func Test_entryNodeMap_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -951,12 +989,12 @@ func Test_entryNodeMap_delete(t *testing.T) { if err := test.checkFunc(test.want, gotHadValue); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMap_Range(t *testing.T) { + t.Parallel() type args struct { f func(key string, value node.Node) bool } @@ -1020,8 +1058,11 @@ func Test_nodeMap_Range(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1047,6 +1088,7 @@ func Test_nodeMap_Range(t *testing.T) { } func Test_nodeMap_missLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1100,8 +1142,11 @@ func Test_nodeMap_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1127,6 +1172,7 @@ func Test_nodeMap_missLocked(t *testing.T) { } func Test_nodeMap_dirtyLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1180,8 +1226,11 @@ func Test_nodeMap_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1207,6 +1256,7 @@ func Test_nodeMap_dirtyLocked(t *testing.T) { } func Test_entryNodeMap_tryExpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -1255,8 +1305,11 @@ func Test_entryNodeMap_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1274,7 +1327,6 @@ func Test_entryNodeMap_tryExpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotIsExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/controller/service/nodemetricsmap_test.go b/pkg/manager/replication/controller/service/nodemetricsmap_test.go index 1cc6ae0368..0be14b3cdf 100644 --- a/pkg/manager/replication/controller/service/nodemetricsmap_test.go +++ 
b/pkg/manager/replication/controller/service/nodemetricsmap_test.go @@ -25,9 +25,11 @@ import ( "github.com/vdaas/vald/internal/errors" mnode "github.com/vdaas/vald/internal/k8s/metrics/node" + "go.uber.org/goleak" ) func Test_newEntryNodeMetricsMap(t *testing.T) { + t.Parallel() type args struct { i mnode.Node } @@ -76,8 +78,11 @@ func Test_newEntryNodeMetricsMap(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -92,12 +97,12 @@ func Test_newEntryNodeMetricsMap(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMetricsMap_Load(t *testing.T) { + t.Parallel() type args struct { key string } @@ -169,8 +174,11 @@ func Test_nodeMetricsMap_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -191,12 +199,12 @@ func Test_nodeMetricsMap_Load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMetricsMap_load(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -249,8 +257,11 @@ func Test_entryNodeMetricsMap_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -268,12 +279,12 @@ func Test_entryNodeMetricsMap_load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMetricsMap_Store(t *testing.T) { + t.Parallel() type args struct { key string value mnode.Node @@ -340,8 +351,11 @@ func Test_nodeMetricsMap_Store(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,6 +381,7 @@ func Test_nodeMetricsMap_Store(t *testing.T) { } func Test_entryNodeMetricsMap_tryStore(t *testing.T) { + t.Parallel() type args struct { i *mnode.Node } @@ -425,8 +440,11 @@ func Test_entryNodeMetricsMap_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -444,12 +462,12 @@ func Test_entryNodeMetricsMap_tryStore(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMetricsMap_unexpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -498,8 +516,11 @@ func Test_entryNodeMetricsMap_unexpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -517,12 +538,12 @@ func Test_entryNodeMetricsMap_unexpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotWasExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func 
Test_entryNodeMetricsMap_storeLocked(t *testing.T) { + t.Parallel() type args struct { i *mnode.Node } @@ -577,8 +598,11 @@ func Test_entryNodeMetricsMap_storeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -601,6 +625,7 @@ func Test_entryNodeMetricsMap_storeLocked(t *testing.T) { } func Test_nodeMetricsMap_LoadOrStore(t *testing.T) { + t.Parallel() type args struct { key string value mnode.Node @@ -675,8 +700,11 @@ func Test_nodeMetricsMap_LoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -697,12 +725,12 @@ func Test_nodeMetricsMap_LoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryNodeMetricsMap_tryLoadOrStore(t *testing.T) { + t.Parallel() type args struct { i mnode.Node } @@ -769,8 +797,11 @@ func Test_entryNodeMetricsMap_tryLoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -788,12 +819,12 @@ func Test_entryNodeMetricsMap_tryLoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMetricsMap_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -857,8 +888,11 @@ func Test_nodeMetricsMap_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -884,6 +918,7 @@ func Test_nodeMetricsMap_Delete(t *testing.T) { } func Test_entryNodeMetricsMap_delete(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -932,8 +967,11 @@ func Test_entryNodeMetricsMap_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -951,12 +989,12 @@ func Test_entryNodeMetricsMap_delete(t *testing.T) { if err := test.checkFunc(test.want, gotHadValue); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_nodeMetricsMap_Range(t *testing.T) { + t.Parallel() type args struct { f func(key string, value mnode.Node) bool } @@ -1020,8 +1058,11 @@ func Test_nodeMetricsMap_Range(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1047,6 +1088,7 @@ func Test_nodeMetricsMap_Range(t *testing.T) { } func Test_nodeMetricsMap_missLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1100,8 +1142,11 @@ func Test_nodeMetricsMap_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer 
goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1127,6 +1172,7 @@ func Test_nodeMetricsMap_missLocked(t *testing.T) { } func Test_nodeMetricsMap_dirtyLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1180,8 +1226,11 @@ func Test_nodeMetricsMap_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1207,6 +1256,7 @@ func Test_nodeMetricsMap_dirtyLocked(t *testing.T) { } func Test_entryNodeMetricsMap_tryExpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -1255,8 +1305,11 @@ func Test_entryNodeMetricsMap_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1274,7 +1327,6 @@ func Test_entryNodeMetricsMap_tryExpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotIsExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/controller/service/option.go b/pkg/manager/replication/controller/service/option.go index 4604930960..0d59ec4c70 100644 --- a/pkg/manager/replication/controller/service/option.go +++ b/pkg/manager/replication/controller/service/option.go @@ -26,13 +26,11 @@ import ( type Option func(r *replicator) error -var ( - defaultOpts = []Option{ - WithRecoverCheckDuration("1m"), - WithErrGroup(errgroup.Get()), - WithNamespace("vald"), - } -) +var defaultOpts = []Option{ + WithRecoverCheckDuration("1m"), + WithErrGroup(errgroup.Get()), + WithNamespace("vald"), +} func WithName(name string) Option { return func(r *replicator) error { diff --git a/pkg/manager/replication/controller/service/option_test.go b/pkg/manager/replication/controller/service/option_test.go index 9398080f52..a99e787869 100644 --- a/pkg/manager/replication/controller/service/option_test.go +++ b/pkg/manager/replication/controller/service/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/internal/errgroup" - "go.uber.org/goleak" ) func TestWithName(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { name string @@ -63,7 +64,7 @@ func TestWithName(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithName(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithName(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithName(t *testing.T) { got := WithName(test.args.name) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := 
test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -139,6 +142,8 @@ func TestWithName(t *testing.T) { } func TestWithNamespace(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { ns string @@ -176,7 +181,7 @@ func TestWithNamespace(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -212,9 +217,11 @@ func TestWithNamespace(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -235,7 +242,7 @@ func TestWithNamespace(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -243,7 +250,7 @@ func TestWithNamespace(t *testing.T) { got := WithNamespace(test.args.ns) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -252,6 +259,8 @@ func TestWithNamespace(t *testing.T) { } func TestWithRecoverCheckDuration(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { dur string @@ -289,7 +298,7 @@ func TestWithRecoverCheckDuration(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -325,9 +334,11 @@ func TestWithRecoverCheckDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -348,7 +359,7 @@ func TestWithRecoverCheckDuration(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -356,7 +367,7 @@ func TestWithRecoverCheckDuration(t *testing.T) { got := WithRecoverCheckDuration(test.args.dur) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -365,6 +376,8 @@ func TestWithRecoverCheckDuration(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -402,7 +415,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -438,9 +451,11 @@ func TestWithErrGroup(t 
*testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -461,7 +476,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -469,7 +484,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/manager/replication/controller/service/podmetricsmap_test.go b/pkg/manager/replication/controller/service/podmetricsmap_test.go index 3c224e71ca..a212f80753 100644 --- a/pkg/manager/replication/controller/service/podmetricsmap_test.go +++ b/pkg/manager/replication/controller/service/podmetricsmap_test.go @@ -25,9 +25,11 @@ import ( "github.com/vdaas/vald/internal/errors" mpod "github.com/vdaas/vald/internal/k8s/metrics/pod" + "go.uber.org/goleak" ) func Test_newEntryPodMetricsMap(t *testing.T) { + t.Parallel() type args struct { i mpod.Pod } @@ -76,8 +78,11 @@ func Test_newEntryPodMetricsMap(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -92,12 +97,12 @@ func Test_newEntryPodMetricsMap(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podMetricsMap_Load(t *testing.T) { + t.Parallel() type args struct { key string } @@ -169,8 +174,11 @@ func Test_podMetricsMap_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -191,12 +199,12 @@ func Test_podMetricsMap_Load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodMetricsMap_load(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -249,8 +257,11 @@ func Test_entryPodMetricsMap_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -268,12 +279,12 @@ func Test_entryPodMetricsMap_load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podMetricsMap_Store(t *testing.T) { + t.Parallel() type args struct { key string value mpod.Pod @@ -340,8 +351,11 @@ func Test_podMetricsMap_Store(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,6 +381,7 @@ func Test_podMetricsMap_Store(t *testing.T) { } func Test_entryPodMetricsMap_tryStore(t *testing.T) { + t.Parallel() type args struct { i *mpod.Pod } @@ -425,8 +440,11 @@ func 
Test_entryPodMetricsMap_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -444,12 +462,12 @@ func Test_entryPodMetricsMap_tryStore(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodMetricsMap_unexpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -498,8 +516,11 @@ func Test_entryPodMetricsMap_unexpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -517,12 +538,12 @@ func Test_entryPodMetricsMap_unexpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotWasExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodMetricsMap_storeLocked(t *testing.T) { + t.Parallel() type args struct { i *mpod.Pod } @@ -577,8 +598,11 @@ func Test_entryPodMetricsMap_storeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -601,6 +625,7 @@ func Test_entryPodMetricsMap_storeLocked(t *testing.T) { } func Test_podMetricsMap_LoadOrStore(t *testing.T) { + t.Parallel() type args struct { key string value mpod.Pod @@ -675,8 +700,11 @@ func Test_podMetricsMap_LoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -697,12 +725,12 @@ func Test_podMetricsMap_LoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodMetricsMap_tryLoadOrStore(t *testing.T) { + t.Parallel() type args struct { i mpod.Pod } @@ -769,8 +797,11 @@ func Test_entryPodMetricsMap_tryLoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -788,12 +819,12 @@ func Test_entryPodMetricsMap_tryLoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podMetricsMap_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -857,8 +888,11 @@ func Test_podMetricsMap_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -884,6 +918,7 @@ func Test_podMetricsMap_Delete(t *testing.T) { } func Test_entryPodMetricsMap_delete(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -932,8 +967,11 @@ func Test_entryPodMetricsMap_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
test.beforeFunc() } @@ -951,12 +989,12 @@ func Test_entryPodMetricsMap_delete(t *testing.T) { if err := test.checkFunc(test.want, gotHadValue); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podMetricsMap_Range(t *testing.T) { + t.Parallel() type args struct { f func(key string, value mpod.Pod) bool } @@ -1020,8 +1058,11 @@ func Test_podMetricsMap_Range(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1047,6 +1088,7 @@ func Test_podMetricsMap_Range(t *testing.T) { } func Test_podMetricsMap_missLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1100,8 +1142,11 @@ func Test_podMetricsMap_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1127,6 +1172,7 @@ func Test_podMetricsMap_missLocked(t *testing.T) { } func Test_podMetricsMap_dirtyLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1180,8 +1226,11 @@ func Test_podMetricsMap_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1207,6 +1256,7 @@ func Test_podMetricsMap_dirtyLocked(t *testing.T) { } func Test_entryPodMetricsMap_tryExpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -1255,8 +1305,11 @@ func Test_entryPodMetricsMap_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1274,7 +1327,6 @@ func Test_entryPodMetricsMap_tryExpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotIsExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/controller/service/podsmap_test.go b/pkg/manager/replication/controller/service/podsmap_test.go index deeb4e7554..55261144e0 100644 --- a/pkg/manager/replication/controller/service/podsmap_test.go +++ b/pkg/manager/replication/controller/service/podsmap_test.go @@ -25,9 +25,11 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/k8s/pod" + "go.uber.org/goleak" ) func Test_newEntryPodsMap(t *testing.T) { + t.Parallel() type args struct { i []pod.Pod } @@ -76,8 +78,11 @@ func Test_newEntryPodsMap(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -92,12 +97,12 @@ func Test_newEntryPodsMap(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podsMap_Load(t *testing.T) { + t.Parallel() type args struct { key string } @@ -169,8 +174,11 @@ func Test_podsMap_Load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
test.beforeFunc(test.args) } @@ -191,12 +199,12 @@ func Test_podsMap_Load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodsMap_load(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -249,8 +257,11 @@ func Test_entryPodsMap_load(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -268,12 +279,12 @@ func Test_entryPodsMap_load(t *testing.T) { if err := test.checkFunc(test.want, gotValue, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podsMap_Store(t *testing.T) { + t.Parallel() type args struct { key string value []pod.Pod @@ -340,8 +351,11 @@ func Test_podsMap_Store(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,6 +381,7 @@ func Test_podsMap_Store(t *testing.T) { } func Test_entryPodsMap_tryStore(t *testing.T) { + t.Parallel() type args struct { i *[]pod.Pod } @@ -425,8 +440,11 @@ func Test_entryPodsMap_tryStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -444,12 +462,12 @@ func Test_entryPodsMap_tryStore(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodsMap_unexpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -498,8 +516,11 @@ func Test_entryPodsMap_unexpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -517,12 +538,12 @@ func Test_entryPodsMap_unexpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotWasExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodsMap_storeLocked(t *testing.T) { + t.Parallel() type args struct { i *[]pod.Pod } @@ -577,8 +598,11 @@ func Test_entryPodsMap_storeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -601,6 +625,7 @@ func Test_entryPodsMap_storeLocked(t *testing.T) { } func Test_podsMap_LoadOrStore(t *testing.T) { + t.Parallel() type args struct { key string value []pod.Pod @@ -675,8 +700,11 @@ func Test_podsMap_LoadOrStore(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -697,12 +725,12 @@ func Test_podsMap_LoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_entryPodsMap_tryLoadOrStore(t *testing.T) { + t.Parallel() type args struct { i []pod.Pod } @@ -769,8 +797,11 @@ func Test_entryPodsMap_tryLoadOrStore(t *testing.T) { */ } - for _, test 
:= range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -788,12 +819,12 @@ func Test_entryPodsMap_tryLoadOrStore(t *testing.T) { if err := test.checkFunc(test.want, gotActual, gotLoaded, gotOk); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podsMap_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -857,8 +888,11 @@ func Test_podsMap_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -884,6 +918,7 @@ func Test_podsMap_Delete(t *testing.T) { } func Test_entryPodsMap_delete(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -932,8 +967,11 @@ func Test_entryPodsMap_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -951,12 +989,12 @@ func Test_entryPodsMap_delete(t *testing.T) { if err := test.checkFunc(test.want, gotHadValue); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_podsMap_Range(t *testing.T) { + t.Parallel() type args struct { f func(key string, value []pod.Pod) bool } @@ -1020,8 +1058,11 @@ func Test_podsMap_Range(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1047,6 +1088,7 @@ func Test_podsMap_Range(t *testing.T) { } func Test_podsMap_missLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1100,8 +1142,11 @@ func Test_podsMap_missLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1127,6 +1172,7 @@ func Test_podsMap_missLocked(t *testing.T) { } func Test_podsMap_dirtyLocked(t *testing.T) { + t.Parallel() type fields struct { mu sync.Mutex read atomic.Value @@ -1180,8 +1226,11 @@ func Test_podsMap_dirtyLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1207,6 +1256,7 @@ func Test_podsMap_dirtyLocked(t *testing.T) { } func Test_entryPodsMap_tryExpungeLocked(t *testing.T) { + t.Parallel() type fields struct { p unsafe.Pointer } @@ -1255,8 +1305,11 @@ func Test_entryPodsMap_tryExpungeLocked(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -1274,7 +1327,6 @@ func Test_entryPodsMap_tryExpungeLocked(t *testing.T) { if err := test.checkFunc(test.want, gotIsExpunged); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/manager/replication/controller/usecase/discovered.go b/pkg/manager/replication/controller/usecase/discovered.go index 93e29d1c32..f200b67448 100644 --- 
a/pkg/manager/replication/controller/usecase/discovered.go +++ b/pkg/manager/replication/controller/usecase/discovered.go @@ -19,7 +19,7 @@ package usecase import ( "context" - "github.com/vdaas/vald/apis/grpc/manager/replication/controller" + "github.com/vdaas/vald/apis/grpc/v1/manager/replication/controller" iconf "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/net/grpc" @@ -108,7 +108,6 @@ func New(cfg *config.Data) (r runner.Runner, err error) { }), // TODO add GraphQL handler ) - if err != nil { return nil, err } diff --git a/pkg/manager/replication/controller/usecase/discovered_test.go b/pkg/manager/replication/controller/usecase/discovered_test.go index 1d78025bc5..507c66163c 100644 --- a/pkg/manager/replication/controller/usecase/discovered_test.go +++ b/pkg/manager/replication/controller/usecase/discovered_test.go @@ -28,11 +28,11 @@ import ( "github.com/vdaas/vald/internal/servers/starter" "github.com/vdaas/vald/pkg/manager/replication/controller/config" "github.com/vdaas/vald/pkg/manager/replication/controller/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -85,9 +85,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -102,12 +104,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -178,9 +180,11 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -202,12 +206,12 @@ func Test_run_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -282,9 +286,11 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -306,12 +312,12 @@ func Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -382,9 +388,11 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -406,12 +414,12 @@ func Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -482,9 +490,11 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + 
test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -506,12 +516,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -582,9 +592,11 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -606,7 +618,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/meta/cassandra/config/config_test.go b/pkg/meta/cassandra/config/config_test.go index eba18af56a..e90809127a 100644 --- a/pkg/meta/cassandra/config/config_test.go +++ b/pkg/meta/cassandra/config/config_test.go @@ -26,6 +26,7 @@ import ( ) func TestNewConfig(t *testing.T) { + t.Parallel() type args struct { path string } @@ -78,9 +79,11 @@ func TestNewConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -95,7 +98,6 @@ func TestNewConfig(t *testing.T) { if err := test.checkFunc(test.want, gotCfg, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/meta/cassandra/handler/grpc/handler.go b/pkg/meta/cassandra/handler/grpc/handler.go index 0ee13aef29..e7761e0c4b 100644 --- a/pkg/meta/cassandra/handler/grpc/handler.go +++ b/pkg/meta/cassandra/handler/grpc/handler.go @@ -21,8 +21,8 @@ import ( "context" "fmt" - "github.com/vdaas/vald/apis/grpc/meta" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/meta" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" diff --git a/pkg/meta/cassandra/handler/grpc/handler_test.go b/pkg/meta/cassandra/handler/grpc/handler_test.go index dea536dec1..e3cd1d65e5 100644 --- a/pkg/meta/cassandra/handler/grpc/handler_test.go +++ b/pkg/meta/cassandra/handler/grpc/handler_test.go @@ -22,15 +22,15 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/meta" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/meta" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/pkg/meta/cassandra/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -79,9 +79,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -96,12 +98,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_GetMeta(t *testing.T) { + t.Parallel() type args struct { ctx context.Context key *payload.Meta_Key @@ -167,9 +169,11 @@ func Test_server_GetMeta(t *testing.T) { 
*/ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -187,12 +191,12 @@ func Test_server_GetMeta(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_GetMetas(t *testing.T) { + t.Parallel() type args struct { ctx context.Context keys *payload.Meta_Keys @@ -258,9 +262,11 @@ func Test_server_GetMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -278,12 +284,12 @@ func Test_server_GetMetas(t *testing.T) { if err := test.checkFunc(test.want, gotMv, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_GetMetaInverse(t *testing.T) { + t.Parallel() type args struct { ctx context.Context val *payload.Meta_Val @@ -349,9 +355,11 @@ func Test_server_GetMetaInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -369,12 +377,12 @@ func Test_server_GetMetaInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_GetMetasInverse(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vals *payload.Meta_Vals @@ -440,9 +448,11 @@ func Test_server_GetMetasInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -460,12 +470,12 @@ func Test_server_GetMetasInverse(t *testing.T) { if err := test.checkFunc(test.want, gotMk, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_SetMeta(t *testing.T) { + t.Parallel() type args struct { ctx context.Context kv *payload.Meta_KeyVal @@ -531,9 +541,11 @@ func Test_server_SetMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -551,12 +563,12 @@ func Test_server_SetMeta(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_SetMetas(t *testing.T) { + t.Parallel() type args struct { ctx context.Context kvs *payload.Meta_KeyVals @@ -622,9 +634,11 @@ func Test_server_SetMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -642,12 +656,12 @@ func Test_server_SetMetas(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_DeleteMeta(t *testing.T) { + t.Parallel() type args struct { ctx context.Context key *payload.Meta_Key @@ 
-713,9 +727,11 @@ func Test_server_DeleteMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -733,12 +749,12 @@ func Test_server_DeleteMeta(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_DeleteMetas(t *testing.T) { + t.Parallel() type args struct { ctx context.Context keys *payload.Meta_Keys @@ -804,9 +820,11 @@ func Test_server_DeleteMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -824,12 +842,12 @@ func Test_server_DeleteMetas(t *testing.T) { if err := test.checkFunc(test.want, gotMv, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_DeleteMetaInverse(t *testing.T) { + t.Parallel() type args struct { ctx context.Context val *payload.Meta_Val @@ -895,9 +913,11 @@ func Test_server_DeleteMetaInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -915,12 +935,12 @@ func Test_server_DeleteMetaInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_DeleteMetasInverse(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vals *payload.Meta_Vals @@ -986,9 +1006,11 @@ func Test_server_DeleteMetasInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1006,7 +1028,6 @@ func Test_server_DeleteMetasInverse(t *testing.T) { if err := test.checkFunc(test.want, gotMk, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/meta/cassandra/handler/grpc/option.go b/pkg/meta/cassandra/handler/grpc/option.go index 6ba2ff9c69..41fd7ea2c9 100644 --- a/pkg/meta/cassandra/handler/grpc/option.go +++ b/pkg/meta/cassandra/handler/grpc/option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/pkg/meta/cassandra/service" type Option func(*server) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithCassandra(r service.Cassandra) Option { return func(s *server) { diff --git a/pkg/meta/cassandra/handler/grpc/option_test.go b/pkg/meta/cassandra/handler/grpc/option_test.go index 95dad3edf0..47611543aa 100644 --- a/pkg/meta/cassandra/handler/grpc/option_test.go +++ b/pkg/meta/cassandra/handler/grpc/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/pkg/meta/cassandra/service" - "go.uber.org/goleak" ) func TestWithCassandra(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { r service.Cassandra @@ -63,7 +64,7 @@ func TestWithCassandra(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: 
\"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithCassandra(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithCassandra(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithCassandra(t *testing.T) { got := WithCassandra(test.args.r) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/meta/cassandra/handler/rest/handler.go b/pkg/meta/cassandra/handler/rest/handler.go index d41944d9f3..f85d9f254c 100644 --- a/pkg/meta/cassandra/handler/rest/handler.go +++ b/pkg/meta/cassandra/handler/rest/handler.go @@ -20,8 +20,8 @@ package rest import ( "net/http" - "github.com/vdaas/vald/apis/grpc/meta" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/meta" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/net/http/dump" "github.com/vdaas/vald/internal/net/http/json" ) diff --git a/pkg/meta/cassandra/handler/rest/handler_test.go b/pkg/meta/cassandra/handler/rest/handler_test.go index 466171b77b..d6ba29e992 100644 --- a/pkg/meta/cassandra/handler/rest/handler_test.go +++ b/pkg/meta/cassandra/handler/rest/handler_test.go @@ -22,13 +22,13 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/meta" + "github.com/vdaas/vald/apis/grpc/v1/meta" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -77,9 +77,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -94,12 +96,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Index(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -165,9 +167,11 @@ func Test_handler_Index(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -185,12 +189,12 @@ func Test_handler_Index(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_GetMeta(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -256,9 +260,11 @@ func Test_handler_GetMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -276,12 +282,12 @@ func 
Test_handler_GetMeta(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_GetMetas(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -347,9 +353,11 @@ func Test_handler_GetMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,12 +375,12 @@ func Test_handler_GetMetas(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_GetMetaInverse(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -438,9 +446,11 @@ func Test_handler_GetMetaInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -458,12 +468,12 @@ func Test_handler_GetMetaInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_GetMetasInverse(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -529,9 +539,11 @@ func Test_handler_GetMetasInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -549,12 +561,12 @@ func Test_handler_GetMetasInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_SetMeta(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -620,9 +632,11 @@ func Test_handler_SetMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -640,12 +654,12 @@ func Test_handler_SetMeta(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_SetMetas(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -711,9 +725,11 @@ func Test_handler_SetMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -731,12 +747,12 @@ func Test_handler_SetMetas(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_DeleteMeta(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -802,9 +818,11 @@ func Test_handler_DeleteMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
test.beforeFunc(test.args) } @@ -822,12 +840,12 @@ func Test_handler_DeleteMeta(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_DeleteMetas(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -893,9 +911,11 @@ func Test_handler_DeleteMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -913,12 +933,12 @@ func Test_handler_DeleteMetas(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_DeleteMetaInverse(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -984,9 +1004,11 @@ func Test_handler_DeleteMetaInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1004,12 +1026,12 @@ func Test_handler_DeleteMetaInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_DeleteMetasInverse(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -1075,9 +1097,11 @@ func Test_handler_DeleteMetasInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1095,7 +1119,6 @@ func Test_handler_DeleteMetasInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/meta/cassandra/handler/rest/option.go b/pkg/meta/cassandra/handler/rest/option.go index b5a1aa2836..1da6ab5db6 100644 --- a/pkg/meta/cassandra/handler/rest/option.go +++ b/pkg/meta/cassandra/handler/rest/option.go @@ -17,13 +17,11 @@ // Package rest provides rest api logic package rest -import "github.com/vdaas/vald/apis/grpc/meta" +import "github.com/vdaas/vald/apis/grpc/v1/meta" type Option func(*handler) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithMeta(m meta.MetaServer) Option { return func(h *handler) { diff --git a/pkg/meta/cassandra/handler/rest/option_test.go b/pkg/meta/cassandra/handler/rest/option_test.go index 75f1e2b964..5a1ca31571 100644 --- a/pkg/meta/cassandra/handler/rest/option_test.go +++ b/pkg/meta/cassandra/handler/rest/option_test.go @@ -20,12 +20,13 @@ package rest import ( "testing" - "github.com/vdaas/vald/apis/grpc/meta" - + "github.com/vdaas/vald/apis/grpc/v1/meta" "go.uber.org/goleak" ) func TestWithMeta(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { m meta.MetaServer @@ -63,7 +64,7 @@ func TestWithMeta(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithMeta(t *testing.T) { */ 
} - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithMeta(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithMeta(t *testing.T) { got := WithMeta(test.args.m) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/meta/cassandra/router/option.go b/pkg/meta/cassandra/router/option.go index a57f87966f..ebe8c52895 100644 --- a/pkg/meta/cassandra/router/option.go +++ b/pkg/meta/cassandra/router/option.go @@ -24,11 +24,9 @@ import ( type Option func(*router) -var ( - defaultOpts = []Option{ - WithTimeout("3s"), - } -) +var defaultOpts = []Option{ + WithTimeout("3s"), +} func WithHandler(h rest.Handler) Option { return func(r *router) { diff --git a/pkg/meta/cassandra/router/option_test.go b/pkg/meta/cassandra/router/option_test.go index 63539f268f..d34beaba1c 100644 --- a/pkg/meta/cassandra/router/option_test.go +++ b/pkg/meta/cassandra/router/option_test.go @@ -22,11 +22,12 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/pkg/meta/cassandra/handler/rest" - "go.uber.org/goleak" ) func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { h rest.Handler @@ -64,7 +65,7 @@ func TestWithHandler(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -100,9 +101,11 @@ func TestWithHandler(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -123,7 +126,7 @@ func TestWithHandler(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -131,7 +134,7 @@ func TestWithHandler(t *testing.T) { got := WithHandler(test.args.h) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -140,6 +143,8 @@ func TestWithHandler(t *testing.T) { } func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { timeout string @@ -177,7 +182,7 @@ func TestWithTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -213,9 +218,11 @@ func TestWithTimeout(t *testing.T) { */ } - for _, test := 
range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -236,7 +243,7 @@ func TestWithTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -244,7 +251,7 @@ func TestWithTimeout(t *testing.T) { got := WithTimeout(test.args.timeout) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -253,6 +260,8 @@ func TestWithTimeout(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -290,7 +299,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -326,9 +335,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -349,7 +360,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -357,7 +368,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/meta/cassandra/router/router.go b/pkg/meta/cassandra/router/router.go index a35e6ebc20..853cc1ccff 100644 --- a/pkg/meta/cassandra/router/router.go +++ b/pkg/meta/cassandra/router/router.go @@ -32,7 +32,7 @@ type router struct { timeout string } -// New returns REST route&method information from handler interface +// New returns REST route&method information from handler interface. 
func New(opts ...Option) http.Handler { r := new(router) diff --git a/pkg/meta/cassandra/router/router_test.go b/pkg/meta/cassandra/router/router_test.go index 25eaec0bef..97248787ae 100644 --- a/pkg/meta/cassandra/router/router_test.go +++ b/pkg/meta/cassandra/router/router_test.go @@ -23,9 +23,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -74,8 +76,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -90,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/meta/cassandra/service/cassandra_test.go b/pkg/meta/cassandra/service/cassandra_test.go index 80685dceb8..c65bf42416 100644 --- a/pkg/meta/cassandra/service/cassandra_test.go +++ b/pkg/meta/cassandra/service/cassandra_test.go @@ -28,6 +28,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -80,8 +81,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -97,12 +100,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotCas, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Connect(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -167,8 +170,10 @@ func Test_client_Connect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -189,12 +194,12 @@ func Test_client_Connect(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Close(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -259,8 +264,10 @@ func Test_client_Close(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -281,12 +288,12 @@ func Test_client_Close(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Get(t *testing.T) { + t.Parallel() type args struct { key string } @@ -355,8 +362,10 @@ func Test_client_Get(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -377,12 +386,12 @@ func Test_client_Get(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_GetMultiple(t *testing.T) { + t.Parallel() type args struct { keys []string } @@ -451,8 +460,10 @@ func Test_client_GetMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if 
test.beforeFunc != nil { test.beforeFunc(test.args) @@ -473,12 +484,12 @@ func Test_client_GetMultiple(t *testing.T) { if err := test.checkFunc(test.want, gotVals, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_GetInverse(t *testing.T) { + t.Parallel() type args struct { val string } @@ -547,8 +558,10 @@ func Test_client_GetInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -569,12 +582,12 @@ func Test_client_GetInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_GetInverseMultiple(t *testing.T) { + t.Parallel() type args struct { vals []string } @@ -643,8 +656,10 @@ func Test_client_GetInverseMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -665,12 +680,12 @@ func Test_client_GetInverseMultiple(t *testing.T) { if err := test.checkFunc(test.want, gotKeys, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Set(t *testing.T) { + t.Parallel() type args struct { key string val string @@ -738,8 +753,10 @@ func Test_client_Set(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -760,12 +777,12 @@ func Test_client_Set(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_SetMultiple(t *testing.T) { + t.Parallel() type args struct { kvs map[string]string } @@ -830,8 +847,10 @@ func Test_client_SetMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -852,12 +871,12 @@ func Test_client_SetMultiple(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_deleteByKeys(t *testing.T) { + t.Parallel() type args struct { keys []string } @@ -926,8 +945,10 @@ func Test_client_deleteByKeys(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -948,12 +969,12 @@ func Test_client_deleteByKeys(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Delete(t *testing.T) { + t.Parallel() type args struct { key string } @@ -1022,8 +1043,10 @@ func Test_client_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1044,12 +1067,12 @@ func Test_client_Delete(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_DeleteMultiple(t *testing.T) { + t.Parallel() type args struct 
{ keys []string } @@ -1118,8 +1141,10 @@ func Test_client_DeleteMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1140,12 +1165,12 @@ func Test_client_DeleteMultiple(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_deleteByValues(t *testing.T) { + t.Parallel() type args struct { vals []string } @@ -1214,8 +1239,10 @@ func Test_client_deleteByValues(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1236,12 +1263,12 @@ func Test_client_deleteByValues(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_DeleteInverse(t *testing.T) { + t.Parallel() type args struct { val string } @@ -1310,8 +1337,10 @@ func Test_client_DeleteInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1332,12 +1361,12 @@ func Test_client_DeleteInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_DeleteInverseMultiple(t *testing.T) { + t.Parallel() type args struct { vals []string } @@ -1406,8 +1435,10 @@ func Test_client_DeleteInverseMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1428,7 +1459,6 @@ func Test_client_DeleteInverseMultiple(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/meta/cassandra/service/option.go b/pkg/meta/cassandra/service/option.go index c417635483..ff9d7e2010 100644 --- a/pkg/meta/cassandra/service/option.go +++ b/pkg/meta/cassandra/service/option.go @@ -23,12 +23,10 @@ import ( type Option func(*client) error -var ( - defaultOpts = []Option{ - WithKVTable("kv"), - WithVKTable("vk"), - } -) +var defaultOpts = []Option{ + WithKVTable("kv"), + WithVKTable("vk"), +} func WithCassandra(db cassandra.Cassandra) Option { return func(c *client) error { diff --git a/pkg/meta/cassandra/service/option_test.go b/pkg/meta/cassandra/service/option_test.go new file mode 100644 index 0000000000..cba2130217 --- /dev/null +++ b/pkg/meta/cassandra/service/option_test.go @@ -0,0 +1,376 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
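The option.go hunk above also collapses a one-element var ( ... ) grouping into a plain var defaultOpts = []Option{...} declaration, the form gofmt-based linters prefer for a single variable, and keeps the error-returning functional-option shape used across these packages. The sketch below illustrates that shape only; the field names, validation, and New signature are assumptions for illustration, not the package's actual implementation.

// illustrative sketch of the error-returning functional-option shape
package service

import "errors"

// client mirrors only what the sketch needs; the real struct has more fields.
type client struct {
	kvTable string
	vkTable string
}

// Option configures a client and may reject invalid input.
type Option func(*client) error

// A single variable needs no var ( ... ) block.
var defaultOpts = []Option{
	WithKVTable("kv"),
	WithVKTable("vk"),
}

func WithKVTable(name string) Option {
	return func(c *client) error {
		if name == "" {
			return errors.New("kv table name is empty")
		}
		c.kvTable = name
		return nil
	}
}

func WithVKTable(name string) Option {
	return func(c *client) error {
		if name == "" {
			return errors.New("vk table name is empty")
		}
		c.vkTable = name
		return nil
	}
}

// New applies the defaults first, then the caller's options,
// returning the first error an option reports.
func New(opts ...Option) (*client, error) {
	c := new(client)
	for _, opt := range append(defaultOpts, opts...) {
		if err := opt(c); err != nil {
			return nil, err
		}
	}
	return c, nil
}

Usage stays the same under this shape: New(WithKVTable("meta_kv")) applies the defaults, overrides the KV table name, and stops at the first option that returns an error.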
+// + +// Package service manages the main logic of server. +package service + +import ( + "testing" + + "github.com/vdaas/vald/internal/db/nosql/cassandra" + "go.uber.org/goleak" +) + +func TestWithCassandra(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + db cassandra.Cassandra + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + db: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + db: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithCassandra(test.args.db) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithCassandra(test.args.db) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithKVTable(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + name string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + name: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + name: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithKVTable(test.args.name) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithKVTable(test.args.name) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithVKTable(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + name string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + name: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + name: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithVKTable(test.args.name) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithVKTable(test.args.name) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/meta/cassandra/usecase/meta.go b/pkg/meta/cassandra/usecase/meta.go index 202b50a0af..85695efe03 100644 --- a/pkg/meta/cassandra/usecase/meta.go +++ b/pkg/meta/cassandra/usecase/meta.go @@ -19,7 +19,7 @@ package usecase import ( "context" - "github.com/vdaas/vald/apis/grpc/meta" + "github.com/vdaas/vald/apis/grpc/v1/meta" iconf "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/db/nosql/cassandra" "github.com/vdaas/vald/internal/errgroup" @@ -137,7 +137,6 @@ func New(cfg *config.Data) (r runner.Runner, err error) { }), // TODO add GraphQL handler ) - if err != nil { return nil, err } diff --git a/pkg/meta/cassandra/usecase/meta_test.go b/pkg/meta/cassandra/usecase/meta_test.go index bc847f82f2..c2b2eb1b95 100644 --- a/pkg/meta/cassandra/usecase/meta_test.go +++ b/pkg/meta/cassandra/usecase/meta_test.go @@ -28,11 +28,11 @@ import ( "github.com/vdaas/vald/internal/servers/starter" "github.com/vdaas/vald/pkg/meta/cassandra/config" "github.com/vdaas/vald/pkg/meta/cassandra/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -85,9 +85,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -102,12 +104,12 @@ func TestNew(t 
*testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -178,9 +180,11 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -202,12 +206,12 @@ func Test_run_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -282,9 +286,11 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -306,12 +312,12 @@ func Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -382,9 +388,11 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -406,12 +414,12 @@ func Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -482,9 +490,11 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -506,12 +516,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -582,9 +592,11 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -606,7 +618,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/meta/redis/config/config_test.go b/pkg/meta/redis/config/config_test.go index eba18af56a..e90809127a 100644 --- a/pkg/meta/redis/config/config_test.go +++ b/pkg/meta/redis/config/config_test.go @@ -26,6 +26,7 @@ import ( ) func TestNewConfig(t *testing.T) { + t.Parallel() type args struct { path string } @@ -78,9 +79,11 @@ func TestNewConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -95,7 +98,6 @@ func 
TestNewConfig(t *testing.T) { if err := test.checkFunc(test.want, gotCfg, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/meta/redis/handler/grpc/handler.go b/pkg/meta/redis/handler/grpc/handler.go index 5279fbae40..263357f367 100644 --- a/pkg/meta/redis/handler/grpc/handler.go +++ b/pkg/meta/redis/handler/grpc/handler.go @@ -21,8 +21,8 @@ import ( "context" "fmt" - "github.com/vdaas/vald/apis/grpc/meta" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/meta" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" diff --git a/pkg/meta/redis/handler/grpc/handler_test.go b/pkg/meta/redis/handler/grpc/handler_test.go index cd747c0578..ed997f26f2 100644 --- a/pkg/meta/redis/handler/grpc/handler_test.go +++ b/pkg/meta/redis/handler/grpc/handler_test.go @@ -22,14 +22,15 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/meta" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/meta" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/pkg/meta/redis/service" "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -78,8 +79,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -95,12 +98,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_GetMeta(t *testing.T) { + t.Parallel() type args struct { ctx context.Context key *payload.Meta_Key @@ -166,8 +169,10 @@ func Test_server_GetMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -186,12 +191,12 @@ func Test_server_GetMeta(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_GetMetas(t *testing.T) { + t.Parallel() type args struct { ctx context.Context keys *payload.Meta_Keys @@ -257,8 +262,10 @@ func Test_server_GetMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -277,12 +284,12 @@ func Test_server_GetMetas(t *testing.T) { if err := test.checkFunc(test.want, gotMv, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_GetMetaInverse(t *testing.T) { + t.Parallel() type args struct { ctx context.Context val *payload.Meta_Val @@ -348,8 +355,10 @@ func Test_server_GetMetaInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -368,12 +377,12 @@ func Test_server_GetMetaInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_GetMetasInverse(t *testing.T) { + t.Parallel() type args struct { ctx context.Context 
vals *payload.Meta_Vals @@ -439,8 +448,10 @@ func Test_server_GetMetasInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -459,12 +470,12 @@ func Test_server_GetMetasInverse(t *testing.T) { if err := test.checkFunc(test.want, gotMk, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_SetMeta(t *testing.T) { + t.Parallel() type args struct { ctx context.Context kv *payload.Meta_KeyVal @@ -530,8 +541,10 @@ func Test_server_SetMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -550,12 +563,12 @@ func Test_server_SetMeta(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_SetMetas(t *testing.T) { + t.Parallel() type args struct { ctx context.Context kvs *payload.Meta_KeyVals @@ -621,8 +634,10 @@ func Test_server_SetMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -641,12 +656,12 @@ func Test_server_SetMetas(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_DeleteMeta(t *testing.T) { + t.Parallel() type args struct { ctx context.Context key *payload.Meta_Key @@ -712,8 +727,10 @@ func Test_server_DeleteMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -732,12 +749,12 @@ func Test_server_DeleteMeta(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_DeleteMetas(t *testing.T) { + t.Parallel() type args struct { ctx context.Context keys *payload.Meta_Keys @@ -803,8 +820,10 @@ func Test_server_DeleteMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -823,12 +842,12 @@ func Test_server_DeleteMetas(t *testing.T) { if err := test.checkFunc(test.want, gotMv, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_DeleteMetaInverse(t *testing.T) { + t.Parallel() type args struct { ctx context.Context val *payload.Meta_Val @@ -894,8 +913,10 @@ func Test_server_DeleteMetaInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -914,12 +935,12 @@ func Test_server_DeleteMetaInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_server_DeleteMetasInverse(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vals *payload.Meta_Vals @@ -985,8 +1006,10 @@ func Test_server_DeleteMetasInverse(t *testing.T) { */ } - for _, 
test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1005,7 +1028,6 @@ func Test_server_DeleteMetasInverse(t *testing.T) { if err := test.checkFunc(test.want, gotMk, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/meta/redis/handler/grpc/option.go b/pkg/meta/redis/handler/grpc/option.go index eef3beae1f..1fb3854271 100644 --- a/pkg/meta/redis/handler/grpc/option.go +++ b/pkg/meta/redis/handler/grpc/option.go @@ -21,9 +21,7 @@ import "github.com/vdaas/vald/pkg/meta/redis/service" type Option func(*server) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithRedis(r service.Redis) Option { return func(s *server) { diff --git a/pkg/meta/redis/handler/grpc/option_test.go b/pkg/meta/redis/handler/grpc/option_test.go index bf86be8d11..722658822d 100644 --- a/pkg/meta/redis/handler/grpc/option_test.go +++ b/pkg/meta/redis/handler/grpc/option_test.go @@ -21,11 +21,12 @@ import ( "testing" "github.com/vdaas/vald/pkg/meta/redis/service" - "go.uber.org/goleak" ) func TestWithRedis(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { r service.Redis @@ -63,7 +64,7 @@ func TestWithRedis(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithRedis(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithRedis(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithRedis(t *testing.T) { got := WithRedis(test.args.r) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/meta/redis/handler/rest/handler.go b/pkg/meta/redis/handler/rest/handler.go index d41944d9f3..f85d9f254c 100644 --- a/pkg/meta/redis/handler/rest/handler.go +++ b/pkg/meta/redis/handler/rest/handler.go @@ -20,8 +20,8 @@ package rest import ( "net/http" - "github.com/vdaas/vald/apis/grpc/meta" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/meta" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/net/http/dump" "github.com/vdaas/vald/internal/net/http/json" ) diff --git a/pkg/meta/redis/handler/rest/handler_test.go b/pkg/meta/redis/handler/rest/handler_test.go index 466171b77b..d6ba29e992 100644 --- a/pkg/meta/redis/handler/rest/handler_test.go +++ b/pkg/meta/redis/handler/rest/handler_test.go @@ -22,13 +22,13 @@ import ( "reflect" "testing" - "github.com/vdaas/vald/apis/grpc/meta" + "github.com/vdaas/vald/apis/grpc/v1/meta" "github.com/vdaas/vald/internal/errors" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ 
-77,9 +77,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -94,12 +96,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_Index(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -165,9 +167,11 @@ func Test_handler_Index(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -185,12 +189,12 @@ func Test_handler_Index(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_GetMeta(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -256,9 +260,11 @@ func Test_handler_GetMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -276,12 +282,12 @@ func Test_handler_GetMeta(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_GetMetas(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -347,9 +353,11 @@ func Test_handler_GetMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -367,12 +375,12 @@ func Test_handler_GetMetas(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_GetMetaInverse(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -438,9 +446,11 @@ func Test_handler_GetMetaInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -458,12 +468,12 @@ func Test_handler_GetMetaInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_GetMetasInverse(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -529,9 +539,11 @@ func Test_handler_GetMetasInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -549,12 +561,12 @@ func Test_handler_GetMetasInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_SetMeta(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -620,9 
+632,11 @@ func Test_handler_SetMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -640,12 +654,12 @@ func Test_handler_SetMeta(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_SetMetas(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -711,9 +725,11 @@ func Test_handler_SetMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -731,12 +747,12 @@ func Test_handler_SetMetas(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_DeleteMeta(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -802,9 +818,11 @@ func Test_handler_DeleteMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -822,12 +840,12 @@ func Test_handler_DeleteMeta(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_DeleteMetas(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -893,9 +911,11 @@ func Test_handler_DeleteMetas(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -913,12 +933,12 @@ func Test_handler_DeleteMetas(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_DeleteMetaInverse(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -984,9 +1004,11 @@ func Test_handler_DeleteMetaInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1004,12 +1026,12 @@ func Test_handler_DeleteMetaInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_handler_DeleteMetasInverse(t *testing.T) { + t.Parallel() type args struct { w http.ResponseWriter r *http.Request @@ -1075,9 +1097,11 @@ func Test_handler_DeleteMetasInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -1095,7 +1119,6 @@ func Test_handler_DeleteMetasInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git 
a/pkg/meta/redis/handler/rest/option.go b/pkg/meta/redis/handler/rest/option.go index b5a1aa2836..1da6ab5db6 100644 --- a/pkg/meta/redis/handler/rest/option.go +++ b/pkg/meta/redis/handler/rest/option.go @@ -17,13 +17,11 @@ // Package rest provides rest api logic package rest -import "github.com/vdaas/vald/apis/grpc/meta" +import "github.com/vdaas/vald/apis/grpc/v1/meta" type Option func(*handler) -var ( - defaultOpts = []Option{} -) +var defaultOpts = []Option{} func WithMeta(m meta.MetaServer) Option { return func(h *handler) { diff --git a/pkg/meta/redis/handler/rest/option_test.go b/pkg/meta/redis/handler/rest/option_test.go index 75f1e2b964..5a1ca31571 100644 --- a/pkg/meta/redis/handler/rest/option_test.go +++ b/pkg/meta/redis/handler/rest/option_test.go @@ -20,12 +20,13 @@ package rest import ( "testing" - "github.com/vdaas/vald/apis/grpc/meta" - + "github.com/vdaas/vald/apis/grpc/v1/meta" "go.uber.org/goleak" ) func TestWithMeta(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { m meta.MetaServer @@ -63,7 +64,7 @@ func TestWithMeta(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +100,11 @@ func TestWithMeta(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +125,7 @@ func TestWithMeta(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +133,7 @@ func TestWithMeta(t *testing.T) { got := WithMeta(test.args.m) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/meta/redis/router/option.go b/pkg/meta/redis/router/option.go index 6f19fe917f..46bfe338f1 100644 --- a/pkg/meta/redis/router/option.go +++ b/pkg/meta/redis/router/option.go @@ -24,11 +24,9 @@ import ( type Option func(*router) -var ( - defaultOpts = []Option{ - WithTimeout("3s"), - } -) +var defaultOpts = []Option{ + WithTimeout("3s"), +} func WithHandler(h rest.Handler) Option { return func(r *router) { diff --git a/pkg/meta/redis/router/option_test.go b/pkg/meta/redis/router/option_test.go index 5cd9ec71b2..4ebfd09865 100644 --- a/pkg/meta/redis/router/option_test.go +++ b/pkg/meta/redis/router/option_test.go @@ -22,11 +22,12 @@ import ( "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/pkg/meta/redis/handler/rest" - "go.uber.org/goleak" ) func TestWithHandler(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { h rest.Handler @@ -64,7 +65,7 @@ func TestWithHandler(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -100,9 +101,11 @@ func TestWithHandler(t 
*testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -123,7 +126,7 @@ func TestWithHandler(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -131,7 +134,7 @@ func TestWithHandler(t *testing.T) { got := WithHandler(test.args.h) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -140,6 +143,8 @@ func TestWithHandler(t *testing.T) { } func TestWithTimeout(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { timeout string @@ -177,7 +182,7 @@ func TestWithTimeout(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -213,9 +218,11 @@ func TestWithTimeout(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -236,7 +243,7 @@ func TestWithTimeout(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -244,7 +251,7 @@ func TestWithTimeout(t *testing.T) { got := WithTimeout(test.args.timeout) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -253,6 +260,8 @@ func TestWithTimeout(t *testing.T) { } func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { eg errgroup.Group @@ -290,7 +299,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -326,9 +335,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -349,7 +360,7 @@ func TestWithErrGroup(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -357,7 +368,7 @@ func TestWithErrGroup(t *testing.T) { got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := 
test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ diff --git a/pkg/meta/redis/router/router.go b/pkg/meta/redis/router/router.go index aaffe75c61..975f821a1f 100644 --- a/pkg/meta/redis/router/router.go +++ b/pkg/meta/redis/router/router.go @@ -32,7 +32,7 @@ type router struct { timeout string } -// New returns REST route&method information from handler interface +// New returns REST route&method information from handler interface. func New(opts ...Option) http.Handler { r := new(router) diff --git a/pkg/meta/redis/router/router_test.go b/pkg/meta/redis/router/router_test.go index 25eaec0bef..97248787ae 100644 --- a/pkg/meta/redis/router/router_test.go +++ b/pkg/meta/redis/router/router_test.go @@ -23,9 +23,11 @@ import ( "testing" "github.com/vdaas/vald/internal/errors" + "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -74,8 +76,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -90,7 +95,6 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/meta/redis/service/option.go b/pkg/meta/redis/service/option.go index c457544412..8ec5d1353b 100644 --- a/pkg/meta/redis/service/option.go +++ b/pkg/meta/redis/service/option.go @@ -23,13 +23,11 @@ import ( type Option func(*client) error -var ( - defaultOpts = []Option{ - WithKVPrefix("kv"), - WithVKPrefix("vk"), - WithPrefixDelimiter("-"), - } -) +var defaultOpts = []Option{ + WithKVPrefix("kv"), + WithVKPrefix("vk"), + WithPrefixDelimiter("-"), +} func WithRedisClient(r redis.Redis) Option { return func(c *client) error { diff --git a/pkg/meta/redis/service/option_test.go b/pkg/meta/redis/service/option_test.go new file mode 100644 index 0000000000..caca15b19c --- /dev/null +++ b/pkg/meta/redis/service/option_test.go @@ -0,0 +1,610 @@ +// +// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package service manages the main logic of server. +package service + +import ( + "testing" + + "github.com/vdaas/vald/internal/db/kvs/redis" + "go.uber.org/goleak" +) + +func TestWithRedisClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + r redis.Redis + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + r: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + r: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithRedisClient(test.args.r) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithRedisClient(test.args.r) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithRedisClientConnector(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + connector redis.Connector + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + connector: nil, + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + connector: nil, + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithRedisClientConnector(test.args.connector) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithRedisClientConnector(test.args.connector) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithKVPrefix(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + name string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + name: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + name: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithKVPrefix(test.args.name) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithKVPrefix(test.args.name) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithVKPrefix(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + name string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + name: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + name: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithVKPrefix(test.args.name) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithVKPrefix(test.args.name) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} + +func TestWithPrefixDelimiter(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} + type args struct { + del string + } + type want struct { + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error + } + type test struct { + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error + beforeFunc func(args) + afterFunc func(args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ + + tests := []test{ + // TODO test cases + /* + { + name: "test_case_1", + args: args { + del: "", + }, + want: want { + obj: new(T), + }, + }, + */ + + // TODO test cases + /* + func() test { + return test { + name: "test_case_2", + args: args { + del: "", + }, + want: want { + obj: new(T), + }, + } + }(), + */ + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt) + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithPrefixDelimiter(test.args.del) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + + // Uncomment this block if the option do not return an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithPrefixDelimiter(test.args.del) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ + }) + } +} diff --git a/pkg/meta/redis/service/redis.go b/pkg/meta/redis/service/redis.go index 10bda41c0c..00dd981ac7 100644 --- a/pkg/meta/redis/service/redis.go +++ b/pkg/meta/redis/service/redis.go @@ -252,7 +252,6 @@ func (c *client) delete(ctx context.Context, pfx, pfxInv, key string) (val strin return "", errors.Wrap(c.Set(ctx, key, val), err.Error()) } return "", errors.Wrap(c.Set(ctx, val, key), err.Error()) - } return val, nil } diff --git a/pkg/meta/redis/service/redis_test.go b/pkg/meta/redis/service/redis_test.go index c0237735e5..bb88b54841 100644 --- a/pkg/meta/redis/service/redis_test.go +++ b/pkg/meta/redis/service/redis_test.go @@ -28,6 +28,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -80,8 +81,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -97,12 +100,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Disconnect(t *testing.T) { + t.Parallel() type fields struct { connector redis.Connector db redis.Redis @@ -163,8 +166,10 @@ func Test_client_Disconnect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { 
+ tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -187,12 +192,12 @@ func Test_client_Disconnect(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Connect(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -263,8 +268,10 @@ func Test_client_Connect(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -287,12 +294,12 @@ func Test_client_Connect(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Get(t *testing.T) { + t.Parallel() type args struct { ctx context.Context key string @@ -370,8 +377,10 @@ func Test_client_Get(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -394,12 +403,12 @@ func Test_client_Get(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_GetMultiple(t *testing.T) { + t.Parallel() type args struct { ctx context.Context keys []string @@ -477,8 +486,10 @@ func Test_client_GetMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -501,12 +512,12 @@ func Test_client_GetMultiple(t *testing.T) { if err := test.checkFunc(test.want, gotVals, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_GetInverse(t *testing.T) { + t.Parallel() type args struct { ctx context.Context val string @@ -584,8 +595,10 @@ func Test_client_GetInverse(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -608,12 +621,12 @@ func Test_client_GetInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_GetInverseMultiple(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vals []string @@ -691,8 +704,10 @@ func Test_client_GetInverseMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -715,12 +730,12 @@ func Test_client_GetInverseMultiple(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_appendPrefix(t *testing.T) { + t.Parallel() type args struct { prefix string key string @@ -794,8 +809,10 @@ func Test_client_appendPrefix(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -818,12 +835,12 @@ func Test_client_appendPrefix(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = 
%v", err) } - }) } } func Test_client_get(t *testing.T) { + t.Parallel() type args struct { ctx context.Context prefix string @@ -904,8 +921,10 @@ func Test_client_get(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -928,12 +947,12 @@ func Test_client_get(t *testing.T) { if err := test.checkFunc(test.want, gotVal, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_getMulti(t *testing.T) { + t.Parallel() type args struct { ctx context.Context prefix string @@ -1014,8 +1033,10 @@ func Test_client_getMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1038,12 +1059,12 @@ func Test_client_getMulti(t *testing.T) { if err := test.checkFunc(test.want, gotVals, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Set(t *testing.T) { + t.Parallel() type args struct { ctx context.Context key string @@ -1120,8 +1141,10 @@ func Test_client_Set(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1144,12 +1167,12 @@ func Test_client_Set(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_SetMultiple(t *testing.T) { + t.Parallel() type args struct { ctx context.Context kvs map[string]string @@ -1223,8 +1246,10 @@ func Test_client_SetMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1247,12 +1272,12 @@ func Test_client_SetMultiple(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_Delete(t *testing.T) { + t.Parallel() type args struct { ctx context.Context key string @@ -1330,8 +1355,10 @@ func Test_client_Delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1354,12 +1381,12 @@ func Test_client_Delete(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_DeleteMultiple(t *testing.T) { + t.Parallel() type args struct { ctx context.Context keys []string @@ -1437,8 +1464,10 @@ func Test_client_DeleteMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1461,12 +1490,12 @@ func Test_client_DeleteMultiple(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_DeleteInverse(t *testing.T) { + t.Parallel() type args struct { ctx context.Context val string @@ -1544,8 +1573,10 @@ func Test_client_DeleteInverse(t *testing.T) { */ } - for _, test := 
range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1568,12 +1599,12 @@ func Test_client_DeleteInverse(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_DeleteInverseMultiple(t *testing.T) { + t.Parallel() type args struct { ctx context.Context vals []string @@ -1651,8 +1682,10 @@ func Test_client_DeleteInverseMultiple(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1675,12 +1708,12 @@ func Test_client_DeleteInverseMultiple(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_delete(t *testing.T) { + t.Parallel() type args struct { ctx context.Context pfx string @@ -1764,8 +1797,10 @@ func Test_client_delete(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1788,12 +1823,12 @@ func Test_client_delete(t *testing.T) { if err := test.checkFunc(test.want, gotVal, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_client_deleteMulti(t *testing.T) { + t.Parallel() type args struct { ctx context.Context pfx string @@ -1877,8 +1912,10 @@ func Test_client_deleteMulti(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1901,7 +1938,6 @@ func Test_client_deleteMulti(t *testing.T) { if err := test.checkFunc(test.want, gotVals, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/meta/redis/usecase/meta.go b/pkg/meta/redis/usecase/meta.go index b80a833b78..844fa1c2ab 100644 --- a/pkg/meta/redis/usecase/meta.go +++ b/pkg/meta/redis/usecase/meta.go @@ -19,7 +19,7 @@ package usecase import ( "context" - "github.com/vdaas/vald/apis/grpc/meta" + "github.com/vdaas/vald/apis/grpc/v1/meta" iconf "github.com/vdaas/vald/internal/config" "github.com/vdaas/vald/internal/db/kvs/redis" "github.com/vdaas/vald/internal/errgroup" @@ -138,7 +138,6 @@ func New(cfg *config.Data) (r runner.Runner, err error) { }), // TODO add GraphQL handler ) - if err != nil { return nil, err } diff --git a/pkg/meta/redis/usecase/meta_test.go b/pkg/meta/redis/usecase/meta_test.go index aa73ddd3da..f4822e6b36 100644 --- a/pkg/meta/redis/usecase/meta_test.go +++ b/pkg/meta/redis/usecase/meta_test.go @@ -28,11 +28,11 @@ import ( "github.com/vdaas/vald/internal/servers/starter" "github.com/vdaas/vald/pkg/meta/redis/config" "github.com/vdaas/vald/pkg/meta/redis/service" - "go.uber.org/goleak" ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -85,9 +85,11 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -102,12 +104,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, 
gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -178,9 +180,11 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -202,12 +206,12 @@ func Test_run_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -282,9 +286,11 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -306,12 +312,12 @@ func Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -382,9 +388,11 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -406,12 +414,12 @@ func Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -482,9 +490,11 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -506,12 +516,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -582,9 +592,11 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -606,7 +618,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/tools/cli/loadtest/assets/dataset.go b/pkg/tools/cli/loadtest/assets/dataset.go index a5e71a6161..7826da451b 100644 --- a/pkg/tools/cli/loadtest/assets/dataset.go +++ b/pkg/tools/cli/loadtest/assets/dataset.go @@ -25,9 +25,7 @@ import ( "github.com/vdaas/vald/internal/log" ) -var ( - ErrOutOfBounds = x1b.ErrOutOfBounds -) +var ErrOutOfBounds = x1b.ErrOutOfBounds // Dataset is representation of train and test dataset. 
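// The hunks above and below apply one recurring table-driven pattern to every
// test: copy the loop variable (`test := tc`) so each parallel subtest owns its
// own case, call tt.Parallel() inside the subtest, and defer
// goleak.VerifyNone(tt) on the subtest's *testing.T rather than the outer t so
// goroutine leaks are reported against the case that produced them. A minimal,
// self-contained sketch of that pattern follows; the sum function and the test
// cases are illustrative assumptions, not part of the Vald code base.
package example

import (
	"testing"

	"go.uber.org/goleak"
)

func sum(a, b int) int { return a + b }

func TestSum(t *testing.T) {
	t.Parallel()
	type args struct{ a, b int }
	type want struct{ want int }
	tests := []struct {
		name string
		args args
		want want
	}{
		{name: "adds positive numbers", args: args{a: 1, b: 2}, want: want{want: 3}},
		{name: "adds negative numbers", args: args{a: -1, b: -2}, want: want{want: -3}},
	}
	for _, tc := range tests {
		test := tc // capture the loop variable for the parallel subtest
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			defer goleak.VerifyNone(tt)
			if got := sum(test.args.a, test.args.b); got != test.want.want {
				tt.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, test.want.want)
			}
		})
	}
}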
type Dataset interface { diff --git a/pkg/tools/cli/loadtest/assets/dataset_test.go b/pkg/tools/cli/loadtest/assets/dataset_test.go index ef7c6aab94..b503afdfed 100644 --- a/pkg/tools/cli/loadtest/assets/dataset_test.go +++ b/pkg/tools/cli/loadtest/assets/dataset_test.go @@ -24,6 +24,7 @@ import ( ) func Test_dataset_Name(t *testing.T) { + t.Parallel() type fields struct { name string dimension int @@ -43,7 +44,7 @@ func Test_dataset_Name(t *testing.T) { } defaultCheckFunc := func(w want, got string) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -81,8 +82,10 @@ func Test_dataset_Name(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -104,12 +107,12 @@ func Test_dataset_Name(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_dataset_Dimension(t *testing.T) { + t.Parallel() type fields struct { name string dimension int @@ -129,7 +132,7 @@ func Test_dataset_Dimension(t *testing.T) { } defaultCheckFunc := func(w want, got int) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -167,8 +170,10 @@ func Test_dataset_Dimension(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -190,12 +195,12 @@ func Test_dataset_Dimension(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_dataset_DistanceType(t *testing.T) { + t.Parallel() type fields struct { name string dimension int @@ -215,7 +220,7 @@ func Test_dataset_DistanceType(t *testing.T) { } defaultCheckFunc := func(w want, got string) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -253,8 +258,10 @@ func Test_dataset_DistanceType(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -276,12 +283,12 @@ func Test_dataset_DistanceType(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_dataset_ObjectType(t *testing.T) { + t.Parallel() type fields struct { name string dimension int @@ -301,7 +308,7 @@ func Test_dataset_ObjectType(t *testing.T) { } defaultCheckFunc := func(w want, got string) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -339,8 +346,10 @@ func Test_dataset_ObjectType(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -362,12 +371,12 @@ func Test_dataset_ObjectType(t 
*testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_findDir(t *testing.T) { + t.Parallel() type args struct { path string } @@ -385,10 +394,10 @@ func Test_findDir(t *testing.T) { } defaultCheckFunc := func(w want, got string, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -420,8 +429,10 @@ func Test_findDir(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -437,12 +448,12 @@ func Test_findDir(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestData(t *testing.T) { + t.Parallel() type args struct { name string } @@ -459,7 +470,7 @@ func TestData(t *testing.T) { } defaultCheckFunc := func(w want, got func() (Dataset, error)) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -491,8 +502,10 @@ func TestData(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -508,7 +521,6 @@ func TestData(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/tools/cli/loadtest/assets/hdf5_loader_test.go b/pkg/tools/cli/loadtest/assets/hdf5_loader_test.go index 55ed91fcbf..f45f3cda15 100644 --- a/pkg/tools/cli/loadtest/assets/hdf5_loader_test.go +++ b/pkg/tools/cli/loadtest/assets/hdf5_loader_test.go @@ -25,6 +25,7 @@ import ( ) func Test_loadFloat32(t *testing.T) { + t.Parallel() type args struct { dset *hdf5.Dataset npoints int @@ -45,10 +46,10 @@ func Test_loadFloat32(t *testing.T) { } defaultCheckFunc := func(w want, got interface{}, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -86,8 +87,10 @@ func Test_loadFloat32(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -103,12 +106,12 @@ func Test_loadFloat32(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_loadInt(t *testing.T) { + t.Parallel() type args struct { dset *hdf5.Dataset npoints int @@ -129,10 +132,10 @@ func Test_loadInt(t *testing.T) { } defaultCheckFunc := func(w want, got interface{}, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return 
errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -170,8 +173,10 @@ func Test_loadInt(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -187,12 +192,12 @@ func Test_loadInt(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_loadDataset(t *testing.T) { + t.Parallel() type args struct { file *hdf5.File name string @@ -213,13 +218,13 @@ func Test_loadDataset(t *testing.T) { } defaultCheckFunc := func(w want, gotDim int, gotVec interface{}, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(gotDim, w.wantDim) { - return errors.Errorf("got = %v, want %v", gotDim, w.wantDim) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDim, w.wantDim) } if !reflect.DeepEqual(gotVec, w.wantVec) { - return errors.Errorf("got = %v, want %v", gotVec, w.wantVec) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) } return nil } @@ -255,8 +260,10 @@ func Test_loadDataset(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -272,12 +279,12 @@ func Test_loadDataset(t *testing.T) { if err := test.checkFunc(test.want, gotDim, gotVec, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestLoad(t *testing.T) { + t.Parallel() type args struct { path string } @@ -299,22 +306,22 @@ func TestLoad(t *testing.T) { } defaultCheckFunc := func(w want, gotTrain [][]float32, gotTest [][]float32, gotDistances [][]float32, gotNeighbors [][]int, gotDim int, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(gotTrain, w.wantTrain) { - return errors.Errorf("got = %v, want %v", gotTrain, w.wantTrain) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTrain, w.wantTrain) } if !reflect.DeepEqual(gotTest, w.wantTest) { - return errors.Errorf("got = %v, want %v", gotTest, w.wantTest) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTest, w.wantTest) } if !reflect.DeepEqual(gotDistances, w.wantDistances) { - return errors.Errorf("got = %v, want %v", gotDistances, w.wantDistances) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDistances, w.wantDistances) } if !reflect.DeepEqual(gotNeighbors, w.wantNeighbors) { - return errors.Errorf("got = %v, want %v", gotNeighbors, w.wantNeighbors) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotNeighbors, w.wantNeighbors) } if !reflect.DeepEqual(gotDim, w.wantDim) { - return errors.Errorf("got = %v, want %v", gotDim, w.wantDim) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDim, w.wantDim) } return nil } @@ -346,8 +353,10 @@ func TestLoad(t *testing.T) { */ } - for _, test := range tests { + for _, 
tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -363,7 +372,6 @@ func TestLoad(t *testing.T) { if err := test.checkFunc(test.want, gotTrain, gotTest, gotDistances, gotNeighbors, gotDim, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/tools/cli/loadtest/assets/large_dataset_test.go b/pkg/tools/cli/loadtest/assets/large_dataset_test.go index 9ac09a2104..dae46ce2a9 100644 --- a/pkg/tools/cli/loadtest/assets/large_dataset_test.go +++ b/pkg/tools/cli/loadtest/assets/large_dataset_test.go @@ -25,6 +25,7 @@ import ( ) func Test_loadLargeData(t *testing.T) { + t.Parallel() type args struct { trainFileName string queryFileName string @@ -47,7 +48,7 @@ func Test_loadLargeData(t *testing.T) { } defaultCheckFunc := func(w want, got func() (Dataset, error)) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -91,8 +92,10 @@ func Test_loadLargeData(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -108,12 +111,12 @@ func Test_loadLargeData(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_largeDataset_Train(t *testing.T) { + t.Parallel() type args struct { i int } @@ -139,10 +142,10 @@ func Test_largeDataset_Train(t *testing.T) { } defaultCheckFunc := func(w want, got interface{}, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -188,8 +191,10 @@ func Test_largeDataset_Train(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -212,12 +217,12 @@ func Test_largeDataset_Train(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_largeDataset_TrainSize(t *testing.T) { + t.Parallel() type fields struct { dataset *dataset train x1b.BillionScaleVectors @@ -238,7 +243,7 @@ func Test_largeDataset_TrainSize(t *testing.T) { } defaultCheckFunc := func(w want, got int) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -278,8 +283,10 @@ func Test_largeDataset_TrainSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -302,12 +309,12 @@ func Test_largeDataset_TrainSize(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_largeDataset_Query(t *testing.T) { + t.Parallel() type args struct { i int } @@ -333,10 +340,10 
@@ func Test_largeDataset_Query(t *testing.T) { } defaultCheckFunc := func(w want, got interface{}, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -382,8 +389,10 @@ func Test_largeDataset_Query(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -406,12 +415,12 @@ func Test_largeDataset_Query(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_largeDataset_QuerySize(t *testing.T) { + t.Parallel() type fields struct { dataset *dataset train x1b.BillionScaleVectors @@ -432,7 +441,7 @@ func Test_largeDataset_QuerySize(t *testing.T) { } defaultCheckFunc := func(w want, got int) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -472,8 +481,10 @@ func Test_largeDataset_QuerySize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -496,12 +507,12 @@ func Test_largeDataset_QuerySize(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_largeDataset_Distance(t *testing.T) { + t.Parallel() type args struct { i int } @@ -527,10 +538,10 @@ func Test_largeDataset_Distance(t *testing.T) { } defaultCheckFunc := func(w want, got []float32, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -576,8 +587,10 @@ func Test_largeDataset_Distance(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -600,12 +613,12 @@ func Test_largeDataset_Distance(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_largeDataset_DistanceSize(t *testing.T) { + t.Parallel() type fields struct { dataset *dataset train x1b.BillionScaleVectors @@ -626,7 +639,7 @@ func Test_largeDataset_DistanceSize(t *testing.T) { } defaultCheckFunc := func(w want, got int) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -666,8 +679,10 @@ func Test_largeDataset_DistanceSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { 
test.beforeFunc() @@ -690,12 +705,12 @@ func Test_largeDataset_DistanceSize(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_largeDataset_Neighbor(t *testing.T) { + t.Parallel() type args struct { i int } @@ -721,10 +736,10 @@ func Test_largeDataset_Neighbor(t *testing.T) { } defaultCheckFunc := func(w want, got []int, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -770,8 +785,10 @@ func Test_largeDataset_Neighbor(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -794,12 +811,12 @@ func Test_largeDataset_Neighbor(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_largeDataset_NeighborSize(t *testing.T) { + t.Parallel() type fields struct { dataset *dataset train x1b.BillionScaleVectors @@ -820,7 +837,7 @@ func Test_largeDataset_NeighborSize(t *testing.T) { } defaultCheckFunc := func(w want, got int) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -860,8 +877,10 @@ func Test_largeDataset_NeighborSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -884,12 +903,12 @@ func Test_largeDataset_NeighborSize(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_largeDataset_Dimension(t *testing.T) { + t.Parallel() type fields struct { dataset *dataset train x1b.BillionScaleVectors @@ -910,7 +929,7 @@ func Test_largeDataset_Dimension(t *testing.T) { } defaultCheckFunc := func(w want, got int) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -950,8 +969,10 @@ func Test_largeDataset_Dimension(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -974,12 +995,12 @@ func Test_largeDataset_Dimension(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_largeDataset_DistanceType(t *testing.T) { + t.Parallel() type fields struct { dataset *dataset train x1b.BillionScaleVectors @@ -1000,7 +1021,7 @@ func Test_largeDataset_DistanceType(t *testing.T) { } defaultCheckFunc := func(w want, got string) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -1040,8 +1061,10 @@ func Test_largeDataset_DistanceType(t *testing.T) { */ } - for _, test := range 
tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -1064,12 +1087,12 @@ func Test_largeDataset_DistanceType(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_largeDataset_ObjectType(t *testing.T) { + t.Parallel() type fields struct { dataset *dataset train x1b.BillionScaleVectors @@ -1090,7 +1113,7 @@ func Test_largeDataset_ObjectType(t *testing.T) { } defaultCheckFunc := func(w want, got string) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -1130,8 +1153,10 @@ func Test_largeDataset_ObjectType(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -1154,12 +1179,12 @@ func Test_largeDataset_ObjectType(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_largeDataset_Name(t *testing.T) { + t.Parallel() type fields struct { dataset *dataset train x1b.BillionScaleVectors @@ -1180,7 +1205,7 @@ func Test_largeDataset_Name(t *testing.T) { } defaultCheckFunc := func(w want, got string) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -1220,8 +1245,10 @@ func Test_largeDataset_Name(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -1244,7 +1271,6 @@ func Test_largeDataset_Name(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/tools/cli/loadtest/assets/small_dataset.go b/pkg/tools/cli/loadtest/assets/small_dataset.go index 0c43642bdc..1c4412db38 100644 --- a/pkg/tools/cli/loadtest/assets/small_dataset.go +++ b/pkg/tools/cli/loadtest/assets/small_dataset.go @@ -163,7 +163,7 @@ func (s *smallDataset) Distance(i int) ([]float32, error) { return s.distances[i], nil } -// DistanceSize returns size of distances +// DistanceSize returns size of distances. 
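// The *_test.go hunks in this area also standardize the comparison helper:
// errors are matched with errors.Is, values with reflect.DeepEqual, and both
// sides are printed with %#v so a mismatch shows full Go syntax. A stand-alone
// sketch of such a defaultCheckFunc-style helper is shown below; the want
// struct and the checkIntResult name are illustrative assumptions, only the
// errors package (github.com/vdaas/vald/internal/errors) comes from the diff.
package example

import (
	"reflect"

	"github.com/vdaas/vald/internal/errors"
)

type want struct {
	want int
	err  error
}

// checkIntResult mirrors the generated defaultCheckFunc shape: verify the
// returned error first, then deep-compare the returned value.
func checkIntResult(w want, got int, err error) error {
	if !errors.Is(err, w.err) {
		return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err)
	}
	if !reflect.DeepEqual(got, w.want) {
		return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want)
	}
	return nil
}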
func (s *smallDataset) DistanceSize() int { return len(s.distances) } diff --git a/pkg/tools/cli/loadtest/assets/small_dataset_test.go b/pkg/tools/cli/loadtest/assets/small_dataset_test.go index 4bae3775da..e13d866b22 100644 --- a/pkg/tools/cli/loadtest/assets/small_dataset_test.go +++ b/pkg/tools/cli/loadtest/assets/small_dataset_test.go @@ -24,6 +24,7 @@ import ( ) func Test_loadSmallData(t *testing.T) { + t.Parallel() type args struct { fileName string datasetName string @@ -43,7 +44,7 @@ func Test_loadSmallData(t *testing.T) { } defaultCheckFunc := func(w want, got func() (Dataset, error)) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -81,8 +82,10 @@ func Test_loadSmallData(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -98,12 +101,12 @@ func Test_loadSmallData(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_identity(t *testing.T) { + t.Parallel() type args struct { dim int } @@ -120,7 +123,7 @@ func Test_identity(t *testing.T) { } defaultCheckFunc := func(w want, got func() (Dataset, error)) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -152,8 +155,10 @@ func Test_identity(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -169,12 +174,12 @@ func Test_identity(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_random(t *testing.T) { + t.Parallel() type args struct { dim int size int @@ -192,7 +197,7 @@ func Test_random(t *testing.T) { } defaultCheckFunc := func(w want, got func() (Dataset, error)) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -226,8 +231,10 @@ func Test_random(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -243,12 +250,12 @@ func Test_random(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_gaussian(t *testing.T) { + t.Parallel() type args struct { dim int size int @@ -268,7 +275,7 @@ func Test_gaussian(t *testing.T) { } defaultCheckFunc := func(w want, got func() (Dataset, error)) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -306,8 +313,10 @@ func Test_gaussian(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -323,12 +332,12 @@ func 
Test_gaussian(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_smallDataset_Train(t *testing.T) { + t.Parallel() type args struct { i int } @@ -354,10 +363,10 @@ func Test_smallDataset_Train(t *testing.T) { } defaultCheckFunc := func(w want, got interface{}, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -403,8 +412,10 @@ func Test_smallDataset_Train(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -427,12 +438,12 @@ func Test_smallDataset_Train(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_smallDataset_TrainSize(t *testing.T) { + t.Parallel() type fields struct { dataset *dataset train [][]float32 @@ -453,7 +464,7 @@ func Test_smallDataset_TrainSize(t *testing.T) { } defaultCheckFunc := func(w want, got int) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -493,8 +504,10 @@ func Test_smallDataset_TrainSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -517,12 +530,12 @@ func Test_smallDataset_TrainSize(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_smallDataset_Query(t *testing.T) { + t.Parallel() type args struct { i int } @@ -548,10 +561,10 @@ func Test_smallDataset_Query(t *testing.T) { } defaultCheckFunc := func(w want, got interface{}, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -597,8 +610,10 @@ func Test_smallDataset_Query(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -621,12 +636,12 @@ func Test_smallDataset_Query(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_smallDataset_QuerySize(t *testing.T) { + t.Parallel() type fields struct { dataset *dataset train [][]float32 @@ -647,7 +662,7 @@ func Test_smallDataset_QuerySize(t *testing.T) { } defaultCheckFunc := func(w want, got int) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -687,8 +702,10 @@ func Test_smallDataset_QuerySize(t *testing.T) { 
*/ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -711,12 +728,12 @@ func Test_smallDataset_QuerySize(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_smallDataset_Distance(t *testing.T) { + t.Parallel() type args struct { i int } @@ -742,10 +759,10 @@ func Test_smallDataset_Distance(t *testing.T) { } defaultCheckFunc := func(w want, got []float32, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -791,8 +808,10 @@ func Test_smallDataset_Distance(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -815,12 +834,12 @@ func Test_smallDataset_Distance(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_smallDataset_DistanceSize(t *testing.T) { + t.Parallel() type fields struct { dataset *dataset train [][]float32 @@ -841,7 +860,7 @@ func Test_smallDataset_DistanceSize(t *testing.T) { } defaultCheckFunc := func(w want, got int) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -881,8 +900,10 @@ func Test_smallDataset_DistanceSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -905,12 +926,12 @@ func Test_smallDataset_DistanceSize(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_smallDataset_Neighbor(t *testing.T) { + t.Parallel() type args struct { i int } @@ -936,10 +957,10 @@ func Test_smallDataset_Neighbor(t *testing.T) { } defaultCheckFunc := func(w want, got []int, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got error = %v, want %v", err, w.err) + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) } if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -985,8 +1006,10 @@ func Test_smallDataset_Neighbor(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -1009,12 +1032,12 @@ func Test_smallDataset_Neighbor(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_smallDataset_NeighborSize(t *testing.T) { + t.Parallel() type fields struct { dataset *dataset train [][]float32 @@ -1035,7 +1058,7 @@ func Test_smallDataset_NeighborSize(t *testing.T) { } defaultCheckFunc 
:= func(w want, got int) error { if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got = %v, want %v", got, w.want) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) } return nil } @@ -1075,8 +1098,10 @@ func Test_smallDataset_NeighborSize(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -1099,7 +1124,6 @@ func Test_smallDataset_NeighborSize(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/tools/cli/loadtest/config/config.go b/pkg/tools/cli/loadtest/config/config.go index 6b81a124a8..047547d39d 100644 --- a/pkg/tools/cli/loadtest/config/config.go +++ b/pkg/tools/cli/loadtest/config/config.go @@ -70,46 +70,11 @@ func (o Operation) String() string { } } -// Service is service type of implemented load test. -type Service uint8 - -// Service definitions. -const ( - UnknownService Service = iota - Agent - Gateway -) - -// ServiceMethod converts string to Service. -func ServiceMethod(s string) Service { - switch strings.ToLower(s) { - case "agent": - return Agent - case "gateway": - return Gateway - default: - return UnknownService - } -} - -// String converts Service to string. -func (s Service) String() string { - switch s { - case Agent: - return "Agent" - case Gateway: - return "Gateway" - default: - return "Unknown service" - } -} - // Data represent a application setting data content (config.yaml). // In K8s environment, this configuration is stored in K8s ConfigMap. type Data struct { config.GlobalConfig `json:",inline" yaml:",inline"` Addr string `json:"addr" yaml:"addr"` - Service string `json:"service" yaml:"service"` Operation string `json:"operation" yaml:"operation"` Dataset string `json:"dataset" yaml:"dataset"` Concurrency int `json:"concurrency" yaml:"concurrency"` @@ -137,7 +102,6 @@ func NewConfig(path string) (cfg *Data, err error) { cfg.Operation = config.GetActualValue(cfg.Operation) cfg.Dataset = config.GetActualValue(cfg.Dataset) cfg.ProgressDuration = config.GetActualValue(cfg.ProgressDuration) - cfg.Service = config.GetActualValue(cfg.Service) return cfg, nil } diff --git a/pkg/tools/cli/loadtest/config/config_test.go b/pkg/tools/cli/loadtest/config/config_test.go index 60016a5c18..f05ddc2cb9 100644 --- a/pkg/tools/cli/loadtest/config/config_test.go +++ b/pkg/tools/cli/loadtest/config/config_test.go @@ -26,6 +26,7 @@ import ( ) func TestOperationMethod(t *testing.T) { + t.Parallel() type args struct { s string } @@ -74,9 +75,11 @@ func TestOperationMethod(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -91,12 +94,12 @@ func TestOperationMethod(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestOperation_String(t *testing.T) { + t.Parallel() type want struct { want string } @@ -136,9 +139,11 @@ func TestOperation_String(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() } @@ -153,12 +158,12 @@ func 
TestOperation_String(t *testing.T) { if err := test.checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) } - }) } } func TestNewConfig(t *testing.T) { + t.Parallel() type args struct { path string } @@ -211,9 +216,11 @@ func TestNewConfig(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -228,140 +235,6 @@ func TestNewConfig(t *testing.T) { if err := test.checkFunc(test.want, gotCfg, err); err != nil { tt.Errorf("error = %v", err) } - - }) - } -} - -func TestServiceMethod(t *testing.T) { - type args struct { - s string - } - type want struct { - want Service - } - type test struct { - name string - args args - want want - checkFunc func(want, Service) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got Service) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - s: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - s: "", - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := ServiceMethod(test.args.s) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func TestService_String(t *testing.T) { - type want struct { - want string - } - type test struct { - name string - s Service - want want - checkFunc func(want, string) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got string) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := test.s.String() - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) } } diff --git a/pkg/tools/cli/loadtest/service/insert.go b/pkg/tools/cli/loadtest/service/insert.go index 9bf24d2b1b..0eef2ad3f7 100644 --- a/pkg/tools/cli/loadtest/service/insert.go +++ b/pkg/tools/cli/loadtest/service/insert.go @@ -20,13 +20,11 @@ import ( "sync/atomic" "github.com/kpango/fuid" - "github.com/vdaas/vald/apis/grpc/agent/core" - "github.com/vdaas/vald/apis/grpc/gateway/vald" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" + 
"github.com/vdaas/vald/apis/grpc/v1/vald" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/pkg/tools/cli/loadtest/assets" - "github.com/vdaas/vald/pkg/tools/cli/loadtest/config" ) func insertRequestProvider(dataset assets.Dataset, batchSize int) (f func() interface{}, size int, err error) { @@ -53,9 +51,11 @@ func objectVectorProvider(dataset assets.Dataset) (func() interface{}, int) { if err != nil { return nil } - ret = &payload.Object_Vector{ - Id: fuid.String(), - Vector: v.([]float32), + ret = &payload.Insert_Request{ + Vector: &payload.Object_Vector{ + Id: fuid.String(), + Vector: v.([]float32), + }, } } return ret @@ -69,67 +69,32 @@ func objectVectorsProvider(dataset assets.Dataset, n int) (func() interface{}, i size = size + 1 } return func() (ret interface{}) { - v := make([]*payload.Object_Vector, 0, n) + r := make([]*payload.Insert_Request, 0, n) for i := 0; i < n; i++ { d := provider() if d == nil { break } - v = append(v, d.(*payload.Object_Vector)) + r = append(r, d.(*payload.Insert_Request)) } - if len(v) == 0 { + if len(r) == 0 { return nil } - return &payload.Object_Vectors{ - Vectors: v, + return &payload.Insert_MultiRequest{ + Requests: r, } }, size } -type inserter interface { - Insert(context.Context, *payload.Object_Vector, ...grpc.CallOption) (*payload.Empty, error) - MultiInsert(context.Context, *payload.Object_Vectors, ...grpc.CallOption) (*payload.Empty, error) -} - -func agent(conn *grpc.ClientConn) inserter { - return core.NewAgentClient(conn) -} - -func gateway(conn *grpc.ClientConn) inserter { - return vald.NewValdClient(conn) -} - -func insert(c func(*grpc.ClientConn) inserter) loadFunc { - return func(ctx context.Context, conn *grpc.ClientConn, i interface{}, copts ...grpc.CallOption) (interface{}, error) { - return c(conn).Insert(ctx, i.(*payload.Object_Vector), copts...) - } -} - -func bulkInsert(c func(*grpc.ClientConn) inserter) loadFunc { - return func(ctx context.Context, conn *grpc.ClientConn, i interface{}, copts ...grpc.CallOption) (interface{}, error) { - return c(conn).MultiInsert(ctx, i.(*payload.Object_Vectors), copts...) - } -} - func (l *loader) newInsert() (f loadFunc, err error) { switch { case l.batchSize == 1: - switch l.service { - case config.Agent: - f = insert(agent) - case config.Gateway: - f = insert(gateway) - default: - err = errors.Errorf("undefined service: %s", l.service.String()) + f = func(ctx context.Context, conn *grpc.ClientConn, i interface{}, copts ...grpc.CallOption) (interface{}, error) { + return vald.NewInsertClient(conn).Insert(ctx, i.(*payload.Insert_Request), copts...) } case l.batchSize >= 2: - switch l.service { - case config.Agent: - f = bulkInsert(agent) - case config.Gateway: - f = bulkInsert(gateway) - default: - err = errors.Errorf("undefined service: %s", l.service.String()) + f = func(ctx context.Context, conn *grpc.ClientConn, i interface{}, copts ...grpc.CallOption) (interface{}, error) { + return vald.NewInsertClient(conn).MultiInsert(ctx, i.(*payload.Insert_MultiRequest), copts...) } default: err = errors.New("batch size must be natural number.") @@ -142,20 +107,7 @@ func (l *loader) newInsert() (f loadFunc, err error) { func (l *loader) newStreamInsert() (f loadFunc, err error) { l.batchSize = 1 - switch l.service { - case config.Agent: - f = func(ctx context.Context, conn *grpc.ClientConn, i interface{}, copts ...grpc.CallOption) (interface{}, error) { - return core.NewAgentClient(conn).StreamInsert(ctx, copts...) 
- } - case config.Gateway: - f = func(ctx context.Context, conn *grpc.ClientConn, i interface{}, copts ...grpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).StreamInsert(ctx, copts...) - } - default: - err = errors.Errorf("undefined service: %s", l.service.String()) - } - if err != nil { - return nil, err - } - return f, nil + return func(ctx context.Context, conn *grpc.ClientConn, i interface{}, copts ...grpc.CallOption) (interface{}, error) { + return vald.NewValdClient(conn).StreamInsert(ctx, copts...) + }, nil } diff --git a/pkg/tools/cli/loadtest/service/insert_test.go b/pkg/tools/cli/loadtest/service/insert_test.go index 62471bc4af..3a786b1eaf 100644 --- a/pkg/tools/cli/loadtest/service/insert_test.go +++ b/pkg/tools/cli/loadtest/service/insert_test.go @@ -29,6 +29,7 @@ import ( ) func Test_insertRequestProvider(t *testing.T) { + t.Parallel() type args struct { dataset assets.Dataset batchSize int @@ -88,8 +89,10 @@ func Test_insertRequestProvider(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -105,12 +108,12 @@ func Test_insertRequestProvider(t *testing.T) { if err := test.checkFunc(test.want, gotF, gotSize, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_objectVectorProvider(t *testing.T) { + t.Parallel() type args struct { dataset assets.Dataset } @@ -163,8 +166,10 @@ func Test_objectVectorProvider(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -180,12 +185,12 @@ func Test_objectVectorProvider(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_objectVectorsProvider(t *testing.T) { + t.Parallel() type args struct { dataset assets.Dataset n int @@ -241,8 +246,10 @@ func Test_objectVectorsProvider(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -258,296 +265,12 @@ func Test_objectVectorsProvider(t *testing.T) { if err := test.checkFunc(test.want, got, got1); err != nil { tt.Errorf("error = %v", err) } - - }) - } -} - -func Test_agent(t *testing.T) { - type args struct { - conn *grpc.ClientConn - } - type want struct { - want inserter - } - type test struct { - name string - args args - want want - checkFunc func(want, inserter) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got inserter) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - conn: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - conn: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer 
test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := agent(test.args.conn) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_gateway(t *testing.T) { - type args struct { - conn *grpc.ClientConn - } - type want struct { - want inserter - } - type test struct { - name string - args args - want want - checkFunc func(want, inserter) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got inserter) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - conn: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - conn: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := gateway(test.args.conn) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_insert(t *testing.T) { - type args struct { - c func(*grpc.ClientConn) inserter - } - type want struct { - want loadFunc - } - type test struct { - name string - args args - want want - checkFunc func(want, loadFunc) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got loadFunc) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - c: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - c: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := insert(test.args.c) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - - }) - } -} - -func Test_bulkInsert(t *testing.T) { - type args struct { - c func(*grpc.ClientConn) inserter - } - type want struct { - want loadFunc - } - type test struct { - name string - args args - want want - checkFunc func(want, loadFunc) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got loadFunc) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - c: nil, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - c: nil, - }, - want: want{}, - checkFunc: 
defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := bulkInsert(test.args.c) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) } } func Test_loader_newInsert(t *testing.T) { + t.Parallel() type fields struct { eg errgroup.Group client grpc.Client @@ -559,7 +282,6 @@ func Test_loader_newInsert(t *testing.T) { loaderFunc loadFunc dataProvider func() interface{} dataSize int - service config.Service operation config.Operation } type want struct { @@ -599,7 +321,6 @@ func Test_loader_newInsert(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -623,7 +344,6 @@ func Test_loader_newInsert(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -633,8 +353,10 @@ func Test_loader_newInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -656,7 +378,6 @@ func Test_loader_newInsert(t *testing.T) { loaderFunc: test.fields.loaderFunc, dataProvider: test.fields.dataProvider, dataSize: test.fields.dataSize, - service: test.fields.service, operation: test.fields.operation, } @@ -664,12 +385,12 @@ func Test_loader_newInsert(t *testing.T) { if err := test.checkFunc(test.want, gotF, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_loader_newStreamInsert(t *testing.T) { + t.Parallel() type fields struct { eg errgroup.Group client grpc.Client @@ -681,7 +402,6 @@ func Test_loader_newStreamInsert(t *testing.T) { loaderFunc loadFunc dataProvider func() interface{} dataSize int - service config.Service operation config.Operation } type want struct { @@ -721,7 +441,6 @@ func Test_loader_newStreamInsert(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -745,7 +464,6 @@ func Test_loader_newStreamInsert(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -755,8 +473,10 @@ func Test_loader_newStreamInsert(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -778,7 +498,6 @@ func Test_loader_newStreamInsert(t *testing.T) { loaderFunc: test.fields.loaderFunc, dataProvider: test.fields.dataProvider, dataSize: test.fields.dataSize, - service: test.fields.service, operation: test.fields.operation, } @@ -786,7 +505,6 @@ func Test_loader_newStreamInsert(t *testing.T) { if err := test.checkFunc(test.want, gotF, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/tools/cli/loadtest/service/loader.go b/pkg/tools/cli/loadtest/service/loader.go index 1aef2e6356..0b623e95be 100644 --- a/pkg/tools/cli/loadtest/service/loader.go +++ b/pkg/tools/cli/loadtest/service/loader.go @@ -23,7 +23,7 @@ import ( "syscall" "time" - "github.com/vdaas/vald/apis/grpc/payload" + "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/internal/errgroup" 
"github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" @@ -33,7 +33,7 @@ import ( "github.com/vdaas/vald/pkg/tools/cli/loadtest/config" ) -// Loader is representation of load test +// Loader is representation of load test. type Loader interface { Prepare(context.Context) error Do(context.Context) <-chan error @@ -54,7 +54,6 @@ type loader struct { loaderFunc loadFunc dataProvider func() interface{} dataSize int - service config.Service operation config.Operation } @@ -165,7 +164,7 @@ func (l *loader) Do(ctx context.Context) <-chan error { })) l.eg.Go(safety.RecoverFunc(func() error { - log.Infof("start load test(%s, %s)", l.service.String(), l.operation.String()) + log.Infof("start load test(%s)", l.operation.String()) defer close(ech) defer ticker.Stop() start = time.Now() @@ -179,7 +178,7 @@ func (l *loader) Do(ctx context.Context) <-chan error { finalize(ctx, err) return p.Signal(syscall.SIGKILL) // TODO: #403 } - log.Infof("result:%s\t%d\t%d\t%f", l.service.String(), l.concurrency, l.batchSize, vps(int(pgCnt)*l.batchSize, start, end)) + log.Infof("result:%d\t%d\t%f", l.concurrency, l.batchSize, vps(int(pgCnt)*l.batchSize, start, end)) return p.Signal(syscall.SIGTERM) // TODO: #403 })) diff --git a/pkg/tools/cli/loadtest/service/loader_option.go b/pkg/tools/cli/loadtest/service/loader_option.go index 731c45ebb6..433da054b7 100644 --- a/pkg/tools/cli/loadtest/service/loader_option.go +++ b/pkg/tools/cli/loadtest/service/loader_option.go @@ -26,14 +26,12 @@ import ( // Option is load test configuration. type Option func(*loader) error -var ( - defaultOpts = []Option{ - WithConcurrency(100), - WithBatchSize(1), - WithErrGroup(errgroup.Get()), - WithProgressDuration("5s"), - } -) +var defaultOpts = []Option{ + WithConcurrency(100), + WithBatchSize(1), + WithErrGroup(errgroup.Get()), + WithProgressDuration("5s"), +} // WithAddr sets load test server address. func WithAddr(a string) Option { @@ -116,11 +114,3 @@ func WithOperation(op string) Option { return nil } } - -// WithService sets service of load test. 
-func WithService(s string) Option { - return func(l *loader) error { - l.service = config.ServiceMethod(s) - return nil - } -} diff --git a/pkg/tools/cli/loadtest/service/loader_option_test.go b/pkg/tools/cli/loadtest/service/loader_option_test.go index 7a8741de06..115b6bc170 100644 --- a/pkg/tools/cli/loadtest/service/loader_option_test.go +++ b/pkg/tools/cli/loadtest/service/loader_option_test.go @@ -16,16 +16,16 @@ package service import ( - "reflect" "testing" "github.com/vdaas/vald/internal/errgroup" - "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" "go.uber.org/goleak" ) func TestWithAddr(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { a string @@ -63,7 +63,7 @@ func TestWithAddr(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -99,9 +99,11 @@ func TestWithAddr(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -122,7 +124,7 @@ func TestWithAddr(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -130,7 +132,7 @@ func TestWithAddr(t *testing.T) { got := WithAddr(test.args.a) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -139,6 +141,8 @@ func TestWithAddr(t *testing.T) { } func TestWithClient(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { c grpc.Client @@ -176,7 +180,7 @@ func TestWithClient(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -212,9 +216,11 @@ func TestWithClient(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -235,7 +241,7 @@ func TestWithClient(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -243,7 +249,7 @@ func TestWithClient(t *testing.T) { got := WithClient(test.args.c) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -252,6 +258,8 @@ func TestWithClient(t *testing.T) { } func TestWithConcurrency(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { c int @@ -289,7 +297,7 @@ 
func TestWithConcurrency(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -325,9 +333,11 @@ func TestWithConcurrency(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -348,7 +358,7 @@ func TestWithConcurrency(t *testing.T) { } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc @@ -356,7 +366,7 @@ func TestWithConcurrency(t *testing.T) { got := WithConcurrency(test.args.c) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -364,10 +374,12 @@ func TestWithConcurrency(t *testing.T) { } } -func TestWithDataset(t *testing.T) { +func TestWithBatchSize(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { - n string + b int } type want struct { obj *T @@ -402,7 +414,7 @@ func TestWithDataset(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -414,7 +426,7 @@ func TestWithDataset(t *testing.T) { { name: "test_case_1", args: args { - n: "", + b: 0, }, want: want { obj: new(T), @@ -428,7 +440,7 @@ func TestWithDataset(t *testing.T) { return test { name: "test_case_2", args: args { - n: "", + b: 0, }, want: want { obj: new(T), @@ -438,9 +450,11 @@ func TestWithDataset(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -454,22 +468,22 @@ func TestWithDataset(t *testing.T) { test.checkFunc = defaultCheckFunc } - got := WithDataset(test.args.n) + got := WithBatchSize(test.args.b) obj := new(T) if err := test.checkFunc(test.want, obj, got(obj)); err != nil { tt.Errorf("error = %v", err) } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - got := WithDataset(test.args.n) + got := WithBatchSize(test.args.b) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -477,10 +491,12 @@ func TestWithDataset(t *testing.T) { } } -func TestWithErrGroup(t *testing.T) { +func TestWithDataset(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { - eg errgroup.Group + n string } type want struct { obj *T @@ -515,7 +531,7 @@ func TestWithErrGroup(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if 
!reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -527,7 +543,7 @@ func TestWithErrGroup(t *testing.T) { { name: "test_case_1", args: args { - eg: nil, + n: "", }, want: want { obj: new(T), @@ -541,7 +557,7 @@ func TestWithErrGroup(t *testing.T) { return test { name: "test_case_2", args: args { - eg: nil, + n: "", }, want: want { obj: new(T), @@ -551,9 +567,11 @@ func TestWithErrGroup(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -567,22 +585,22 @@ func TestWithErrGroup(t *testing.T) { test.checkFunc = defaultCheckFunc } - got := WithErrGroup(test.args.eg) + got := WithDataset(test.args.n) obj := new(T) if err := test.checkFunc(test.want, obj, got(obj)); err != nil { tt.Errorf("error = %v", err) } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - got := WithErrGroup(test.args.eg) + got := WithDataset(test.args.n) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -590,10 +608,12 @@ func TestWithErrGroup(t *testing.T) { } } -func TestWithProgressDuration(t *testing.T) { +func TestWithErrGroup(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { - pd string + eg errgroup.Group } type want struct { obj *T @@ -628,7 +648,7 @@ func TestWithProgressDuration(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -640,7 +660,7 @@ func TestWithProgressDuration(t *testing.T) { { name: "test_case_1", args: args { - pd: "", + eg: nil, }, want: want { obj: new(T), @@ -654,7 +674,7 @@ func TestWithProgressDuration(t *testing.T) { return test { name: "test_case_2", args: args { - pd: "", + eg: nil, }, want: want { obj: new(T), @@ -664,9 +684,11 @@ func TestWithProgressDuration(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -680,22 +702,22 @@ func TestWithProgressDuration(t *testing.T) { test.checkFunc = defaultCheckFunc } - got := WithProgressDuration(test.args.pd) + got := WithErrGroup(test.args.eg) obj := new(T) if err := test.checkFunc(test.want, obj, got(obj)); err != nil { tt.Errorf("error = %v", err) } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - got := WithProgressDuration(test.args.pd) + got := WithErrGroup(test.args.eg) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); 
err != nil { tt.Errorf("error = %v", err) } */ @@ -703,10 +725,12 @@ func TestWithProgressDuration(t *testing.T) { } } -func TestWithOperation(t *testing.T) { +func TestWithProgressDuration(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing type T = interface{} type args struct { - op string + pd string } type want struct { obj *T @@ -741,7 +765,7 @@ func TestWithOperation(t *testing.T) { /* defaultCheckFunc := func(w want, obj *T) error { if !reflect.DeepEqual(obj, w.obj) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.c) + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) } return nil } @@ -753,7 +777,7 @@ func TestWithOperation(t *testing.T) { { name: "test_case_1", args: args { - op: "", + pd: "", }, want: want { obj: new(T), @@ -767,7 +791,7 @@ func TestWithOperation(t *testing.T) { return test { name: "test_case_2", args: args { - op: "", + pd: "", }, want: want { obj: new(T), @@ -777,9 +801,11 @@ func TestWithOperation(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(t) + tt.Parallel() + defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) } @@ -793,22 +819,22 @@ func TestWithOperation(t *testing.T) { test.checkFunc = defaultCheckFunc } - got := WithOperation(test.args.op) + got := WithProgressDuration(test.args.pd) obj := new(T) if err := test.checkFunc(test.want, obj, got(obj)); err != nil { tt.Errorf("error = %v", err) } */ - // Uncomment this block if the option returns an error, otherwise delete it + // Uncomment this block if the option do not return an error, otherwise delete it /* if test.checkFunc == nil { test.checkFunc = defaultCheckFunc } - got := WithOperation(test.args.op) + got := WithProgressDuration(test.args.pd) obj := new(T) got(obj) - if err := test.checkFunc(tt.want, obj); err != nil { + if err := test.checkFunc(test.want, obj); err != nil { tt.Errorf("error = %v", err) } */ @@ -816,108 +842,63 @@ func TestWithOperation(t *testing.T) { } } -func TestWithBatchSize(t *testing.T) { +func TestWithOperation(t *testing.T) { + t.Parallel() + // Change interface type to the type of object you are testing + type T = interface{} type args struct { - b int + op string } type want struct { - want Option + obj *T + // Uncomment this line if the option returns an error, otherwise delete it + // err error } type test struct { - name string - args args - want want - checkFunc func(want, Option) error + name string + args args + want want + // Use the first line if the option returns an error. 
otherwise use the second line + // checkFunc func(want, *T, error) error + // checkFunc func(want, *T) error beforeFunc func(args) afterFunc func(args) } - defaultCheckFunc := func(w want, got Option) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - // TODO test cases - /* - { - name: "test_case_1", - args: args { - b: 0, - }, - want: want{}, - checkFunc: defaultCheckFunc, - }, - */ - // TODO test cases - /* - func() test { - return test { - name: "test_case_2", - args: args { - b: 0, - }, - want: want{}, - checkFunc: defaultCheckFunc, - } - }(), - */ - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - defer goleak.VerifyNone(tt) - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - - got := WithBatchSize(test.args.b) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } + // Uncomment this block if the option returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ - }) - } -} + // Uncomment this block if the option do not returns an error, otherwise delete it + /* + defaultCheckFunc := func(w want, obj *T) error { + if !reflect.DeepEqual(obj, w.obj) { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", obj, w.obj) + } + return nil + } + */ -func TestWithService(t *testing.T) { - type args struct { - s string - } - type want struct { - want Option - } - type test struct { - name string - args args - want want - checkFunc func(want, Option) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got Option) error { - if !reflect.DeepEqual(got, w.want) { - return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } tests := []test{ // TODO test cases /* { name: "test_case_1", args: args { - s: "", + op: "", + }, + want: want { + obj: new(T), }, - want: want{}, - checkFunc: defaultCheckFunc, }, */ @@ -927,17 +908,20 @@ func TestWithService(t *testing.T) { return test { name: "test_case_2", args: args { - s: "", + op: "", + }, + want: want { + obj: new(T), }, - want: want{}, - checkFunc: defaultCheckFunc, } }(), */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -945,15 +929,32 @@ func TestWithService(t *testing.T) { if test.afterFunc != nil { defer test.afterFunc(test.args) } - if test.checkFunc == nil { - test.checkFunc = defaultCheckFunc - } - got := WithService(test.args.s) - if err := test.checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } + // Uncomment this block if the option returns an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + + got := WithOperation(test.args.op) + obj := new(T) + if err := test.checkFunc(test.want, obj, got(obj)); err != nil { + tt.Errorf("error = %v", err) + } + */ + // Uncomment this block if the option do not return 
an error, otherwise delete it + /* + if test.checkFunc == nil { + test.checkFunc = defaultCheckFunc + } + got := WithOperation(test.args.op) + obj := new(T) + got(obj) + if err := test.checkFunc(test.want, obj); err != nil { + tt.Errorf("error = %v", err) + } + */ }) } } diff --git a/pkg/tools/cli/loadtest/service/loader_test.go b/pkg/tools/cli/loadtest/service/loader_test.go index c601c1942d..39422c1606 100644 --- a/pkg/tools/cli/loadtest/service/loader_test.go +++ b/pkg/tools/cli/loadtest/service/loader_test.go @@ -29,6 +29,7 @@ import ( ) func TestNewLoader(t *testing.T) { + t.Parallel() type args struct { opts []Option } @@ -81,8 +82,10 @@ func TestNewLoader(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -98,12 +101,12 @@ func TestNewLoader(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_loader_Prepare(t *testing.T) { + t.Parallel() type args struct { in0 context.Context } @@ -118,7 +121,6 @@ func Test_loader_Prepare(t *testing.T) { loaderFunc loadFunc dataProvider func() interface{} dataSize int - service config.Service operation config.Operation } type want struct { @@ -158,7 +160,6 @@ func Test_loader_Prepare(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -185,7 +186,6 @@ func Test_loader_Prepare(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -195,8 +195,10 @@ func Test_loader_Prepare(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -218,7 +220,6 @@ func Test_loader_Prepare(t *testing.T) { loaderFunc: test.fields.loaderFunc, dataProvider: test.fields.dataProvider, dataSize: test.fields.dataSize, - service: test.fields.service, operation: test.fields.operation, } @@ -226,12 +227,12 @@ func Test_loader_Prepare(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_loader_Do(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -246,7 +247,6 @@ func Test_loader_Do(t *testing.T) { loaderFunc loadFunc dataProvider func() interface{} dataSize int - service config.Service operation config.Operation } type want struct { @@ -286,7 +286,6 @@ func Test_loader_Do(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -313,7 +312,6 @@ func Test_loader_Do(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -323,8 +321,10 @@ func Test_loader_Do(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -346,7 +346,6 @@ func Test_loader_Do(t *testing.T) { loaderFunc: test.fields.loaderFunc, dataProvider: test.fields.dataProvider, dataSize: test.fields.dataSize, - service: test.fields.service, operation: test.fields.operation, } @@ -354,12 +353,12 @@ func Test_loader_Do(t *testing.T) { if err := test.checkFunc(test.want, got); err != 
nil { tt.Errorf("error = %v", err) } - }) } } func Test_loader_do(t *testing.T) { + t.Parallel() type args struct { ctx context.Context f func(interface{}, error) @@ -376,7 +375,6 @@ func Test_loader_do(t *testing.T) { loaderFunc loadFunc dataProvider func() interface{} dataSize int - service config.Service operation config.Operation } type want struct { @@ -418,7 +416,6 @@ func Test_loader_do(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -447,7 +444,6 @@ func Test_loader_do(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -457,8 +453,10 @@ func Test_loader_do(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -480,7 +478,6 @@ func Test_loader_do(t *testing.T) { loaderFunc: test.fields.loaderFunc, dataProvider: test.fields.dataProvider, dataSize: test.fields.dataSize, - service: test.fields.service, operation: test.fields.operation, } @@ -488,7 +485,6 @@ func Test_loader_do(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/tools/cli/loadtest/service/search.go b/pkg/tools/cli/loadtest/service/search.go index a51e0728db..87c99c443d 100644 --- a/pkg/tools/cli/loadtest/service/search.go +++ b/pkg/tools/cli/loadtest/service/search.go @@ -19,13 +19,10 @@ import ( "context" "sync/atomic" - "github.com/vdaas/vald/apis/grpc/agent/core" - "github.com/vdaas/vald/apis/grpc/gateway/vald" - "github.com/vdaas/vald/apis/grpc/payload" - "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/pkg/tools/cli/loadtest/assets" - "github.com/vdaas/vald/pkg/tools/cli/loadtest/config" ) func searchRequestProvider(dataset assets.Dataset) (func() interface{}, int, error) { @@ -46,31 +43,13 @@ func searchRequestProvider(dataset assets.Dataset) (func() interface{}, int, err } func (l *loader) newSearch() (loadFunc, error) { - switch l.service { - case config.Agent: - return func(ctx context.Context, conn *grpc.ClientConn, i interface{}, copts ...grpc.CallOption) (interface{}, error) { - return core.NewAgentClient(conn).Search(ctx, i.(*payload.Search_Request), copts...) - }, nil - case config.Gateway: - return func(ctx context.Context, conn *grpc.ClientConn, i interface{}, copts ...grpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).Search(ctx, i.(*payload.Search_Request), copts...) - }, nil - default: - return nil, errors.Errorf("undefined service: %s", l.service.String()) - } + return func(ctx context.Context, conn *grpc.ClientConn, i interface{}, copts ...grpc.CallOption) (interface{}, error) { + return vald.NewSearchClient(conn).Search(ctx, i.(*payload.Search_Request), copts...) + }, nil } func (l *loader) newStreamSearch() (loadFunc, error) { - switch l.service { - case config.Agent: - return func(ctx context.Context, conn *grpc.ClientConn, i interface{}, copts ...grpc.CallOption) (interface{}, error) { - return core.NewAgentClient(conn).StreamSearch(ctx, copts...) 
- }, nil - case config.Gateway: - return func(ctx context.Context, conn *grpc.ClientConn, i interface{}, copts ...grpc.CallOption) (interface{}, error) { - return vald.NewValdClient(conn).StreamSearch(ctx, copts...) - }, nil - default: - return nil, errors.Errorf("undefined service: %s", l.service.String()) - } + return func(ctx context.Context, conn *grpc.ClientConn, i interface{}, copts ...grpc.CallOption) (interface{}, error) { + return vald.NewSearchClient(conn).StreamSearch(ctx, copts...) + }, nil } diff --git a/pkg/tools/cli/loadtest/service/search_test.go b/pkg/tools/cli/loadtest/service/search_test.go index 343c1c653f..59acf7d4ec 100644 --- a/pkg/tools/cli/loadtest/service/search_test.go +++ b/pkg/tools/cli/loadtest/service/search_test.go @@ -29,6 +29,7 @@ import ( ) func Test_searchRequestProvider(t *testing.T) { + t.Parallel() type args struct { dataset assets.Dataset } @@ -85,8 +86,10 @@ func Test_searchRequestProvider(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -102,12 +105,12 @@ func Test_searchRequestProvider(t *testing.T) { if err := test.checkFunc(test.want, got, got1, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_loader_newSearch(t *testing.T) { + t.Parallel() type fields struct { eg errgroup.Group client grpc.Client @@ -119,7 +122,6 @@ func Test_loader_newSearch(t *testing.T) { loaderFunc loadFunc dataProvider func() interface{} dataSize int - service config.Service operation config.Operation } type want struct { @@ -159,7 +161,6 @@ func Test_loader_newSearch(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -183,7 +184,6 @@ func Test_loader_newSearch(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -193,8 +193,10 @@ func Test_loader_newSearch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -216,7 +218,6 @@ func Test_loader_newSearch(t *testing.T) { loaderFunc: test.fields.loaderFunc, dataProvider: test.fields.dataProvider, dataSize: test.fields.dataSize, - service: test.fields.service, operation: test.fields.operation, } @@ -224,12 +225,12 @@ func Test_loader_newSearch(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_loader_newStreamSearch(t *testing.T) { + t.Parallel() type fields struct { eg errgroup.Group client grpc.Client @@ -241,7 +242,6 @@ func Test_loader_newStreamSearch(t *testing.T) { loaderFunc loadFunc dataProvider func() interface{} dataSize int - service config.Service operation config.Operation } type want struct { @@ -281,7 +281,6 @@ func Test_loader_newStreamSearch(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -305,7 +304,6 @@ func Test_loader_newStreamSearch(t *testing.T) { loaderFunc: nil, dataProvider: nil, dataSize: 0, - service: nil, operation: nil, }, want: want{}, @@ -315,8 +313,10 @@ func Test_loader_newStreamSearch(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer 
goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc() @@ -338,7 +338,6 @@ func Test_loader_newStreamSearch(t *testing.T) { loaderFunc: test.fields.loaderFunc, dataProvider: test.fields.dataProvider, dataSize: test.fields.dataSize, - service: test.fields.service, operation: test.fields.operation, } @@ -346,7 +345,6 @@ func Test_loader_newStreamSearch(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/pkg/tools/cli/loadtest/usecase/load.go b/pkg/tools/cli/loadtest/usecase/load.go index 83b5ad4037..f0e49c3ec9 100644 --- a/pkg/tools/cli/loadtest/usecase/load.go +++ b/pkg/tools/cli/loadtest/usecase/load.go @@ -56,7 +56,6 @@ func New(cfg *config.Data) (r runner.Runner, err error) { service.WithClient(run.client), service.WithConcurrency(cfg.Concurrency), service.WithProgressDuration(cfg.ProgressDuration), - service.WithService(cfg.Service), ) if err != nil { return nil, err diff --git a/pkg/tools/cli/loadtest/usecase/load_test.go b/pkg/tools/cli/loadtest/usecase/load_test.go index 7066148df7..4c4d579b9d 100644 --- a/pkg/tools/cli/loadtest/usecase/load_test.go +++ b/pkg/tools/cli/loadtest/usecase/load_test.go @@ -31,6 +31,7 @@ import ( ) func TestNew(t *testing.T) { + t.Parallel() type args struct { cfg *config.Data } @@ -83,8 +84,10 @@ func TestNew(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -100,12 +103,12 @@ func TestNew(t *testing.T) { if err := test.checkFunc(test.want, gotR, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStart(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -170,8 +173,10 @@ func Test_run_PreStart(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -192,12 +197,12 @@ func Test_run_PreStart(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Start(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -266,8 +271,10 @@ func Test_run_Start(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -288,12 +295,12 @@ func Test_run_Start(t *testing.T) { if err := test.checkFunc(test.want, got, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PreStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -358,8 +365,10 @@ func Test_run_PreStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -380,12 +389,12 @@ func Test_run_PreStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_Stop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -450,8 +459,10 @@ func Test_run_Stop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, 
func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -472,12 +483,12 @@ func Test_run_Stop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } func Test_run_PostStop(t *testing.T) { + t.Parallel() type args struct { ctx context.Context } @@ -542,8 +553,10 @@ func Test_run_PostStop(t *testing.T) { */ } - for _, test := range tests { + for _, tc := range tests { + test := tc t.Run(test.name, func(tt *testing.T) { + tt.Parallel() defer goleak.VerifyNone(tt) if test.beforeFunc != nil { test.beforeFunc(test.args) @@ -564,7 +577,6 @@ func Test_run_PostStop(t *testing.T) { if err := test.checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } - }) } } diff --git a/versions/GO_VERSION b/versions/GO_VERSION index 42cf0675c5..e34208c937 100644 --- a/versions/GO_VERSION +++ b/versions/GO_VERSION @@ -1 +1 @@ -1.15.2 +1.15.4 diff --git a/versions/NGT_VERSION b/versions/NGT_VERSION index f8f4f03b3d..6b89d58f86 100644 --- a/versions/NGT_VERSION +++ b/versions/NGT_VERSION @@ -1 +1 @@ -1.12.1 +1.12.2 diff --git a/versions/OPERATOR_SDK_VERSION b/versions/OPERATOR_SDK_VERSION index b18d46540b..79127d85a4 100644 --- a/versions/OPERATOR_SDK_VERSION +++ b/versions/OPERATOR_SDK_VERSION @@ -1 +1 @@ -v1.0.1 +v1.2.0
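All of the test files touched above converge on the same parallel table-driven idiom: copy the range variable, call tt.Parallel() inside t.Run, and hand the subtest's own *testing.T to goleak (the patch fixes several goleak.VerifyNone(t) calls to goleak.VerifyNone(tt)). A minimal self-contained sketch of that idiom, with a hypothetical table; only the loop shape mirrors the patch:

package sketch_test

import (
	"testing"

	"go.uber.org/goleak"
)

func TestDoubleParallel(t *testing.T) {
	t.Parallel()
	type test struct {
		name string
		in   int
		want int
	}
	tests := []test{
		{name: "doubles one", in: 1, want: 2},
		{name: "doubles two", in: 2, want: 4},
	}
	for _, tc := range tests {
		test := tc // copy the range variable; required before Go 1.22 (the patch targets Go 1.15.4) so each parallel subtest sees its own value
		t.Run(test.name, func(tt *testing.T) {
			tt.Parallel()
			// Verify on the subtest's *testing.T (tt), not the parent t,
			// so goroutine-leak failures are attributed to the right subtest.
			defer goleak.VerifyNone(tt)
			if got := test.in * 2; got != test.want {
				tt.Errorf("got = %v, want %v", got, test.want)
			}
		})
	}
}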
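The insert.go changes above drop the agent/gateway switch and wrap each vector in the v1 Insert payload. A rough sketch of that request shape and the unified client call, assuming the generated v1 clients accept a standard *grpc.ClientConn (the patch reaches them through the internal/net/grpc wrapper); the helper names here are illustrative and not part of the patch:

package sketch

import (
	"context"

	"github.com/kpango/fuid"
	"github.com/vdaas/vald/apis/grpc/v1/payload"
	"github.com/vdaas/vald/apis/grpc/v1/vald"
	"google.golang.org/grpc"
)

// buildInsertRequest wraps one raw vector in a v1 Insert_Request, the shape
// objectVectorProvider now emits instead of a bare Object_Vector.
func buildInsertRequest(vec []float32) *payload.Insert_Request {
	return &payload.Insert_Request{
		Vector: &payload.Object_Vector{
			Id:     fuid.String(), // random ID per vector, as in the patch
			Vector: vec,
		},
	}
}

// buildMultiRequest groups several requests for MultiInsert, the shape
// objectVectorsProvider now emits instead of Object_Vectors.
func buildMultiRequest(vecs [][]float32) *payload.Insert_MultiRequest {
	reqs := make([]*payload.Insert_Request, 0, len(vecs))
	for _, v := range vecs {
		reqs = append(reqs, buildInsertRequest(v))
	}
	return &payload.Insert_MultiRequest{Requests: reqs}
}

// insertVectors issues the call through the unified vald.v1 Insert service;
// the old per-service (agent vs. gateway) branching is no longer needed
// because both targets expose the same v1 API.
func insertVectors(ctx context.Context, conn *grpc.ClientConn, vecs [][]float32) error {
	c := vald.NewInsertClient(conn)
	if len(vecs) == 1 {
		_, err := c.Insert(ctx, buildInsertRequest(vecs[0]))
		return err
	}
	_, err := c.MultiInsert(ctx, buildMultiRequest(vecs))
	return err
}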
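loader_option.go keeps the functional-option style while dropping WithService, since the service switch is gone. A condensed sketch of that pattern using the two options visible in the patch (WithConcurrency, WithBatchSize), with the loader trimmed to the relevant fields and assuming, as is conventional, that defaults are applied before caller-supplied options:

package sketch

// Option mutates a loader during construction and may reject invalid values.
type Option func(*loader) error

type loader struct {
	concurrency int
	batchSize   int
}

// defaultOpts is applied first, so caller-supplied options can override it.
var defaultOpts = []Option{
	WithConcurrency(100),
	WithBatchSize(1),
}

// WithConcurrency sets the number of concurrent requests, ignoring non-positive values.
func WithConcurrency(c int) Option {
	return func(l *loader) error {
		if c > 0 {
			l.concurrency = c
		}
		return nil
	}
}

// WithBatchSize sets how many vectors go into one bulk request, ignoring non-positive values.
func WithBatchSize(b int) Option {
	return func(l *loader) error {
		if b > 0 {
			l.batchSize = b
		}
		return nil
	}
}

// newLoader applies the defaults, then the caller's options, stopping at the first error.
func newLoader(opts ...Option) (*loader, error) {
	l := new(loader)
	for _, opt := range append(defaultOpts, opts...) {
		if err := opt(l); err != nil {
			return nil, err
		}
	}
	return l, nil
}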