diff --git a/.drone.yml b/.drone.yml new file mode 100644 index 000000000..60fd4b201 --- /dev/null +++ b/.drone.yml @@ -0,0 +1,35 @@ +kind: pipeline +name: default + +steps: + - name: postgres + image: postgres:12.3-alpine + commands: + - until psql -U admin -w installer -c "select 1"; do sleep 5; done + - name: build + image: golang + commands: + - CGO_ENABLED=0 go build -o build/bm-inventory cmd/main.go + - ./build/bm-inventory & + - sleep 10 + - go test -v ./subsystem/... -count=1 -ginkgo.skip=only_k8s -ginkgo.v + environment: + INVENTORY: build:8090 + DB_HOST: postgres + DB_PORT: 5432 + USE_K8S: false + +trigger: + branch: + - master + event: + - pull_request + - push + +services: + - name: postgres + image: postgres:12.3-alpine + environment: + POSTGRES_PASSWORD: admin + POSTGRES_USER: admin + POSTGRES_DB: installer diff --git a/.github/workflows/image-push.yml b/.github/workflows/image-push.yml index 7c2014672..b4a4b2cb2 100644 --- a/.github/workflows/image-push.yml +++ b/.github/workflows/image-push.yml @@ -4,7 +4,9 @@ on: push: branches: - master - + pull_request: + branches: + - master jobs: build: runs-on: ubuntu-latest @@ -28,8 +30,12 @@ jobs: - name: Get release version id: get_version run: echo ::set-env name=GIT_REVISION::$(echo ${GITHUB_SHA}) + - name: create python client + run: | + skipper make create-python-client - name: Publish bm-inventory to Registry - uses: elgohr/Publish-Docker-Github-Action@2.15 + if: github.event_name != 'pull_request' + uses: elgohr/Publish-Docker-Github-Action@v5 with: name: bm-inventory username: ${{ secrets.DOCKER_USERNAME }} @@ -37,14 +43,15 @@ jobs: REGISTRY: 'quay.io/ocpmetal' dockerfile: "Dockerfile.bm-inventory" buildargs: GIT_REVISION - tags: "stable,${{ env.GIT_REVISION }}" - - name: Publish expirer to Registry - uses: elgohr/Publish-Docker-Github-Action@2.15 + tags: "latest,${{ env.GIT_REVISION }}" + - name: Publish bm-inventory PR image to Registry + if: github.event_name == 'pull_request' && 
github.event.pull_request.head.repo.full_name == github.repository + uses: elgohr/Publish-Docker-Github-Action@v5 with: - name: s3-object-expirer + name: bm-inventory username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} REGISTRY: 'quay.io/ocpmetal' - dockerfile: "Dockerfile.s3-object-expirer" - buildargs: GIT_REVISION - tags: "stable,${{ env.GIT_REVISION }}" + dockerfile: "Dockerfile.bm-inventory" + buildargs: GIT_REVISION, QUAY_TAG_EXPIRATION + tags: "PR_${{ env.GIT_REVISION }}" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 000000000..65edd2d9e --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,46 @@ +name: release + +on: + push: + tags: + - "*" + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@v1 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: 3.x + - name: Install dependencies + run: | + sudo pip install strato-skipper + mkdir -p ~/.docker + echo "{}" > ~/.docker/config.json + touch ${HOME}/.gitconfig + ln -s Dockerfile.bm-inventory Dockerfile + - name: build + run: | + skipper make build + - name: create python client + run: | + skipper make create-python-client + - name: Get release version + id: get_version + run: echo ::set-env name=GIT_REVISION::$(echo ${GITHUB_SHA}) + - name: Get tag + id: get_tag + run: echo ::set-env name=GIT_TAG::${GITHUB_REF/refs\/tags\//} + - name: Publish bm-inventory to Registry + uses: elgohr/Publish-Docker-Github-Action@v5 + with: + name: bm-inventory + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + REGISTRY: 'quay.io/ocpmetal' + dockerfile: "Dockerfile.bm-inventory" + buildargs: GIT_REVISION + tags: "${{ env.GIT_TAG }},${{ env.GIT_REVISION }}" diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index e13a863f7..e95bf0f0d 100644 --- a/.github/workflows/unit-test.yml +++ 
b/.github/workflows/unit-test.yml @@ -9,38 +9,6 @@ on: - master jobs: - remove-label: - runs-on: ubuntu-latest - steps: - - name: remove label - uses: actions/github-script@0.9.0 - if: github.event_name == 'pull_request' - with: - github-token: ${{secrets.GITHUB_TOKEN}} - script: | - - // Fetch the list of labels attached to the issue that - // triggered the workflow - const opts = github.issues.listLabelsOnIssue.endpoint.merge({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.issue.number, - }); - const labels = await github.paginate(opts); - - for (const label of labels) { - // If the issue has a label named 'tests-passed', remove it - if (label.name === 'tests-passed') { - await github.issues.removeLabel({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.issue.number, - name: 'tests-passed' - }) - return; - } - } - lint: runs-on: ubuntu-latest steps: @@ -73,15 +41,3 @@ jobs: - name: test run: | skipper make build - - name: apply label - uses: actions/github-script@0.9.0 - if: github.event_name == 'pull_request' - with: - github-token: ${{secrets.GITHUB_TOKEN}} - script: | - github.issues.addLabels({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - labels: ['tests-passed'] - }) diff --git a/.gitignore b/.gitignore index 8b9aeb6f3..b948da341 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,7 @@ .idea/ .vscode/ +.venv/ +venv/ build/ kubevirtci/ **/RCS/** @@ -10,3 +12,5 @@ __pycache__/ # Output of the go coverage tool, specifically when used with LiteIDE *.out + +/vendor diff --git a/.golangci.yml b/.golangci.yml index 38a3336c5..da7053cf6 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -47,6 +47,7 @@ issues: # excluded by default patterns execute `golangci-lint run --help` exclude: - G107 + - G402 # support scality DisableSSL linters: enable: diff --git a/Dockerfile.bm-inventory b/Dockerfile.bm-inventory index 7645d0ff6..25e880dfb 100644 --- 
a/Dockerfile.bm-inventory +++ b/Dockerfile.bm-inventory @@ -1,5 +1,10 @@ +FROM registry.access.redhat.com/ubi8/ubi-minimal:latest as certs + FROM scratch +COPY --from=certs /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /etc/ssl/certs/ca-bundle.crt +COPY --from=certs /etc/pki/ca-trust/extracted/openssl/ca-bundle.trust.crt /etc/ssl/certs/ca-bundle.trust.crt ARG GIT_REVISION LABEL "git_revision"=${GIT_REVISION} ADD build/bm-inventory /bm-inventory +ADD build/bm-inventory-client-1.0.0.tar.gz /clients/bm-inventory-client-1.0.0.tar.gz CMD ["/bm-inventory"] diff --git a/Dockerfile.bm-inventory-build b/Dockerfile.bm-inventory-build index 4733edfa8..3cb8e4953 100644 --- a/Dockerfile.bm-inventory-build +++ b/Dockerfile.bm-inventory-build @@ -1,14 +1,15 @@ -FROM golang:1.14 +FROM golang:1.14.3 -RUN apt-get update && apt-get install -y docker.io libvirt-clients awscli python3-pip \ +ENV GO111MODULE=on + +RUN apt-get update && apt-get install -y docker.io libvirt-clients awscli python3-pip postgresql \ && rm -rf /var/lib/apt/lists/* RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.24.0 -RUN go get -u github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI -RUN go get -u github.com/onsi/gomega/... 
# fetches the matcher library -RUN go get -u golang.org/x/tools/cmd/goimports -RUN go get -u github.com/golang/mock/mockgen -RUN go get -u github.com/vektra/mockery/.../ -RUN pip3 install boto3 -RUN curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \ +RUN go get -u github.com/onsi/ginkgo/ginkgo@v1.12.2 \ + golang.org/x/tools/cmd/goimports@v0.0.0-20200520220537-cf2d1e09c845 \ + github.com/golang/mock/mockgen@v1.4.3 \ + github.com/vektra/mockery/.../@v1.1.2 +RUN pip3 install boto3==1.13.14 waiting==1.4.1 +RUN curl -Lo minikube https://storage.googleapis.com/minikube/releases/v1.10.1/minikube-linux-amd64 \ && chmod +x minikube && mkdir -p /usr/local/bin/ && install minikube /usr/local/bin/ RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl && chmod +x ./kubectl && mv ./kubectl /usr/local/bin/kubectl diff --git a/Dockerfile.s3-object-expirer b/Dockerfile.s3-object-expirer deleted file mode 100644 index 2ecf0a4be..000000000 --- a/Dockerfile.s3-object-expirer +++ /dev/null @@ -1,7 +0,0 @@ -FROM python:3 - -ARG GIT_REVISION -LABEL "git_revision"=${GIT_REVISION} - -ADD tools/expirer.py / -RUN pip install --no-cache-dir boto3 pytz diff --git a/Dockerfile.test b/Dockerfile.test new file mode 100644 index 000000000..77ac30c39 --- /dev/null +++ b/Dockerfile.test @@ -0,0 +1,20 @@ +FROM golang:1.14.3 + +ENV GO111MODULE=on + +WORKDIR /assisted-service + +COPY . 
./ + +RUN curl -sSfL \ +https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \ +sh -s -- -b $(go env GOPATH)/bin v1.24.0 + +RUN go get -u \ +github.com/onsi/ginkgo/ginkgo@v1.12.2 \ +golang.org/x/tools/cmd/goimports@v0.0.0-20200520220537-cf2d1e09c845 \ +github.com/golang/mock/mockgen@v1.4.3 \ +github.com/vektra/mockery/.../@v1.1.2 + +ENTRYPOINT ["make"] +CMD ["unit-test"] diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 000000000..3438378e7 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,43 @@ +pipeline { + agent { label 'bm-inventory-subsystem' } + stages { + stage('clear deployment') { + steps { + sh 'make clear-deployment' + } + } + + stage('Deploy') { + steps { + sh '''export PATH=$PATH:/usr/local/go/bin; export OBJEXP=quay.io/ocpmetal/s3-object-expirer:latest; make deploy-test''' + sleep 60 + sh '''# Dump pod statuses;kubectl get pods -A''' + } + } + + stage('test') { + steps { + sh '''export PATH=$PATH:/usr/local/go/bin;make subsystem-run''' + } + } + } + post { + failure { + echo 'Get bm-inventory log' + sh ''' + kubectl get pods -o=custom-columns=NAME:.metadata.name -A | grep bm-inventory | xargs -I {} sh -c "kubectl logs {} -n assisted-installer > test_dd.log" + mv test_dd.log $WORKSPACE/bm-inventory.log || true + ''' + + echo 'Get mariadb log' + sh '''kubectl get pods -o=custom-columns=NAME:.metadata.name -A | grep mariadb | xargs -I {} sh -c "kubectl logs {} -n assisted-installer > test_dd.log" + mv test_dd.log $WORKSPACE/mariadb.log || true + ''' + + echo 'Get createimage log' + sh '''kubectl get pods -o=custom-columns=NAME:.metadata.name -A | grep createimage | xargs -I {} sh -c "kubectl logs {} -n assisted-installer > test_dd.log" + mv test_dd.log $WORKSPACE/createimage.log || true + ''' + } + } +} \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + 
http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Makefile b/Makefile index 8a854b592..47374eb3b 100644 --- a/Makefile +++ b/Makefile @@ -1,23 +1,26 @@ PWD = $(shell pwd) UID = $(shell id -u) +BUILD_FOLDER = $(PWD)/build TARGET := $(or ${TARGET},minikube) -KUBECTL=kubectl -n assisted-installer +NAMESPACE := $(or ${NAMESPACE},assisted-installer) +KUBECTL=kubectl -n $(NAMESPACE) ifeq ($(TARGET), minikube) define get_service -minikube service --url $(1) -n assisted-installer | sed 's/http:\/\///g' -endef +minikube service --url $(1) -n $(NAMESPACE) | sed 's/http:\/\///g' +endef # get_service else define get_service -kubectl get service $(1) -n assisted-installer | grep $(1) | awk '{print $$4 ":" $$5}' | \ +kubectl get service $(1) -n $(NAMESPACE) | grep $(1) | awk '{print $$4 ":" $$5}' | \ awk '{split($$0,a,":"); print a[1] ":" a[2]}' -endef -endif +endef # get_service +endif # TARGET -SERVICE := $(or ${SERVICE},quay.io/ocpmetal/bm-inventory:stable) -OBJEXP := $(or ${OBJEXP},quay.io/ocpmetal/s3-object-expirer:stable) +SERVICE := $(or ${SERVICE},quay.io/ocpmetal/bm-inventory:latest) GIT_REVISION := $(shell git rev-parse HEAD) +APPLY_NAMESPACE := $(or ${APPLY_NAMESPACE},True) +ROUTE53_SECRET := ${ROUTE53_SECRET} all: build @@ -25,83 +28,154 @@ lint: golangci-lint run -v .PHONY: build -build: create-build-dir lint unit-test - CGO_ENABLED=0 go build -o build/bm-inventory cmd/main.go +build: lint unit-test build-minimal -create-build-dir: - mkdir -p build +build-minimal: create-build-dir + CGO_ENABLED=0 go build -o $(BUILD_FOLDER)/bm-inventory cmd/main.go -clean: - rm -rf build +create-build-dir: + mkdir -p $(BUILD_FOLDER) format: - goimports -w -l cmd/ internal/ + goimports -w -l cmd/ internal/ subsystem/ generate: - go generate $(shell go list ./...) + go generate $(shell go list ./... 
| grep -v 'bm-inventory/models\|bm-inventory/client\|bm-inventory/restapi') generate-from-swagger: rm -rf client models restapi - docker run -u $(UID):$(UID) -v $(PWD):$(PWD):rw,Z -v /etc/passwd:/etc/passwd -w $(PWD) quay.io/goswagger/swagger generate server --template=stratoscale -f swagger.yaml --template-dir=/templates/contrib - docker run -u $(UID):$(UID) -v $(PWD):$(PWD):rw,Z -v /etc/passwd:/etc/passwd -w $(PWD) quay.io/goswagger/swagger generate client --template=stratoscale -f swagger.yaml --template-dir=/templates/contrib - go generate $(shell go list ./client/... ./models/... ./restapi/...) - -update: build + docker run -u $(UID):$(UID) -v $(PWD):$(PWD):rw,Z -v /etc/passwd:/etc/passwd -w $(PWD) \ + quay.io/goswagger/swagger:v0.24.0 generate server --template=stratoscale -f swagger.yaml \ + --template-dir=/templates/contrib + docker run -u $(UID):$(UID) -v $(PWD):$(PWD):rw,Z -v /etc/passwd:/etc/passwd -w $(PWD) \ + quay.io/goswagger/swagger:v0.24.0 generate client --template=stratoscale -f swagger.yaml \ + --template-dir=/templates/contrib + +########## +# Update # +########## + +update: build create-python-client GIT_REVISION=${GIT_REVISION} docker build --build-arg GIT_REVISION -f Dockerfile.bm-inventory . -t $(SERVICE) docker push $(SERVICE) -update-expirer: build - GIT_REVISION=${GIT_REVISION} docker build --build-arg GIT_REVISION -f Dockerfile.s3-object-expirer . -t $(OBJEXP) - docker push $(OBJEXP) +update-minimal: build-minimal create-python-client + GIT_REVISION=${GIT_REVISION} docker build --build-arg GIT_REVISION -f Dockerfile.bm-inventory . -t $(SERVICE) + +update-minikube: build create-python-client + eval $$(SHELL=$${SHELL:-/bin/sh} minikube docker-env) && \ + GIT_REVISION=${GIT_REVISION} docker build --build-arg GIT_REVISION -f Dockerfile.bm-inventory . 
-t $(SERVICE) + +create-python-client: build/bm-inventory-client-${GIT_REVISION}.tar.gz + +build/bm-inventory-client/setup.py: swagger.yaml + cp swagger.yaml $(BUILD_FOLDER) + echo '{"packageName" : "bm_inventory_client", "packageVersion": "1.0.0"}' > $(BUILD_FOLDER)/code-gen-config.json + sed -i '/pattern:/d' $(BUILD_FOLDER)/swagger.yaml + docker run --rm -u $(shell id -u $(USER)) -v $(BUILD_FOLDER):/swagger-api/out:Z \ + -v $(BUILD_FOLDER)/swagger.yaml:/swagger.yaml:ro,Z -v $(BUILD_FOLDER)/code-gen-config.json:/config.json:ro,Z \ + jimschubert/swagger-codegen-cli:2.3.1 generate --lang python --config /config.json --output ./bm-inventory-client/ --input-spec /swagger.yaml + rm -f $(BUILD_FOLDER)/swagger.yaml + +build/bm-inventory-client-%.tar.gz: build/bm-inventory-client/setup.py + rm -rf $@ + cd $(BUILD_FOLDER)/bm-inventory-client/ && python3 setup.py sdist --dist-dir $(BUILD_FOLDER) + rm -rf bm-inventory-client/bm-inventory-client.egg-info + +########## +# Deploy # +########## +ifdef DEPLOY_TAG + DEPLOY_TAG_OPTION = --deploy-tag "$(DEPLOY_TAG)" +else ifdef DEPLOY_MANIFEST_PATH + DEPLOY_TAG_OPTION = --deploy-manifest-path "$(DEPLOY_MANIFEST_PATH)" +else ifdef DEPLOY_MANIFEST_TAG + DEPLOY_TAG_OPTION = --deploy-manifest-tag "$(DEPLOY_MANIFEST_TAG)" +endif -deploy-all: create-build-dir deploy-namespace deploy-mariadb deploy-s3 deploy-service +deploy-all: create-build-dir deploy-namespace deploy-postgres deploy-s3 deploy-route53 deploy-service echo "Deployment done" -deploy-namespace: - python3 ./tools/deploy_namespace.py +deploy-ui: deploy-namespace + python3 ./tools/deploy_ui.py --target "$(TARGET)" --domain "$(INGRESS_DOMAIN)" --namespace "$(NAMESPACE)" $(DEPLOY_TAG_OPTION) + +deploy-namespace: create-build-dir + python3 ./tools/deploy_namespace.py --deploy-namespace $(APPLY_NAMESPACE) --namespace "$(NAMESPACE)" -deploy-s3-configmap: - python3 tools/deploy_scality_configmap.py +deploy-s3-secret: + python3 ./tools/deploy_scality_configmap.py --namespace 
"$(NAMESPACE)" deploy-s3: deploy-namespace - python3 ./tools/deploy_s3.py + python3 ./tools/deploy_s3.py --namespace "$(NAMESPACE)" sleep 5; # wait for service to get an address - make deploy-s3-configmap - python3 ./tools/create_default_s3_bucket.py + make deploy-s3-secret + +deploy-route53: deploy-namespace + python3 ./tools/deploy_route53.py --secret "$(ROUTE53_SECRET)" --namespace "$(NAMESPACE)" deploy-inventory-service-file: deploy-namespace - python3 ./tools/deploy_inventory_service.py + python3 ./tools/deploy_inventory_service.py --target "$(TARGET)" --domain "$(INGRESS_DOMAIN)" --namespace "$(NAMESPACE)" sleep 5; # wait for service to get an address deploy-service-requirements: deploy-namespace deploy-inventory-service-file - python3 ./tools/deploy_assisted_installer_configmap.py + python3 ./tools/deploy_assisted_installer_configmap.py --target "$(TARGET)" --domain "$(INGRESS_DOMAIN)" --base-dns-domains "$(BASE_DNS_DOMAINS)" --namespace "$(NAMESPACE)" $(DEPLOY_TAG_OPTION) deploy-service: deploy-namespace deploy-service-requirements deploy-role - python3 ./tools/deploy_assisted_installer.py - -deploy-expirer: deploy-role - python3 ./tools/deploy_s3_object_expirer.py + python3 ./tools/deploy_assisted_installer.py $(DEPLOY_TAG_OPTION) --namespace "$(NAMESPACE)" $(TEST_FLAGS) + python3 ./tools/wait_for_pod.py --app=bm-inventory --state=running --namespace "$(NAMESPACE)" deploy-role: deploy-namespace - python3 ./tools/deploy_role.py + python3 ./tools/deploy_role.py --namespace "$(NAMESPACE)" + +deploy-postgres: deploy-namespace + python3 ./tools/deploy_postgres.py --namespace "$(NAMESPACE)" + +deploy-test: + export SERVICE=quay.io/ocpmetal/bm-inventory:test && export TEST_FLAGS=--subsystem-test && \ + $(MAKE) update-minikube deploy-all -deploy-mariadb: deploy-namespace - python3 ./tools/deploy_mariadb.py +######## +# Test # +######## subsystem-run: test subsystem-clean test: INVENTORY=$(shell $(call get_service,bm-inventory) | sed 's/http:\/\///g') \ - 
DB_HOST=$(shell $(call get_service,mariadb) | sed 's/http:\/\///g' | cut -d ":" -f 1) \ - DB_PORT=$(shell $(call get_service,mariadb) | sed 's/http:\/\///g' | cut -d ":" -f 2) \ - go test -v ./subsystem/... -count=1 -ginkgo.focus=${FOCUS} -ginkgo.v + DB_HOST=$(shell $(call get_service,postgres) | sed 's/http:\/\///g' | cut -d ":" -f 1) \ + DB_PORT=$(shell $(call get_service,postgres) | sed 's/http:\/\///g' | cut -d ":" -f 2) \ + go test -v ./subsystem/... -count=1 -ginkgo.focus=${FOCUS} -ginkgo.v -timeout 20m + +deploy-olm: deploy-namespace + python3 ./tools/deploy_olm.py --target $(TARGET) + +deploy-prometheus: create-build-dir deploy-namespace + python3 ./tools/deploy_prometheus.py --target $(TARGET) --namespace "$(NAMESPACE)" + +deploy-grafana: create-build-dir + python3 ./tools/deploy_grafana.py --target $(TARGET) --namespace "$(NAMESPACE)" + +deploy-monitoring: deploy-olm deploy-prometheus deploy-grafana unit-test: - go test -v $(shell go list ./... | grep -v subsystem) -cover + docker stop postgres || true + docker run -d --rm --name postgres -e POSTGRES_PASSWORD=admin -e POSTGRES_USER=admin -p 127.0.0.1:5432:5432 postgres:12.3-alpine -c 'max_connections=10000' + until PGPASSWORD=admin pg_isready -U admin --dbname postgres --host 127.0.0.1 --port 5432; do sleep 1; done + SKIP_UT_DB=1 go test -v $(or ${TEST}, ${TEST}, $(shell go list ./... 
| grep -v subsystem)) -cover || (docker stop postgres && /bin/false) + docker stop postgres + +######### +# Clean # +######### + +clear-all: clean subsystem-clean clear-deployment + +clean: + -rm -rf $(BUILD_FOLDER) subsystem-clean: - $(KUBECTL) get pod -o name | grep create-image | xargs $(KUBECTL) delete 1> /dev/null ; true - $(KUBECTL) get pod -o name | grep generate-kubeconfig | xargs $(KUBECTL) delete 1> /dev/null ; true + -$(KUBECTL) get pod -o name | grep create-image | xargs $(KUBECTL) delete 1> /dev/null || true + -$(KUBECTL) get pod -o name | grep generate-kubeconfig | xargs $(KUBECTL) delete 1> /dev/null || true clear-deployment: - python3 ./tools/clear_deployment.py + -python3 ./tools/clear_deployment.py --delete-namespace $(APPLY_NAMESPACE) --namespace "$(NAMESPACE)" || true diff --git a/README.md b/README.md index 2b6cc9a1e..46c1e995a 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,12 @@ [![Actions Status](https://github.com/filanov/bm-inventory/workflows/unit-test/badge.svg)](https://github.com/filanov/bm-inventory/actions) + + +[![Actions Status](https://raw.githubusercontent.com/swagger-api/swagger-ui/master/src/img/logo_small.png)](https://filanov.github.io/bm-inventory/) # bm-inventory -1. docker +## Prerequisites + +1. Docker 1. skipper https://github.com/stratoscale/skipper 1. minikube (for tests) 1. kubectl @@ -9,81 +14,204 @@ ## First Setup -To push your build target to a docker registry you first need to change the default target. +To push your build target to a Docker registry you first need to change the default target. 1. Create a quay.io or Docker Hub account if you don't already have one. These instructions refer to quay.io, Docker Hub is similar. -1. Create a repository called bm-inventory -1. Make sure you have your~/.docker/config.json file set up to point to your account. For quay.io, you can go to quay.io -> User Settings, and click "Generate Encrypted Password" under "Docker CLI Password". -1. 
Login to quay.io using `docker login quay.io` -1. Export the `SERVICE` environment variable to your docker registry, and pass a tag of your choice, e.g., "test": +1. Create a repository called bm-inventory. +1. Make sure you have your `~/.docker/config.json` file set up to point to your account. For quay.io, you can go to quay.io -> User Settings, and click "Generate Encrypted Password" under "Docker CLI Password". +1. Login to quay.io using `docker login quay.io`. +1. Export the `SERVICE` environment variable to your Docker registry, and pass a tag of your choice, e.g., "test": -```shell script +```sh export SERVICE=quay.io//bm-inventory: ``` -Do the same for s3-object-expirer: -```shell script -export OBJEXP=quay.io//s3-object-expirer: -``` - For the first build of the build container run: -`skipper build bm-inventory-build` +```shell +skipper build bm-inventory-build +``` ## Build -`skipper make all` +```shell +skipper make all +``` ### Generate code after swagger changes -After every change in the api (swagger.yaml) the code should be generated and the build must pass +After every change in the API (`swagger.yaml`) the code should be generated and the build must pass. + +```shell +skipper make generate-from-swagger +``` -`skipper make generate-from-swagger` +## Test -## Tests -Pre-configuration -1. Run minikube on your system. -2. Deploy service, DB and other requirements `skipper make deploy-all` -3. Wait for all the pods to be up. +#### Pre-configuration + - Run minikube on your system. 
+ - Deploy services `skipper make deploy-test` -Running the tests: +### Run system tests + +```shell +skipper make test +``` + +### Run system tests with regex + +```shell +skipper make test FOCUS=versions +``` + +### Run only unit tests + +```shell +skipper make unit-test +``` -`skipper make subsystem-run` +### Run unit tests for specific package + +```shell +skipper make unit-test TEST=./internal/host +``` ### Update service for the subsystem tests -if you are making changes and don't want to deploy everything once again you can simple run this command +if you are making changes and don't want to deploy everything once again you can simply run this command: -`skipper make update && kubectl get pod --namespace assisted-installer -o name | grep bm-inventory | xargs kubectl delete --namespace assisted-installer` +```shell +skipper make update && kubectl get pod --namespace assisted-installer -o name | grep bm-inventory | xargs kubectl delete --namespace assisted-installer +``` -if will build and push a new image of the service to your docker registry, then delete the service pod from minikube, the deployment will handle the update and pull the new image to start the service again. +It will build and push a new image of the service to your Docker registry, then delete the service pod from minikube, the deployment will handle the update and pull the new image to start the service again. ## Deployment +### Deploy to minikube + The deployment is a system deployment, it contains all the components the service need for all the operations to work (if implemented). S3 service (scality), DB and will use the image generator to create the images in the deployed S3 and create relevant bucket in S3. -`skipper make deploy-all` +```shell +skipper make deploy-all +``` + +### Deploy to OpenShift + +Besides default minikube deployment, the service support deployment to OpenShift cluster using ingress as the access point to the service. 
+ +```shell +skipper make deploy-all TARGET=oc-ingress +``` + +This deployment option have multiple optional parameters that should be used in case you are not the Admin of the cluster: +1. `APPLY_NAMESPACE` - True by default. Will try to deploy "assisted-installer" namespace, if you are not the Admin of the cluster or maybe you don't have permissions for this operation you may skip namespace deployment. +1. `INGRESS_DOMAIN` - By default deployment script will try to get the domain prefix from OpenShift ingress controller. If you don't have access to it then you may specify the domain yourself. For example: `apps.ocp.prod.psi.redhat.com` + +To set the parameters simply add them in the end of the command, for example +```shell +skipper make deploy-all TARGET=oc-ingress APPLY_NAMESPACE=False INGRESS_DOMAIN=apps.ocp.prod.psi.redhat.com +``` + +Note: All deployment configurations are under the `deploy` directory in case more detailed configuration is required. + +### Deploy UI + +This service support optional UI deployment. +```shell +skipper make deploy-ui +``` + +* In case you are using *podman* run the above command without `skipper`. + + +For OpenShift users, look at the service deployment options on OpenShift platform. 
+ +### Deploy Monitoring + +This will allow you to deploy Prometheus and Grafana already integrated with Assisted installer: + +- On Minikube + + ```shell + # Step by step + make deploy-olm + make deploy-prometheus + make deploy-grafana + + # Or just all-in + make deploy-monitoring + ``` + +- On Openshift + + ```shell + # Step by step + make deploy-prometheus TARGET=oc-ingress APPLY_NAMESPACE=false + make deploy-grafana TARGET=oc-ingress APPLY_NAMESPACE=false + + # Or just all-in + make deploy-monitoring TARGET=oc-ingress APPLY_NAMESPACE=false + ``` + +NOTE: To expose the monitoring UI's on your local environment you could follow these steps + +```shell +kubectl config set-context $(kubectl config current-context) --namespace assisted-installer + +# To expose Prometheus +kubectl port-forward svc/prometheus-k8s 9090:9090 + +# To expose Grafana +kubectl port-forward svc/grafana 3000:3000 +``` + +Now you just need to access [http://127.0.0.1:3000](http://127.0.0.1:3000) to access to your Grafana deployment or [http://127.0.0.1:9090](http://127.0.0.1:9090) for Prometheus. + +### Deploy by tag + +This feature is for internal usage and not recommended to use by external users. +This option will select the required tag that will be used for each dependency. +If deploy-all use a new tag the update will be done automatically and there is no need to reboot/rollout any deployment. + +Deploy images according to the manifest: +``` +skipper make deploy-all DEPLOY_MANIFEST_PATH=./assisted-installer.yaml +``` + +Deploy images according to the manifest in the assisted-installer-deployment repo (require git tag/branch/hash): +``` +skipper make deploy-all DEPLOY_MANIFEST_TAG=master +``` + +Deploy all the images with the same tag. +The tag is not validated, so you need to make sure it actually exists. 
+``` +skipper make deploy-all DEPLOY_TAG= +``` + +Default tag is latest + ## Troubleshooting A document that can assist troubleshooting: [link](https://docs.google.com/document/d/1WDc5LQjNnqpznM9YFTGb9Bg1kqPVckgGepS4KBxGSqw) -## Linked repositories +## Linked repositories * #### coreos_installation_iso: - https://github.com/oshercc/coreos_installation_iso + https://github.com/oshercc/coreos_installation_iso - Image in charge of generating the Fedora-coreOs image used to install the host with the relevant ignition file - - Image is uploaded to deployed S3 under the name template "installer-image-" + Image in charge of generating the Fedora-coreOs image used to install the host with the relevant ignition file.\ + Image is uploaded to deployed S3 under the name template "installer-image-". * #### ignition manifests and kubeconfig generate: - + https://github.com/oshercc/ignition-manifests-and-kubeconfig-generate - - Image in charge of generating the fallowing installation files: + + Image in charge of generating the following installation files: * kubeconfig * bootstrap.ign * master.ign * worker.ign * metadata.json * kubeadmin-password - - Files are uploaded to deployed S3 under the name template "/" + + Files are uploaded to deployed S3 under the name template "/". 
diff --git a/build_deploy.sh b/build_deploy.sh new file mode 100755 index 000000000..6417664ac --- /dev/null +++ b/build_deploy.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# required for `skipper` according to +# https://github.com/Stratoscale/skipper/blob/upstream/README.md#python3-environment +export LC_ALL="en_US.UTF-8" +export LANG="en_US.UTF-8" + +TAG=$(git rev-parse --short=7 HEAD) +BM_INVENTORY_IMAGE="quay.io/app-sre/bm-inventory" + +SERVICE="${BM_INVENTORY_IMAGE}:latest" skipper make update-minimal +docker tag "${BM_INVENTORY_IMAGE}:latest" "${BM_INVENTORY_IMAGE}:${TAG}" + +#BM_INVENTORY_BUILD_IMAGE="quay.io/app-sre/bm-inventory-build" +# +#docker build -t "${BM_INVENTORY_BUILD_IMAGE}:latest" -f Dockerfile.bm-inventory-build . +#docker tag "${BM_INVENTORY_BUILD_IMAGE}:latest" "${BM_INVENTORY_BUILD_IMAGE}:${TAG}" + +#OBJ_EXPIRER_IMAGE="quay.io/app-sre/s3-object-expirer" +# +#docker build -t "${OBJ_EXPIRER_IMAGE}:latest" -f Dockerfile.s3-object-expirer . +#docker tag "${OBJ_EXPIRER_IMAGE}:latest" "${OBJ_EXPIRER_IMAGE}:${TAG}" + +DOCKER_CONF="${PWD}/.docker" +mkdir -p "${DOCKER_CONF}" +docker --config="${DOCKER_CONF}" login -u="${QUAY_USER}" -p="${QUAY_TOKEN}" quay.io + +docker --config="${DOCKER_CONF}" push "${BM_INVENTORY_IMAGE}:latest" +docker --config="${DOCKER_CONF}" push "${BM_INVENTORY_IMAGE}:${TAG}" + +#docker --config="${DOCKER_CONF}" push "${BM_INVENTORY_BUILD_IMAGE}:latest" +#docker --config="${DOCKER_CONF}" push "${BM_INVENTORY_BUILD_IMAGE}:${TAG}" +# +#docker --config="${DOCKER_CONF}" push "${OBJ_EXPIRER_IMAGE}:latest" +#docker --config="${DOCKER_CONF}" push "${OBJ_EXPIRER_IMAGE}:${TAG}" diff --git a/client/assisted_install_client.go b/client/assisted_install_client.go index d7eaa5db7..8cbe7ad12 100644 --- a/client/assisted_install_client.go +++ b/client/assisted_install_client.go @@ -15,6 +15,8 @@ import ( "github.com/filanov/bm-inventory/client/events" "github.com/filanov/bm-inventory/client/installer" + 
"github.com/filanov/bm-inventory/client/managed_domains" + "github.com/filanov/bm-inventory/client/versions" ) const ( @@ -61,12 +63,16 @@ func New(c Config) *AssistedInstall { cli.Transport = transport cli.Events = events.New(transport, strfmt.Default, c.AuthInfo) cli.Installer = installer.New(transport, strfmt.Default, c.AuthInfo) + cli.ManagedDomains = managed_domains.New(transport, strfmt.Default, c.AuthInfo) + cli.Versions = versions.New(transport, strfmt.Default, c.AuthInfo) return cli } // AssistedInstall is a client for assisted install type AssistedInstall struct { - Events *events.Client - Installer *installer.Client - Transport runtime.ClientTransport + Events *events.Client + Installer *installer.Client + ManagedDomains *managed_domains.Client + Versions *versions.Client + Transport runtime.ClientTransport } diff --git a/client/events/mock_API.go b/client/events/mock_API.go deleted file mode 100644 index b1f43d9d9..000000000 --- a/client/events/mock_API.go +++ /dev/null @@ -1,37 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package events - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" -) - -// MockAPI is an autogenerated mock type for the API type -type MockAPI struct { - mock.Mock -} - -// ListEvents provides a mock function with given fields: ctx, params -func (_m *MockAPI) ListEvents(ctx context.Context, params *ListEventsParams) (*ListEventsOK, error) { - ret := _m.Called(ctx, params) - - var r0 *ListEventsOK - if rf, ok := ret.Get(0).(func(context.Context, *ListEventsParams) *ListEventsOK); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*ListEventsOK) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *ListEventsParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/client/installer/cancel_installation_parameters.go b/client/installer/cancel_installation_parameters.go new file mode 100644 index 000000000..df2dbaced --- /dev/null +++ b/client/installer/cancel_installation_parameters.go @@ -0,0 +1,132 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCancelInstallationParams creates a new CancelInstallationParams object +// with the default values initialized. 
+func NewCancelInstallationParams() *CancelInstallationParams { + var () + return &CancelInstallationParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCancelInstallationParamsWithTimeout creates a new CancelInstallationParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCancelInstallationParamsWithTimeout(timeout time.Duration) *CancelInstallationParams { + var () + return &CancelInstallationParams{ + + timeout: timeout, + } +} + +// NewCancelInstallationParamsWithContext creates a new CancelInstallationParams object +// with the default values initialized, and the ability to set a context for a request +func NewCancelInstallationParamsWithContext(ctx context.Context) *CancelInstallationParams { + var () + return &CancelInstallationParams{ + + Context: ctx, + } +} + +// NewCancelInstallationParamsWithHTTPClient creates a new CancelInstallationParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCancelInstallationParamsWithHTTPClient(client *http.Client) *CancelInstallationParams { + var () + return &CancelInstallationParams{ + HTTPClient: client, + } +} + +/*CancelInstallationParams contains all the parameters to send to the API endpoint +for the cancel installation operation typically these are written to a http.Request +*/ +type CancelInstallationParams struct { + + /*ClusterID*/ + ClusterID strfmt.UUID + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cancel installation params +func (o *CancelInstallationParams) WithTimeout(timeout time.Duration) *CancelInstallationParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cancel installation params +func (o *CancelInstallationParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cancel installation params +func (o 
*CancelInstallationParams) WithContext(ctx context.Context) *CancelInstallationParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cancel installation params +func (o *CancelInstallationParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cancel installation params +func (o *CancelInstallationParams) WithHTTPClient(client *http.Client) *CancelInstallationParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cancel installation params +func (o *CancelInstallationParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClusterID adds the clusterID to the cancel installation params +func (o *CancelInstallationParams) WithClusterID(clusterID strfmt.UUID) *CancelInstallationParams { + o.SetClusterID(clusterID) + return o +} + +// SetClusterID adds the clusterId to the cancel installation params +func (o *CancelInstallationParams) SetClusterID(clusterID strfmt.UUID) { + o.ClusterID = clusterID +} + +// WriteToRequest writes these params to a swagger request +func (o *CancelInstallationParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cluster_id + if err := r.SetPathParam("cluster_id", o.ClusterID.String()); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/installer/cancel_installation_responses.go b/client/installer/cancel_installation_responses.go new file mode 100644 index 000000000..63e05011c --- /dev/null +++ b/client/installer/cancel_installation_responses.go @@ -0,0 +1,186 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/filanov/bm-inventory/models" +) + +// CancelInstallationReader is a Reader for the CancelInstallation structure. +type CancelInstallationReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CancelInstallationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 202: + result := NewCancelInstallationAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 404: + result := NewCancelInstallationNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 409: + result := NewCancelInstallationConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCancelInstallationInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewCancelInstallationAccepted creates a CancelInstallationAccepted with default headers values +func NewCancelInstallationAccepted() *CancelInstallationAccepted { + return &CancelInstallationAccepted{} +} + +/*CancelInstallationAccepted handles this case with default header values. + +Success. 
+*/ +type CancelInstallationAccepted struct { + Payload *models.Cluster +} + +func (o *CancelInstallationAccepted) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/actions/cancel][%d] cancelInstallationAccepted %+v", 202, o.Payload) +} + +func (o *CancelInstallationAccepted) GetPayload() *models.Cluster { + return o.Payload +} + +func (o *CancelInstallationAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Cluster) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCancelInstallationNotFound creates a CancelInstallationNotFound with default headers values +func NewCancelInstallationNotFound() *CancelInstallationNotFound { + return &CancelInstallationNotFound{} +} + +/*CancelInstallationNotFound handles this case with default header values. + +Error. +*/ +type CancelInstallationNotFound struct { + Payload *models.Error +} + +func (o *CancelInstallationNotFound) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/actions/cancel][%d] cancelInstallationNotFound %+v", 404, o.Payload) +} + +func (o *CancelInstallationNotFound) GetPayload() *models.Error { + return o.Payload +} + +func (o *CancelInstallationNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCancelInstallationConflict creates a CancelInstallationConflict with default headers values +func NewCancelInstallationConflict() *CancelInstallationConflict { + return &CancelInstallationConflict{} +} + +/*CancelInstallationConflict handles this case with default header values. + +Error. 
+*/ +type CancelInstallationConflict struct { + Payload *models.Error +} + +func (o *CancelInstallationConflict) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/actions/cancel][%d] cancelInstallationConflict %+v", 409, o.Payload) +} + +func (o *CancelInstallationConflict) GetPayload() *models.Error { + return o.Payload +} + +func (o *CancelInstallationConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCancelInstallationInternalServerError creates a CancelInstallationInternalServerError with default headers values +func NewCancelInstallationInternalServerError() *CancelInstallationInternalServerError { + return &CancelInstallationInternalServerError{} +} + +/*CancelInstallationInternalServerError handles this case with default header values. + +Error. 
+*/ +type CancelInstallationInternalServerError struct { + Payload *models.Error +} + +func (o *CancelInstallationInternalServerError) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/actions/cancel][%d] cancelInstallationInternalServerError %+v", 500, o.Payload) +} + +func (o *CancelInstallationInternalServerError) GetPayload() *models.Error { + return o.Payload +} + +func (o *CancelInstallationInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/client/installer/complete_installation_parameters.go b/client/installer/complete_installation_parameters.go new file mode 100644 index 000000000..d6fb4ee3c --- /dev/null +++ b/client/installer/complete_installation_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/filanov/bm-inventory/models" +) + +// NewCompleteInstallationParams creates a new CompleteInstallationParams object +// with the default values initialized. 
+func NewCompleteInstallationParams() *CompleteInstallationParams { + var () + return &CompleteInstallationParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCompleteInstallationParamsWithTimeout creates a new CompleteInstallationParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCompleteInstallationParamsWithTimeout(timeout time.Duration) *CompleteInstallationParams { + var () + return &CompleteInstallationParams{ + + timeout: timeout, + } +} + +// NewCompleteInstallationParamsWithContext creates a new CompleteInstallationParams object +// with the default values initialized, and the ability to set a context for a request +func NewCompleteInstallationParamsWithContext(ctx context.Context) *CompleteInstallationParams { + var () + return &CompleteInstallationParams{ + + Context: ctx, + } +} + +// NewCompleteInstallationParamsWithHTTPClient creates a new CompleteInstallationParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCompleteInstallationParamsWithHTTPClient(client *http.Client) *CompleteInstallationParams { + var () + return &CompleteInstallationParams{ + HTTPClient: client, + } +} + +/*CompleteInstallationParams contains all the parameters to send to the API endpoint +for the complete installation operation typically these are written to a http.Request +*/ +type CompleteInstallationParams struct { + + /*ClusterID*/ + ClusterID strfmt.UUID + /*CompletionParams*/ + CompletionParams *models.CompletionParams + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the complete installation params +func (o *CompleteInstallationParams) WithTimeout(timeout time.Duration) *CompleteInstallationParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the complete installation params +func (o *CompleteInstallationParams) SetTimeout(timeout 
time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the complete installation params +func (o *CompleteInstallationParams) WithContext(ctx context.Context) *CompleteInstallationParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the complete installation params +func (o *CompleteInstallationParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the complete installation params +func (o *CompleteInstallationParams) WithHTTPClient(client *http.Client) *CompleteInstallationParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the complete installation params +func (o *CompleteInstallationParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClusterID adds the clusterID to the complete installation params +func (o *CompleteInstallationParams) WithClusterID(clusterID strfmt.UUID) *CompleteInstallationParams { + o.SetClusterID(clusterID) + return o +} + +// SetClusterID adds the clusterId to the complete installation params +func (o *CompleteInstallationParams) SetClusterID(clusterID strfmt.UUID) { + o.ClusterID = clusterID +} + +// WithCompletionParams adds the completionParams to the complete installation params +func (o *CompleteInstallationParams) WithCompletionParams(completionParams *models.CompletionParams) *CompleteInstallationParams { + o.SetCompletionParams(completionParams) + return o +} + +// SetCompletionParams adds the completionParams to the complete installation params +func (o *CompleteInstallationParams) SetCompletionParams(completionParams *models.CompletionParams) { + o.CompletionParams = completionParams +} + +// WriteToRequest writes these params to a swagger request +func (o *CompleteInstallationParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param 
cluster_id + if err := r.SetPathParam("cluster_id", o.ClusterID.String()); err != nil { + return err + } + + if o.CompletionParams != nil { + if err := r.SetBodyParam(o.CompletionParams); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/installer/complete_installation_responses.go b/client/installer/complete_installation_responses.go new file mode 100644 index 000000000..f9db9b317 --- /dev/null +++ b/client/installer/complete_installation_responses.go @@ -0,0 +1,186 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/filanov/bm-inventory/models" +) + +// CompleteInstallationReader is a Reader for the CompleteInstallation structure. +type CompleteInstallationReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CompleteInstallationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 202: + result := NewCompleteInstallationAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 404: + result := NewCompleteInstallationNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 409: + result := NewCompleteInstallationConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewCompleteInstallationInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewCompleteInstallationAccepted creates a CompleteInstallationAccepted with default headers values +func NewCompleteInstallationAccepted() *CompleteInstallationAccepted { + return &CompleteInstallationAccepted{} +} + +/*CompleteInstallationAccepted handles this case with default header values. + +Success. 
+*/ +type CompleteInstallationAccepted struct { + Payload *models.Cluster +} + +func (o *CompleteInstallationAccepted) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/actions/complete_installation][%d] completeInstallationAccepted %+v", 202, o.Payload) +} + +func (o *CompleteInstallationAccepted) GetPayload() *models.Cluster { + return o.Payload +} + +func (o *CompleteInstallationAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Cluster) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCompleteInstallationNotFound creates a CompleteInstallationNotFound with default headers values +func NewCompleteInstallationNotFound() *CompleteInstallationNotFound { + return &CompleteInstallationNotFound{} +} + +/*CompleteInstallationNotFound handles this case with default header values. + +Error. 
+*/ +type CompleteInstallationNotFound struct { + Payload *models.Error +} + +func (o *CompleteInstallationNotFound) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/actions/complete_installation][%d] completeInstallationNotFound %+v", 404, o.Payload) +} + +func (o *CompleteInstallationNotFound) GetPayload() *models.Error { + return o.Payload +} + +func (o *CompleteInstallationNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCompleteInstallationConflict creates a CompleteInstallationConflict with default headers values +func NewCompleteInstallationConflict() *CompleteInstallationConflict { + return &CompleteInstallationConflict{} +} + +/*CompleteInstallationConflict handles this case with default header values. + +Error. 
+*/ +type CompleteInstallationConflict struct { + Payload *models.Error +} + +func (o *CompleteInstallationConflict) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/actions/complete_installation][%d] completeInstallationConflict %+v", 409, o.Payload) +} + +func (o *CompleteInstallationConflict) GetPayload() *models.Error { + return o.Payload +} + +func (o *CompleteInstallationConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCompleteInstallationInternalServerError creates a CompleteInstallationInternalServerError with default headers values +func NewCompleteInstallationInternalServerError() *CompleteInstallationInternalServerError { + return &CompleteInstallationInternalServerError{} +} + +/*CompleteInstallationInternalServerError handles this case with default header values. + +Error. 
+*/ +type CompleteInstallationInternalServerError struct { + Payload *models.Error +} + +func (o *CompleteInstallationInternalServerError) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/actions/complete_installation][%d] completeInstallationInternalServerError %+v", 500, o.Payload) +} + +func (o *CompleteInstallationInternalServerError) GetPayload() *models.Error { + return o.Payload +} + +func (o *CompleteInstallationInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/client/installer/disable_host_responses.go b/client/installer/disable_host_responses.go index 34a05a2d7..7165cd6a5 100644 --- a/client/installer/disable_host_responses.go +++ b/client/installer/disable_host_responses.go @@ -23,8 +23,8 @@ type DisableHostReader struct { // ReadResponse reads a server response into the received o. func (o *DisableHostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { - case 204: - result := NewDisableHostNoContent() + case 200: + result := NewDisableHostOK() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } @@ -53,23 +53,35 @@ func (o *DisableHostReader) ReadResponse(response runtime.ClientResponse, consum } } -// NewDisableHostNoContent creates a DisableHostNoContent with default headers values -func NewDisableHostNoContent() *DisableHostNoContent { - return &DisableHostNoContent{} +// NewDisableHostOK creates a DisableHostOK with default headers values +func NewDisableHostOK() *DisableHostOK { + return &DisableHostOK{} } -/*DisableHostNoContent handles this case with default header values. +/*DisableHostOK handles this case with default header values. Success. 
*/ -type DisableHostNoContent struct { +type DisableHostOK struct { + Payload *models.Host } -func (o *DisableHostNoContent) Error() string { - return fmt.Sprintf("[DELETE /clusters/{cluster_id}/hosts/{host_id}/actions/enable][%d] disableHostNoContent ", 204) +func (o *DisableHostOK) Error() string { + return fmt.Sprintf("[DELETE /clusters/{cluster_id}/hosts/{host_id}/actions/enable][%d] disableHostOK %+v", 200, o.Payload) } -func (o *DisableHostNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { +func (o *DisableHostOK) GetPayload() *models.Host { + return o.Payload +} + +func (o *DisableHostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Host) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } return nil } diff --git a/client/installer/download_cluster_kubeconfig_parameters.go b/client/installer/download_cluster_kubeconfig_parameters.go new file mode 100644 index 000000000..86faac64f --- /dev/null +++ b/client/installer/download_cluster_kubeconfig_parameters.go @@ -0,0 +1,132 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewDownloadClusterKubeconfigParams creates a new DownloadClusterKubeconfigParams object +// with the default values initialized. 
+func NewDownloadClusterKubeconfigParams() *DownloadClusterKubeconfigParams { + var () + return &DownloadClusterKubeconfigParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewDownloadClusterKubeconfigParamsWithTimeout creates a new DownloadClusterKubeconfigParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewDownloadClusterKubeconfigParamsWithTimeout(timeout time.Duration) *DownloadClusterKubeconfigParams { + var () + return &DownloadClusterKubeconfigParams{ + + timeout: timeout, + } +} + +// NewDownloadClusterKubeconfigParamsWithContext creates a new DownloadClusterKubeconfigParams object +// with the default values initialized, and the ability to set a context for a request +func NewDownloadClusterKubeconfigParamsWithContext(ctx context.Context) *DownloadClusterKubeconfigParams { + var () + return &DownloadClusterKubeconfigParams{ + + Context: ctx, + } +} + +// NewDownloadClusterKubeconfigParamsWithHTTPClient creates a new DownloadClusterKubeconfigParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewDownloadClusterKubeconfigParamsWithHTTPClient(client *http.Client) *DownloadClusterKubeconfigParams { + var () + return &DownloadClusterKubeconfigParams{ + HTTPClient: client, + } +} + +/*DownloadClusterKubeconfigParams contains all the parameters to send to the API endpoint +for the download cluster kubeconfig operation typically these are written to a http.Request +*/ +type DownloadClusterKubeconfigParams struct { + + /*ClusterID*/ + ClusterID strfmt.UUID + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the download cluster kubeconfig params +func (o *DownloadClusterKubeconfigParams) WithTimeout(timeout time.Duration) *DownloadClusterKubeconfigParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the download cluster kubeconfig params +func 
(o *DownloadClusterKubeconfigParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the download cluster kubeconfig params +func (o *DownloadClusterKubeconfigParams) WithContext(ctx context.Context) *DownloadClusterKubeconfigParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the download cluster kubeconfig params +func (o *DownloadClusterKubeconfigParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the download cluster kubeconfig params +func (o *DownloadClusterKubeconfigParams) WithHTTPClient(client *http.Client) *DownloadClusterKubeconfigParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the download cluster kubeconfig params +func (o *DownloadClusterKubeconfigParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClusterID adds the clusterID to the download cluster kubeconfig params +func (o *DownloadClusterKubeconfigParams) WithClusterID(clusterID strfmt.UUID) *DownloadClusterKubeconfigParams { + o.SetClusterID(clusterID) + return o +} + +// SetClusterID adds the clusterId to the download cluster kubeconfig params +func (o *DownloadClusterKubeconfigParams) SetClusterID(clusterID strfmt.UUID) { + o.ClusterID = clusterID +} + +// WriteToRequest writes these params to a swagger request +func (o *DownloadClusterKubeconfigParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cluster_id + if err := r.SetPathParam("cluster_id", o.ClusterID.String()); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/client/installer/download_cluster_kubeconfig_responses.go b/client/installer/download_cluster_kubeconfig_responses.go new file mode 100644 index 000000000..a81a9addb --- /dev/null +++ b/client/installer/download_cluster_kubeconfig_responses.go @@ -0,0 +1,187 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/filanov/bm-inventory/models" +) + +// DownloadClusterKubeconfigReader is a Reader for the DownloadClusterKubeconfig structure. +type DownloadClusterKubeconfigReader struct { + formats strfmt.Registry + writer io.Writer +} + +// ReadResponse reads a server response into the received o. +func (o *DownloadClusterKubeconfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewDownloadClusterKubeconfigOK(o.writer) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 404: + result := NewDownloadClusterKubeconfigNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 409: + result := NewDownloadClusterKubeconfigConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewDownloadClusterKubeconfigInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewDownloadClusterKubeconfigOK creates a DownloadClusterKubeconfigOK with default 
headers values +func NewDownloadClusterKubeconfigOK(writer io.Writer) *DownloadClusterKubeconfigOK { + return &DownloadClusterKubeconfigOK{ + Payload: writer, + } +} + +/*DownloadClusterKubeconfigOK handles this case with default header values. + +Success. +*/ +type DownloadClusterKubeconfigOK struct { + Payload io.Writer +} + +func (o *DownloadClusterKubeconfigOK) Error() string { + return fmt.Sprintf("[GET /clusters/{cluster_id}/downloads/kubeconfig][%d] downloadClusterKubeconfigOK %+v", 200, o.Payload) +} + +func (o *DownloadClusterKubeconfigOK) GetPayload() io.Writer { + return o.Payload +} + +func (o *DownloadClusterKubeconfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDownloadClusterKubeconfigNotFound creates a DownloadClusterKubeconfigNotFound with default headers values +func NewDownloadClusterKubeconfigNotFound() *DownloadClusterKubeconfigNotFound { + return &DownloadClusterKubeconfigNotFound{} +} + +/*DownloadClusterKubeconfigNotFound handles this case with default header values. + +Error. 
+*/ +type DownloadClusterKubeconfigNotFound struct { + Payload *models.Error +} + +func (o *DownloadClusterKubeconfigNotFound) Error() string { + return fmt.Sprintf("[GET /clusters/{cluster_id}/downloads/kubeconfig][%d] downloadClusterKubeconfigNotFound %+v", 404, o.Payload) +} + +func (o *DownloadClusterKubeconfigNotFound) GetPayload() *models.Error { + return o.Payload +} + +func (o *DownloadClusterKubeconfigNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDownloadClusterKubeconfigConflict creates a DownloadClusterKubeconfigConflict with default headers values +func NewDownloadClusterKubeconfigConflict() *DownloadClusterKubeconfigConflict { + return &DownloadClusterKubeconfigConflict{} +} + +/*DownloadClusterKubeconfigConflict handles this case with default header values. + +Error. 
+*/ +type DownloadClusterKubeconfigConflict struct { + Payload *models.Error +} + +func (o *DownloadClusterKubeconfigConflict) Error() string { + return fmt.Sprintf("[GET /clusters/{cluster_id}/downloads/kubeconfig][%d] downloadClusterKubeconfigConflict %+v", 409, o.Payload) +} + +func (o *DownloadClusterKubeconfigConflict) GetPayload() *models.Error { + return o.Payload +} + +func (o *DownloadClusterKubeconfigConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDownloadClusterKubeconfigInternalServerError creates a DownloadClusterKubeconfigInternalServerError with default headers values +func NewDownloadClusterKubeconfigInternalServerError() *DownloadClusterKubeconfigInternalServerError { + return &DownloadClusterKubeconfigInternalServerError{} +} + +/*DownloadClusterKubeconfigInternalServerError handles this case with default header values. + +Error. 
+*/ +type DownloadClusterKubeconfigInternalServerError struct { + Payload *models.Error +} + +func (o *DownloadClusterKubeconfigInternalServerError) Error() string { + return fmt.Sprintf("[GET /clusters/{cluster_id}/downloads/kubeconfig][%d] downloadClusterKubeconfigInternalServerError %+v", 500, o.Payload) +} + +func (o *DownloadClusterKubeconfigInternalServerError) GetPayload() *models.Error { + return o.Payload +} + +func (o *DownloadClusterKubeconfigInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/client/installer/enable_host_responses.go b/client/installer/enable_host_responses.go index fe0dc67f4..2bc9f380e 100644 --- a/client/installer/enable_host_responses.go +++ b/client/installer/enable_host_responses.go @@ -23,8 +23,8 @@ type EnableHostReader struct { // ReadResponse reads a server response into the received o. func (o *EnableHostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { - case 204: - result := NewEnableHostNoContent() + case 200: + result := NewEnableHostOK() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } @@ -53,23 +53,35 @@ func (o *EnableHostReader) ReadResponse(response runtime.ClientResponse, consume } } -// NewEnableHostNoContent creates a EnableHostNoContent with default headers values -func NewEnableHostNoContent() *EnableHostNoContent { - return &EnableHostNoContent{} +// NewEnableHostOK creates a EnableHostOK with default headers values +func NewEnableHostOK() *EnableHostOK { + return &EnableHostOK{} } -/*EnableHostNoContent handles this case with default header values. +/*EnableHostOK handles this case with default header values. Success. 
*/ -type EnableHostNoContent struct { +type EnableHostOK struct { + Payload *models.Host } -func (o *EnableHostNoContent) Error() string { - return fmt.Sprintf("[POST /clusters/{cluster_id}/hosts/{host_id}/actions/enable][%d] enableHostNoContent ", 204) +func (o *EnableHostOK) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/hosts/{host_id}/actions/enable][%d] enableHostOK %+v", 200, o.Payload) } -func (o *EnableHostNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { +func (o *EnableHostOK) GetPayload() *models.Host { + return o.Payload +} + +func (o *EnableHostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Host) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } return nil } diff --git a/client/installer/generate_cluster_i_s_o_responses.go b/client/installer/generate_cluster_i_s_o_responses.go index f22170db8..4fefc1c1f 100644 --- a/client/installer/generate_cluster_i_s_o_responses.go +++ b/client/installer/generate_cluster_i_s_o_responses.go @@ -168,14 +168,26 @@ func NewGenerateClusterISOConflict() *GenerateClusterISOConflict { Error. 
*/ type GenerateClusterISOConflict struct { + Payload *models.Error } func (o *GenerateClusterISOConflict) Error() string { - return fmt.Sprintf("[POST /clusters/{cluster_id}/downloads/image][%d] generateClusterISOConflict ", 409) + return fmt.Sprintf("[POST /clusters/{cluster_id}/downloads/image][%d] generateClusterISOConflict %+v", 409, o.Payload) +} + +func (o *GenerateClusterISOConflict) GetPayload() *models.Error { + return o.Payload } func (o *GenerateClusterISOConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + return nil } diff --git a/client/installer/get_free_addresses_parameters.go b/client/installer/get_free_addresses_parameters.go new file mode 100644 index 000000000..f36b4a24b --- /dev/null +++ b/client/installer/get_free_addresses_parameters.go @@ -0,0 +1,225 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetFreeAddressesParams creates a new GetFreeAddressesParams object +// with the default values initialized. 
+func NewGetFreeAddressesParams() *GetFreeAddressesParams { + var ( + limitDefault = int64(8000) + ) + return &GetFreeAddressesParams{ + Limit: &limitDefault, + + timeout: cr.DefaultTimeout, + } +} + +// NewGetFreeAddressesParamsWithTimeout creates a new GetFreeAddressesParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewGetFreeAddressesParamsWithTimeout(timeout time.Duration) *GetFreeAddressesParams { + var ( + limitDefault = int64(8000) + ) + return &GetFreeAddressesParams{ + Limit: &limitDefault, + + timeout: timeout, + } +} + +// NewGetFreeAddressesParamsWithContext creates a new GetFreeAddressesParams object +// with the default values initialized, and the ability to set a context for a request +func NewGetFreeAddressesParamsWithContext(ctx context.Context) *GetFreeAddressesParams { + var ( + limitDefault = int64(8000) + ) + return &GetFreeAddressesParams{ + Limit: &limitDefault, + + Context: ctx, + } +} + +// NewGetFreeAddressesParamsWithHTTPClient creates a new GetFreeAddressesParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewGetFreeAddressesParamsWithHTTPClient(client *http.Client) *GetFreeAddressesParams { + var ( + limitDefault = int64(8000) + ) + return &GetFreeAddressesParams{ + Limit: &limitDefault, + HTTPClient: client, + } +} + +/*GetFreeAddressesParams contains all the parameters to send to the API endpoint +for the get free addresses operation typically these are written to a http.Request +*/ +type GetFreeAddressesParams struct { + + /*ClusterID*/ + ClusterID strfmt.UUID + /*Limit*/ + Limit *int64 + /*Network*/ + Network string + /*Prefix*/ + Prefix *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the get free addresses params +func (o *GetFreeAddressesParams) WithTimeout(timeout time.Duration) *GetFreeAddressesParams { + o.SetTimeout(timeout) + 
return o +} + +// SetTimeout adds the timeout to the get free addresses params +func (o *GetFreeAddressesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get free addresses params +func (o *GetFreeAddressesParams) WithContext(ctx context.Context) *GetFreeAddressesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get free addresses params +func (o *GetFreeAddressesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get free addresses params +func (o *GetFreeAddressesParams) WithHTTPClient(client *http.Client) *GetFreeAddressesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get free addresses params +func (o *GetFreeAddressesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClusterID adds the clusterID to the get free addresses params +func (o *GetFreeAddressesParams) WithClusterID(clusterID strfmt.UUID) *GetFreeAddressesParams { + o.SetClusterID(clusterID) + return o +} + +// SetClusterID adds the clusterId to the get free addresses params +func (o *GetFreeAddressesParams) SetClusterID(clusterID strfmt.UUID) { + o.ClusterID = clusterID +} + +// WithLimit adds the limit to the get free addresses params +func (o *GetFreeAddressesParams) WithLimit(limit *int64) *GetFreeAddressesParams { + o.SetLimit(limit) + return o +} + +// SetLimit adds the limit to the get free addresses params +func (o *GetFreeAddressesParams) SetLimit(limit *int64) { + o.Limit = limit +} + +// WithNetwork adds the network to the get free addresses params +func (o *GetFreeAddressesParams) WithNetwork(network string) *GetFreeAddressesParams { + o.SetNetwork(network) + return o +} + +// SetNetwork adds the network to the get free addresses params +func (o *GetFreeAddressesParams) SetNetwork(network string) { + o.Network = network +} + +// WithPrefix adds the prefix to 
the get free addresses params +func (o *GetFreeAddressesParams) WithPrefix(prefix *string) *GetFreeAddressesParams { + o.SetPrefix(prefix) + return o +} + +// SetPrefix adds the prefix to the get free addresses params +func (o *GetFreeAddressesParams) SetPrefix(prefix *string) { + o.Prefix = prefix +} + +// WriteToRequest writes these params to a swagger request +func (o *GetFreeAddressesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cluster_id + if err := r.SetPathParam("cluster_id", o.ClusterID.String()); err != nil { + return err + } + + if o.Limit != nil { + + // query param limit + var qrLimit int64 + if o.Limit != nil { + qrLimit = *o.Limit + } + qLimit := swag.FormatInt64(qrLimit) + if qLimit != "" { + if err := r.SetQueryParam("limit", qLimit); err != nil { + return err + } + } + + } + + // query param network + qrNetwork := o.Network + qNetwork := qrNetwork + if qNetwork != "" { + if err := r.SetQueryParam("network", qNetwork); err != nil { + return err + } + } + + if o.Prefix != nil { + + // query param prefix + var qrPrefix string + if o.Prefix != nil { + qrPrefix = *o.Prefix + } + qPrefix := qrPrefix + if qPrefix != "" { + if err := r.SetQueryParam("prefix", qPrefix); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/installer/get_free_addresses_responses.go b/client/installer/get_free_addresses_responses.go new file mode 100644 index 000000000..d05705516 --- /dev/null +++ b/client/installer/get_free_addresses_responses.go @@ -0,0 +1,145 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/filanov/bm-inventory/models" +) + +// GetFreeAddressesReader is a Reader for the GetFreeAddresses structure. +type GetFreeAddressesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetFreeAddressesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetFreeAddressesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 404: + result := NewGetFreeAddressesNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewGetFreeAddressesInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewGetFreeAddressesOK creates a GetFreeAddressesOK with default headers values +func NewGetFreeAddressesOK() *GetFreeAddressesOK { + return &GetFreeAddressesOK{} +} + +/*GetFreeAddressesOK handles this case with default header values. 
+ +Success +*/ +type GetFreeAddressesOK struct { + Payload models.FreeAddressesList +} + +func (o *GetFreeAddressesOK) Error() string { + return fmt.Sprintf("[GET /clusters/{cluster_id}/free_addresses][%d] getFreeAddressesOK %+v", 200, o.Payload) +} + +func (o *GetFreeAddressesOK) GetPayload() models.FreeAddressesList { + return o.Payload +} + +func (o *GetFreeAddressesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetFreeAddressesNotFound creates a GetFreeAddressesNotFound with default headers values +func NewGetFreeAddressesNotFound() *GetFreeAddressesNotFound { + return &GetFreeAddressesNotFound{} +} + +/*GetFreeAddressesNotFound handles this case with default header values. + +Error. +*/ +type GetFreeAddressesNotFound struct { + Payload *models.Error +} + +func (o *GetFreeAddressesNotFound) Error() string { + return fmt.Sprintf("[GET /clusters/{cluster_id}/free_addresses][%d] getFreeAddressesNotFound %+v", 404, o.Payload) +} + +func (o *GetFreeAddressesNotFound) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetFreeAddressesNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetFreeAddressesInternalServerError creates a GetFreeAddressesInternalServerError with default headers values +func NewGetFreeAddressesInternalServerError() *GetFreeAddressesInternalServerError { + return &GetFreeAddressesInternalServerError{} +} + +/*GetFreeAddressesInternalServerError handles this case with default header values. + +Error. 
+*/ +type GetFreeAddressesInternalServerError struct { + Payload *models.Error +} + +func (o *GetFreeAddressesInternalServerError) Error() string { + return fmt.Sprintf("[GET /clusters/{cluster_id}/free_addresses][%d] getFreeAddressesInternalServerError %+v", 500, o.Payload) +} + +func (o *GetFreeAddressesInternalServerError) GetPayload() *models.Error { + return o.Payload +} + +func (o *GetFreeAddressesInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/client/installer/get_next_steps_responses.go b/client/installer/get_next_steps_responses.go index bab58f3c9..08a7a965c 100644 --- a/client/installer/get_next_steps_responses.go +++ b/client/installer/get_next_steps_responses.go @@ -57,21 +57,23 @@ func NewGetNextStepsOK() *GetNextStepsOK { Success. 
*/ type GetNextStepsOK struct { - Payload models.Steps + Payload *models.Steps } func (o *GetNextStepsOK) Error() string { return fmt.Sprintf("[GET /clusters/{cluster_id}/hosts/{host_id}/instructions][%d] getNextStepsOK %+v", 200, o.Payload) } -func (o *GetNextStepsOK) GetPayload() models.Steps { +func (o *GetNextStepsOK) GetPayload() *models.Steps { return o.Payload } func (o *GetNextStepsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + o.Payload = new(models.Steps) + // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { return err } diff --git a/client/installer/installer_client.go b/client/installer/installer_client.go index 9d214f937..ed2f175ef 100644 --- a/client/installer/installer_client.go +++ b/client/installer/installer_client.go @@ -18,6 +18,12 @@ import ( // API is the interface of the installer client type API interface { + /* + CancelInstallation cancels an ongoing installation*/ + CancelInstallation(ctx context.Context, params *CancelInstallationParams) (*CancelInstallationAccepted, error) + /* + CompleteInstallation agents API to mark a finalizing installation as complete*/ + CompleteInstallation(ctx context.Context, params *CompleteInstallationParams) (*CompleteInstallationAccepted, error) /* DeregisterCluster deletes an open shift bare metal cluster definition*/ DeregisterCluster(ctx context.Context, params *DeregisterClusterParams) (*DeregisterClusterNoContent, error) @@ -26,16 +32,19 @@ type API interface { DeregisterHost(ctx context.Context, params *DeregisterHostParams) (*DeregisterHostNoContent, error) /* DisableHost disables a host for inclusion in the cluster*/ - DisableHost(ctx context.Context, params *DisableHostParams) (*DisableHostNoContent, error) + DisableHost(ctx context.Context, params *DisableHostParams) (*DisableHostOK, error) 
/* DownloadClusterFiles downloads files relating to the installed installing cluster*/ DownloadClusterFiles(ctx context.Context, params *DownloadClusterFilesParams, writer io.Writer) (*DownloadClusterFilesOK, error) /* DownloadClusterISO downloads the open shift per cluster discovery i s o*/ DownloadClusterISO(ctx context.Context, params *DownloadClusterISOParams, writer io.Writer) (*DownloadClusterISOOK, error) + /* + DownloadClusterKubeconfig downloads the kubeconfig file for this cluster*/ + DownloadClusterKubeconfig(ctx context.Context, params *DownloadClusterKubeconfigParams, writer io.Writer) (*DownloadClusterKubeconfigOK, error) /* EnableHost enables a host for inclusion in the cluster*/ - EnableHost(ctx context.Context, params *EnableHostParams) (*EnableHostNoContent, error) + EnableHost(ctx context.Context, params *EnableHostParams) (*EnableHostOK, error) /* GenerateClusterISO creates a new open shift per cluster discovery i s o*/ GenerateClusterISO(ctx context.Context, params *GenerateClusterISOParams) (*GenerateClusterISOCreated, error) @@ -45,6 +54,9 @@ type API interface { /* GetCredentials gets the the cluster admin credentials*/ GetCredentials(ctx context.Context, params *GetCredentialsParams) (*GetCredentialsOK, error) + /* + GetFreeAddresses retrieves the free address list for a network*/ + GetFreeAddresses(ctx context.Context, params *GetFreeAddressesParams) (*GetFreeAddressesOK, error) /* GetHost retrieves the details of the open shift bare metal host*/ GetHost(ctx context.Context, params *GetHostParams) (*GetHostOK, error) @@ -69,6 +81,9 @@ type API interface { /* RegisterHost registers a new open shift bare metal host*/ RegisterHost(ctx context.Context, params *RegisterHostParams) (*RegisterHostCreated, error) + /* + ResetCluster resets a failed installation*/ + ResetCluster(ctx context.Context, params *ResetClusterParams) (*ResetClusterAccepted, error) /* SetDebugStep sets a single shot debug step that will be sent next time the host agent 
will ask for a command*/ SetDebugStep(ctx context.Context, params *SetDebugStepParams) (*SetDebugStepNoContent, error) @@ -78,6 +93,9 @@ type API interface { /* UpdateHostInstallProgress updates installation progress*/ UpdateHostInstallProgress(ctx context.Context, params *UpdateHostInstallProgressParams) (*UpdateHostInstallProgressOK, error) + /* + UploadClusterIngressCert transfers the ingress certificate for the cluster*/ + UploadClusterIngressCert(ctx context.Context, params *UploadClusterIngressCertParams) (*UploadClusterIngressCertCreated, error) } // New creates a new installer API client. @@ -98,6 +116,54 @@ type Client struct { authInfo runtime.ClientAuthInfoWriter } +/* +CancelInstallation cancels an ongoing installation +*/ +func (a *Client) CancelInstallation(ctx context.Context, params *CancelInstallationParams) (*CancelInstallationAccepted, error) { + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CancelInstallation", + Method: "POST", + PathPattern: "/clusters/{cluster_id}/actions/cancel", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CancelInstallationReader{formats: a.formats}, + Context: ctx, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*CancelInstallationAccepted), nil + +} + +/* +CompleteInstallation agents API to mark a finalizing installation as complete +*/ +func (a *Client) CompleteInstallation(ctx context.Context, params *CompleteInstallationParams) (*CompleteInstallationAccepted, error) { + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CompleteInstallation", + Method: "POST", + PathPattern: "/clusters/{cluster_id}/actions/complete_installation", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: 
&CompleteInstallationReader{formats: a.formats}, + Context: ctx, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*CompleteInstallationAccepted), nil + +} + /* DeregisterCluster deletes an open shift bare metal cluster definition */ @@ -149,7 +215,7 @@ func (a *Client) DeregisterHost(ctx context.Context, params *DeregisterHostParam /* DisableHost disables a host for inclusion in the cluster */ -func (a *Client) DisableHost(ctx context.Context, params *DisableHostParams) (*DisableHostNoContent, error) { +func (a *Client) DisableHost(ctx context.Context, params *DisableHostParams) (*DisableHostOK, error) { result, err := a.transport.Submit(&runtime.ClientOperation{ ID: "DisableHost", @@ -166,7 +232,7 @@ func (a *Client) DisableHost(ctx context.Context, params *DisableHostParams) (*D if err != nil { return nil, err } - return result.(*DisableHostNoContent), nil + return result.(*DisableHostOK), nil } @@ -218,10 +284,34 @@ func (a *Client) DownloadClusterISO(ctx context.Context, params *DownloadCluster } +/* +DownloadClusterKubeconfig downloads the kubeconfig file for this cluster +*/ +func (a *Client) DownloadClusterKubeconfig(ctx context.Context, params *DownloadClusterKubeconfigParams, writer io.Writer) (*DownloadClusterKubeconfigOK, error) { + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "DownloadClusterKubeconfig", + Method: "GET", + PathPattern: "/clusters/{cluster_id}/downloads/kubeconfig", + ProducesMediaTypes: []string{"application/octet-stream"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &DownloadClusterKubeconfigReader{formats: a.formats, writer: writer}, + Context: ctx, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*DownloadClusterKubeconfigOK), nil + +} + /* EnableHost enables a host for inclusion in the cluster */ -func (a *Client) EnableHost(ctx context.Context, params 
*EnableHostParams) (*EnableHostNoContent, error) { +func (a *Client) EnableHost(ctx context.Context, params *EnableHostParams) (*EnableHostOK, error) { result, err := a.transport.Submit(&runtime.ClientOperation{ ID: "EnableHost", @@ -238,7 +328,7 @@ func (a *Client) EnableHost(ctx context.Context, params *EnableHostParams) (*Ena if err != nil { return nil, err } - return result.(*EnableHostNoContent), nil + return result.(*EnableHostOK), nil } @@ -314,6 +404,30 @@ func (a *Client) GetCredentials(ctx context.Context, params *GetCredentialsParam } +/* +GetFreeAddresses retrieves the free address list for a network +*/ +func (a *Client) GetFreeAddresses(ctx context.Context, params *GetFreeAddressesParams) (*GetFreeAddressesOK, error) { + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "GetFreeAddresses", + Method: "GET", + PathPattern: "/clusters/{cluster_id}/free_addresses", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetFreeAddressesReader{formats: a.formats}, + Context: ctx, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*GetFreeAddressesOK), nil + +} + /* GetHost retrieves the details of the open shift bare metal host */ @@ -506,6 +620,30 @@ func (a *Client) RegisterHost(ctx context.Context, params *RegisterHostParams) ( } +/* +ResetCluster resets a failed installation +*/ +func (a *Client) ResetCluster(ctx context.Context, params *ResetClusterParams) (*ResetClusterAccepted, error) { + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ResetCluster", + Method: "POST", + PathPattern: "/clusters/{cluster_id}/actions/reset", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ResetClusterReader{formats: a.formats}, + Context: ctx, + Client: params.HTTPClient, + 
}) + if err != nil { + return nil, err + } + return result.(*ResetClusterAccepted), nil + +} + /* SetDebugStep sets a single shot debug step that will be sent next time the host agent will ask for a command */ @@ -562,7 +700,7 @@ func (a *Client) UpdateHostInstallProgress(ctx context.Context, params *UpdateHo result, err := a.transport.Submit(&runtime.ClientOperation{ ID: "UpdateHostInstallProgress", Method: "PUT", - PathPattern: "/clusters/{clusterId}/hosts/{hostId}/progress", + PathPattern: "/clusters/{cluster_id}/hosts/{host_id}/progress", ProducesMediaTypes: []string{"application/json"}, ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, @@ -577,3 +715,27 @@ func (a *Client) UpdateHostInstallProgress(ctx context.Context, params *UpdateHo return result.(*UpdateHostInstallProgressOK), nil } + +/* +UploadClusterIngressCert transfers the ingress certificate for the cluster +*/ +func (a *Client) UploadClusterIngressCert(ctx context.Context, params *UploadClusterIngressCertParams) (*UploadClusterIngressCertCreated, error) { + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "UploadClusterIngressCert", + Method: "POST", + PathPattern: "/clusters/{cluster_id}/uploads/ingress-cert", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &UploadClusterIngressCertReader{formats: a.formats}, + Context: ctx, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*UploadClusterIngressCertCreated), nil + +} diff --git a/client/installer/mock_API.go b/client/installer/mock_API.go deleted file mode 100644 index 45f48601a..000000000 --- a/client/installer/mock_API.go +++ /dev/null @@ -1,475 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package installer - -import ( - context "context" - io "io" - - mock "github.com/stretchr/testify/mock" -) - -// MockAPI is an autogenerated mock type for the API type -type MockAPI struct { - mock.Mock -} - -// DeregisterCluster provides a mock function with given fields: ctx, params -func (_m *MockAPI) DeregisterCluster(ctx context.Context, params *DeregisterClusterParams) (*DeregisterClusterNoContent, error) { - ret := _m.Called(ctx, params) - - var r0 *DeregisterClusterNoContent - if rf, ok := ret.Get(0).(func(context.Context, *DeregisterClusterParams) *DeregisterClusterNoContent); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*DeregisterClusterNoContent) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *DeregisterClusterParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeregisterHost provides a mock function with given fields: ctx, params -func (_m *MockAPI) DeregisterHost(ctx context.Context, params *DeregisterHostParams) (*DeregisterHostNoContent, error) { - ret := _m.Called(ctx, params) - - var r0 *DeregisterHostNoContent - if rf, ok := ret.Get(0).(func(context.Context, *DeregisterHostParams) *DeregisterHostNoContent); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*DeregisterHostNoContent) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *DeregisterHostParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DisableHost provides a mock function with given fields: ctx, params -func (_m *MockAPI) DisableHost(ctx context.Context, params *DisableHostParams) (*DisableHostNoContent, error) { - ret := _m.Called(ctx, params) - - var r0 *DisableHostNoContent - if rf, ok := ret.Get(0).(func(context.Context, *DisableHostParams) *DisableHostNoContent); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = 
ret.Get(0).(*DisableHostNoContent) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *DisableHostParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DownloadClusterFiles provides a mock function with given fields: ctx, params, writer -func (_m *MockAPI) DownloadClusterFiles(ctx context.Context, params *DownloadClusterFilesParams, writer io.Writer) (*DownloadClusterFilesOK, error) { - ret := _m.Called(ctx, params, writer) - - var r0 *DownloadClusterFilesOK - if rf, ok := ret.Get(0).(func(context.Context, *DownloadClusterFilesParams, io.Writer) *DownloadClusterFilesOK); ok { - r0 = rf(ctx, params, writer) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*DownloadClusterFilesOK) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *DownloadClusterFilesParams, io.Writer) error); ok { - r1 = rf(ctx, params, writer) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DownloadClusterISO provides a mock function with given fields: ctx, params, writer -func (_m *MockAPI) DownloadClusterISO(ctx context.Context, params *DownloadClusterISOParams, writer io.Writer) (*DownloadClusterISOOK, error) { - ret := _m.Called(ctx, params, writer) - - var r0 *DownloadClusterISOOK - if rf, ok := ret.Get(0).(func(context.Context, *DownloadClusterISOParams, io.Writer) *DownloadClusterISOOK); ok { - r0 = rf(ctx, params, writer) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*DownloadClusterISOOK) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *DownloadClusterISOParams, io.Writer) error); ok { - r1 = rf(ctx, params, writer) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EnableHost provides a mock function with given fields: ctx, params -func (_m *MockAPI) EnableHost(ctx context.Context, params *EnableHostParams) (*EnableHostNoContent, error) { - ret := _m.Called(ctx, params) - - var r0 *EnableHostNoContent - if rf, ok := 
ret.Get(0).(func(context.Context, *EnableHostParams) *EnableHostNoContent); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*EnableHostNoContent) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *EnableHostParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GenerateClusterISO provides a mock function with given fields: ctx, params -func (_m *MockAPI) GenerateClusterISO(ctx context.Context, params *GenerateClusterISOParams) (*GenerateClusterISOCreated, error) { - ret := _m.Called(ctx, params) - - var r0 *GenerateClusterISOCreated - if rf, ok := ret.Get(0).(func(context.Context, *GenerateClusterISOParams) *GenerateClusterISOCreated); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*GenerateClusterISOCreated) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *GenerateClusterISOParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetCluster provides a mock function with given fields: ctx, params -func (_m *MockAPI) GetCluster(ctx context.Context, params *GetClusterParams) (*GetClusterOK, error) { - ret := _m.Called(ctx, params) - - var r0 *GetClusterOK - if rf, ok := ret.Get(0).(func(context.Context, *GetClusterParams) *GetClusterOK); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*GetClusterOK) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *GetClusterParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetCredentials provides a mock function with given fields: ctx, params -func (_m *MockAPI) GetCredentials(ctx context.Context, params *GetCredentialsParams) (*GetCredentialsOK, error) { - ret := _m.Called(ctx, params) - - var r0 *GetCredentialsOK - if rf, ok := ret.Get(0).(func(context.Context, *GetCredentialsParams) 
*GetCredentialsOK); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*GetCredentialsOK) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *GetCredentialsParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetHost provides a mock function with given fields: ctx, params -func (_m *MockAPI) GetHost(ctx context.Context, params *GetHostParams) (*GetHostOK, error) { - ret := _m.Called(ctx, params) - - var r0 *GetHostOK - if rf, ok := ret.Get(0).(func(context.Context, *GetHostParams) *GetHostOK); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*GetHostOK) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *GetHostParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetNextSteps provides a mock function with given fields: ctx, params -func (_m *MockAPI) GetNextSteps(ctx context.Context, params *GetNextStepsParams) (*GetNextStepsOK, error) { - ret := _m.Called(ctx, params) - - var r0 *GetNextStepsOK - if rf, ok := ret.Get(0).(func(context.Context, *GetNextStepsParams) *GetNextStepsOK); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*GetNextStepsOK) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *GetNextStepsParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// InstallCluster provides a mock function with given fields: ctx, params -func (_m *MockAPI) InstallCluster(ctx context.Context, params *InstallClusterParams) (*InstallClusterAccepted, error) { - ret := _m.Called(ctx, params) - - var r0 *InstallClusterAccepted - if rf, ok := ret.Get(0).(func(context.Context, *InstallClusterParams) *InstallClusterAccepted); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*InstallClusterAccepted) - } - } - - var r1 error - 
if rf, ok := ret.Get(1).(func(context.Context, *InstallClusterParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListClusters provides a mock function with given fields: ctx, params -func (_m *MockAPI) ListClusters(ctx context.Context, params *ListClustersParams) (*ListClustersOK, error) { - ret := _m.Called(ctx, params) - - var r0 *ListClustersOK - if rf, ok := ret.Get(0).(func(context.Context, *ListClustersParams) *ListClustersOK); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*ListClustersOK) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *ListClustersParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListHosts provides a mock function with given fields: ctx, params -func (_m *MockAPI) ListHosts(ctx context.Context, params *ListHostsParams) (*ListHostsOK, error) { - ret := _m.Called(ctx, params) - - var r0 *ListHostsOK - if rf, ok := ret.Get(0).(func(context.Context, *ListHostsParams) *ListHostsOK); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*ListHostsOK) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *ListHostsParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PostStepReply provides a mock function with given fields: ctx, params -func (_m *MockAPI) PostStepReply(ctx context.Context, params *PostStepReplyParams) (*PostStepReplyNoContent, error) { - ret := _m.Called(ctx, params) - - var r0 *PostStepReplyNoContent - if rf, ok := ret.Get(0).(func(context.Context, *PostStepReplyParams) *PostStepReplyNoContent); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*PostStepReplyNoContent) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *PostStepReplyParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = 
ret.Error(1) - } - - return r0, r1 -} - -// RegisterCluster provides a mock function with given fields: ctx, params -func (_m *MockAPI) RegisterCluster(ctx context.Context, params *RegisterClusterParams) (*RegisterClusterCreated, error) { - ret := _m.Called(ctx, params) - - var r0 *RegisterClusterCreated - if rf, ok := ret.Get(0).(func(context.Context, *RegisterClusterParams) *RegisterClusterCreated); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*RegisterClusterCreated) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *RegisterClusterParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RegisterHost provides a mock function with given fields: ctx, params -func (_m *MockAPI) RegisterHost(ctx context.Context, params *RegisterHostParams) (*RegisterHostCreated, error) { - ret := _m.Called(ctx, params) - - var r0 *RegisterHostCreated - if rf, ok := ret.Get(0).(func(context.Context, *RegisterHostParams) *RegisterHostCreated); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*RegisterHostCreated) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *RegisterHostParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SetDebugStep provides a mock function with given fields: ctx, params -func (_m *MockAPI) SetDebugStep(ctx context.Context, params *SetDebugStepParams) (*SetDebugStepNoContent, error) { - ret := _m.Called(ctx, params) - - var r0 *SetDebugStepNoContent - if rf, ok := ret.Get(0).(func(context.Context, *SetDebugStepParams) *SetDebugStepNoContent); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*SetDebugStepNoContent) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *SetDebugStepParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// 
UpdateCluster provides a mock function with given fields: ctx, params -func (_m *MockAPI) UpdateCluster(ctx context.Context, params *UpdateClusterParams) (*UpdateClusterCreated, error) { - ret := _m.Called(ctx, params) - - var r0 *UpdateClusterCreated - if rf, ok := ret.Get(0).(func(context.Context, *UpdateClusterParams) *UpdateClusterCreated); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*UpdateClusterCreated) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *UpdateClusterParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// UpdateHostInstallProgress provides a mock function with given fields: ctx, params -func (_m *MockAPI) UpdateHostInstallProgress(ctx context.Context, params *UpdateHostInstallProgressParams) (*UpdateHostInstallProgressOK, error) { - ret := _m.Called(ctx, params) - - var r0 *UpdateHostInstallProgressOK - if rf, ok := ret.Get(0).(func(context.Context, *UpdateHostInstallProgressParams) *UpdateHostInstallProgressOK); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*UpdateHostInstallProgressOK) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *UpdateHostInstallProgressParams) error); ok { - r1 = rf(ctx, params) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/client/installer/register_host_responses.go b/client/installer/register_host_responses.go index 3b32e54fa..8fcfeddc7 100644 --- a/client/installer/register_host_responses.go +++ b/client/installer/register_host_responses.go @@ -35,6 +35,18 @@ func (o *RegisterHostReader) ReadResponse(response runtime.ClientResponse, consu return nil, err } return nil, result + case 403: + result := NewRegisterHostForbidden() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewRegisterHostNotFound() + if err := 
result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result case 500: result := NewRegisterHostInternalServerError() if err := result.readResponse(response, consumer, o.formats); err != nil { @@ -113,6 +125,72 @@ func (o *RegisterHostBadRequest) readResponse(response runtime.ClientResponse, c return nil } +// NewRegisterHostForbidden creates a RegisterHostForbidden with default headers values +func NewRegisterHostForbidden() *RegisterHostForbidden { + return &RegisterHostForbidden{} +} + +/*RegisterHostForbidden handles this case with default header values. + +Error. +*/ +type RegisterHostForbidden struct { + Payload *models.Error +} + +func (o *RegisterHostForbidden) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/hosts][%d] registerHostForbidden %+v", 403, o.Payload) +} + +func (o *RegisterHostForbidden) GetPayload() *models.Error { + return o.Payload +} + +func (o *RegisterHostForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewRegisterHostNotFound creates a RegisterHostNotFound with default headers values +func NewRegisterHostNotFound() *RegisterHostNotFound { + return &RegisterHostNotFound{} +} + +/*RegisterHostNotFound handles this case with default header values. + +Error. 
+*/ +type RegisterHostNotFound struct { + Payload *models.Error +} + +func (o *RegisterHostNotFound) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/hosts][%d] registerHostNotFound %+v", 404, o.Payload) +} + +func (o *RegisterHostNotFound) GetPayload() *models.Error { + return o.Payload +} + +func (o *RegisterHostNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + // NewRegisterHostInternalServerError creates a RegisterHostInternalServerError with default headers values func NewRegisterHostInternalServerError() *RegisterHostInternalServerError { return &RegisterHostInternalServerError{} diff --git a/client/installer/reset_cluster_parameters.go b/client/installer/reset_cluster_parameters.go new file mode 100644 index 000000000..30388404b --- /dev/null +++ b/client/installer/reset_cluster_parameters.go @@ -0,0 +1,132 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewResetClusterParams creates a new ResetClusterParams object +// with the default values initialized. 
+func NewResetClusterParams() *ResetClusterParams { + var () + return &ResetClusterParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewResetClusterParamsWithTimeout creates a new ResetClusterParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewResetClusterParamsWithTimeout(timeout time.Duration) *ResetClusterParams { + var () + return &ResetClusterParams{ + + timeout: timeout, + } +} + +// NewResetClusterParamsWithContext creates a new ResetClusterParams object +// with the default values initialized, and the ability to set a context for a request +func NewResetClusterParamsWithContext(ctx context.Context) *ResetClusterParams { + var () + return &ResetClusterParams{ + + Context: ctx, + } +} + +// NewResetClusterParamsWithHTTPClient creates a new ResetClusterParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewResetClusterParamsWithHTTPClient(client *http.Client) *ResetClusterParams { + var () + return &ResetClusterParams{ + HTTPClient: client, + } +} + +/*ResetClusterParams contains all the parameters to send to the API endpoint +for the reset cluster operation typically these are written to a http.Request +*/ +type ResetClusterParams struct { + + /*ClusterID*/ + ClusterID strfmt.UUID + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the reset cluster params +func (o *ResetClusterParams) WithTimeout(timeout time.Duration) *ResetClusterParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the reset cluster params +func (o *ResetClusterParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the reset cluster params +func (o *ResetClusterParams) WithContext(ctx context.Context) *ResetClusterParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the reset cluster params 
+func (o *ResetClusterParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the reset cluster params +func (o *ResetClusterParams) WithHTTPClient(client *http.Client) *ResetClusterParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the reset cluster params +func (o *ResetClusterParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClusterID adds the clusterID to the reset cluster params +func (o *ResetClusterParams) WithClusterID(clusterID strfmt.UUID) *ResetClusterParams { + o.SetClusterID(clusterID) + return o +} + +// SetClusterID adds the clusterId to the reset cluster params +func (o *ResetClusterParams) SetClusterID(clusterID strfmt.UUID) { + o.ClusterID = clusterID +} + +// WriteToRequest writes these params to a swagger request +func (o *ResetClusterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cluster_id + if err := r.SetPathParam("cluster_id", o.ClusterID.String()); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/installer/reset_cluster_responses.go b/client/installer/reset_cluster_responses.go new file mode 100644 index 000000000..8e21466ad --- /dev/null +++ b/client/installer/reset_cluster_responses.go @@ -0,0 +1,186 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/filanov/bm-inventory/models" +) + +// ResetClusterReader is a Reader for the ResetCluster structure. 
+type ResetClusterReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ResetClusterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 202: + result := NewResetClusterAccepted() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 404: + result := NewResetClusterNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 409: + result := NewResetClusterConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewResetClusterInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewResetClusterAccepted creates a ResetClusterAccepted with default headers values +func NewResetClusterAccepted() *ResetClusterAccepted { + return &ResetClusterAccepted{} +} + +/*ResetClusterAccepted handles this case with default header values. + +Success. 
+*/ +type ResetClusterAccepted struct { + Payload *models.Cluster +} + +func (o *ResetClusterAccepted) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/actions/reset][%d] resetClusterAccepted %+v", 202, o.Payload) +} + +func (o *ResetClusterAccepted) GetPayload() *models.Cluster { + return o.Payload +} + +func (o *ResetClusterAccepted) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Cluster) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewResetClusterNotFound creates a ResetClusterNotFound with default headers values +func NewResetClusterNotFound() *ResetClusterNotFound { + return &ResetClusterNotFound{} +} + +/*ResetClusterNotFound handles this case with default header values. + +Error. +*/ +type ResetClusterNotFound struct { + Payload *models.Error +} + +func (o *ResetClusterNotFound) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/actions/reset][%d] resetClusterNotFound %+v", 404, o.Payload) +} + +func (o *ResetClusterNotFound) GetPayload() *models.Error { + return o.Payload +} + +func (o *ResetClusterNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewResetClusterConflict creates a ResetClusterConflict with default headers values +func NewResetClusterConflict() *ResetClusterConflict { + return &ResetClusterConflict{} +} + +/*ResetClusterConflict handles this case with default header values. + +Error. 
+*/ +type ResetClusterConflict struct { + Payload *models.Error +} + +func (o *ResetClusterConflict) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/actions/reset][%d] resetClusterConflict %+v", 409, o.Payload) +} + +func (o *ResetClusterConflict) GetPayload() *models.Error { + return o.Payload +} + +func (o *ResetClusterConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewResetClusterInternalServerError creates a ResetClusterInternalServerError with default headers values +func NewResetClusterInternalServerError() *ResetClusterInternalServerError { + return &ResetClusterInternalServerError{} +} + +/*ResetClusterInternalServerError handles this case with default header values. + +Error. +*/ +type ResetClusterInternalServerError struct { + Payload *models.Error +} + +func (o *ResetClusterInternalServerError) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/actions/reset][%d] resetClusterInternalServerError %+v", 500, o.Payload) +} + +func (o *ResetClusterInternalServerError) GetPayload() *models.Error { + return o.Payload +} + +func (o *ResetClusterInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/client/installer/update_host_install_progress_parameters.go b/client/installer/update_host_install_progress_parameters.go index dcc33d025..abaad4782 100644 --- a/client/installer/update_host_install_progress_parameters.go +++ b/client/installer/update_host_install_progress_parameters.go @@ -67,11 +67,11 @@ type 
UpdateHostInstallProgressParams struct { */ ClusterID strfmt.UUID - /*HostInstallProgressParams + /*HostProgress New progress value */ - HostInstallProgressParams models.HostInstallProgressParams + HostProgress *models.HostProgress /*HostID The ID of the host to retrieve @@ -127,15 +127,15 @@ func (o *UpdateHostInstallProgressParams) SetClusterID(clusterID strfmt.UUID) { o.ClusterID = clusterID } -// WithHostInstallProgressParams adds the hostInstallProgressParams to the update host install progress params -func (o *UpdateHostInstallProgressParams) WithHostInstallProgressParams(hostInstallProgressParams models.HostInstallProgressParams) *UpdateHostInstallProgressParams { - o.SetHostInstallProgressParams(hostInstallProgressParams) +// WithHostProgress adds the hostProgress to the update host install progress params +func (o *UpdateHostInstallProgressParams) WithHostProgress(hostProgress *models.HostProgress) *UpdateHostInstallProgressParams { + o.SetHostProgress(hostProgress) return o } -// SetHostInstallProgressParams adds the hostInstallProgressParams to the update host install progress params -func (o *UpdateHostInstallProgressParams) SetHostInstallProgressParams(hostInstallProgressParams models.HostInstallProgressParams) { - o.HostInstallProgressParams = hostInstallProgressParams +// SetHostProgress adds the hostProgress to the update host install progress params +func (o *UpdateHostInstallProgressParams) SetHostProgress(hostProgress *models.HostProgress) { + o.HostProgress = hostProgress } // WithHostID adds the hostID to the update host install progress params @@ -157,17 +157,19 @@ func (o *UpdateHostInstallProgressParams) WriteToRequest(r runtime.ClientRequest } var res []error - // path param clusterId - if err := r.SetPathParam("clusterId", o.ClusterID.String()); err != nil { + // path param cluster_id + if err := r.SetPathParam("cluster_id", o.ClusterID.String()); err != nil { return err } - if err := r.SetBodyParam(o.HostInstallProgressParams); err != nil 
{ - return err + if o.HostProgress != nil { + if err := r.SetBodyParam(o.HostProgress); err != nil { + return err + } } - // path param hostId - if err := r.SetPathParam("hostId", o.HostID.String()); err != nil { + // path param host_id + if err := r.SetPathParam("host_id", o.HostID.String()); err != nil { return err } diff --git a/client/installer/update_host_install_progress_responses.go b/client/installer/update_host_install_progress_responses.go index 8036af470..bead39dea 100644 --- a/client/installer/update_host_install_progress_responses.go +++ b/client/installer/update_host_install_progress_responses.go @@ -7,9 +7,12 @@ package installer import ( "fmt" + "io" "github.com/go-openapi/runtime" "github.com/go-openapi/strfmt" + + "github.com/filanov/bm-inventory/models" ) // UpdateHostInstallProgressReader is a Reader for the UpdateHostInstallProgress structure. @@ -26,6 +29,18 @@ func (o *UpdateHostInstallProgressReader) ReadResponse(response runtime.ClientRe return nil, err } return result, nil + case 404: + result := NewUpdateHostInstallProgressNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewUpdateHostInstallProgressInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result default: return nil, runtime.NewAPIError("unknown error", response, response.Code()) @@ -45,10 +60,76 @@ type UpdateHostInstallProgressOK struct { } func (o *UpdateHostInstallProgressOK) Error() string { - return fmt.Sprintf("[PUT /clusters/{clusterId}/hosts/{hostId}/progress][%d] updateHostInstallProgressOK ", 200) + return fmt.Sprintf("[PUT /clusters/{cluster_id}/hosts/{host_id}/progress][%d] updateHostInstallProgressOK ", 200) } func (o *UpdateHostInstallProgressOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { return nil } + +// 
NewUpdateHostInstallProgressNotFound creates a UpdateHostInstallProgressNotFound with default headers values +func NewUpdateHostInstallProgressNotFound() *UpdateHostInstallProgressNotFound { + return &UpdateHostInstallProgressNotFound{} +} + +/*UpdateHostInstallProgressNotFound handles this case with default header values. + +Error. +*/ +type UpdateHostInstallProgressNotFound struct { + Payload *models.Error +} + +func (o *UpdateHostInstallProgressNotFound) Error() string { + return fmt.Sprintf("[PUT /clusters/{cluster_id}/hosts/{host_id}/progress][%d] updateHostInstallProgressNotFound %+v", 404, o.Payload) +} + +func (o *UpdateHostInstallProgressNotFound) GetPayload() *models.Error { + return o.Payload +} + +func (o *UpdateHostInstallProgressNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewUpdateHostInstallProgressInternalServerError creates a UpdateHostInstallProgressInternalServerError with default headers values +func NewUpdateHostInstallProgressInternalServerError() *UpdateHostInstallProgressInternalServerError { + return &UpdateHostInstallProgressInternalServerError{} +} + +/*UpdateHostInstallProgressInternalServerError handles this case with default header values. + +Error. 
+*/ +type UpdateHostInstallProgressInternalServerError struct { + Payload *models.Error +} + +func (o *UpdateHostInstallProgressInternalServerError) Error() string { + return fmt.Sprintf("[PUT /clusters/{cluster_id}/hosts/{host_id}/progress][%d] updateHostInstallProgressInternalServerError %+v", 500, o.Payload) +} + +func (o *UpdateHostInstallProgressInternalServerError) GetPayload() *models.Error { + return o.Payload +} + +func (o *UpdateHostInstallProgressInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/client/installer/upload_cluster_ingress_cert_parameters.go b/client/installer/upload_cluster_ingress_cert_parameters.go new file mode 100644 index 000000000..3d4e149cb --- /dev/null +++ b/client/installer/upload_cluster_ingress_cert_parameters.go @@ -0,0 +1,151 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/filanov/bm-inventory/models" +) + +// NewUploadClusterIngressCertParams creates a new UploadClusterIngressCertParams object +// with the default values initialized. 
+func NewUploadClusterIngressCertParams() *UploadClusterIngressCertParams { + var () + return &UploadClusterIngressCertParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewUploadClusterIngressCertParamsWithTimeout creates a new UploadClusterIngressCertParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewUploadClusterIngressCertParamsWithTimeout(timeout time.Duration) *UploadClusterIngressCertParams { + var () + return &UploadClusterIngressCertParams{ + + timeout: timeout, + } +} + +// NewUploadClusterIngressCertParamsWithContext creates a new UploadClusterIngressCertParams object +// with the default values initialized, and the ability to set a context for a request +func NewUploadClusterIngressCertParamsWithContext(ctx context.Context) *UploadClusterIngressCertParams { + var () + return &UploadClusterIngressCertParams{ + + Context: ctx, + } +} + +// NewUploadClusterIngressCertParamsWithHTTPClient creates a new UploadClusterIngressCertParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewUploadClusterIngressCertParamsWithHTTPClient(client *http.Client) *UploadClusterIngressCertParams { + var () + return &UploadClusterIngressCertParams{ + HTTPClient: client, + } +} + +/*UploadClusterIngressCertParams contains all the parameters to send to the API endpoint +for the upload cluster ingress cert operation typically these are written to a http.Request +*/ +type UploadClusterIngressCertParams struct { + + /*ClusterID*/ + ClusterID strfmt.UUID + /*IngressCertParams*/ + IngressCertParams models.IngressCertParams + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the upload cluster ingress cert params +func (o *UploadClusterIngressCertParams) WithTimeout(timeout time.Duration) *UploadClusterIngressCertParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to 
the upload cluster ingress cert params +func (o *UploadClusterIngressCertParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the upload cluster ingress cert params +func (o *UploadClusterIngressCertParams) WithContext(ctx context.Context) *UploadClusterIngressCertParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the upload cluster ingress cert params +func (o *UploadClusterIngressCertParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the upload cluster ingress cert params +func (o *UploadClusterIngressCertParams) WithHTTPClient(client *http.Client) *UploadClusterIngressCertParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the upload cluster ingress cert params +func (o *UploadClusterIngressCertParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithClusterID adds the clusterID to the upload cluster ingress cert params +func (o *UploadClusterIngressCertParams) WithClusterID(clusterID strfmt.UUID) *UploadClusterIngressCertParams { + o.SetClusterID(clusterID) + return o +} + +// SetClusterID adds the clusterId to the upload cluster ingress cert params +func (o *UploadClusterIngressCertParams) SetClusterID(clusterID strfmt.UUID) { + o.ClusterID = clusterID +} + +// WithIngressCertParams adds the ingressCertParams to the upload cluster ingress cert params +func (o *UploadClusterIngressCertParams) WithIngressCertParams(ingressCertParams models.IngressCertParams) *UploadClusterIngressCertParams { + o.SetIngressCertParams(ingressCertParams) + return o +} + +// SetIngressCertParams adds the ingressCertParams to the upload cluster ingress cert params +func (o *UploadClusterIngressCertParams) SetIngressCertParams(ingressCertParams models.IngressCertParams) { + o.IngressCertParams = ingressCertParams +} + +// WriteToRequest writes these params to a swagger request +func 
(o *UploadClusterIngressCertParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param cluster_id + if err := r.SetPathParam("cluster_id", o.ClusterID.String()); err != nil { + return err + } + + if err := r.SetBodyParam(o.IngressCertParams); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/installer/upload_cluster_ingress_cert_responses.go b/client/installer/upload_cluster_ingress_cert_responses.go new file mode 100644 index 000000000..ef72ac6d5 --- /dev/null +++ b/client/installer/upload_cluster_ingress_cert_responses.go @@ -0,0 +1,174 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/filanov/bm-inventory/models" +) + +// UploadClusterIngressCertReader is a Reader for the UploadClusterIngressCert structure. +type UploadClusterIngressCertReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *UploadClusterIngressCertReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 201: + result := NewUploadClusterIngressCertCreated() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewUploadClusterIngressCertBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewUploadClusterIngressCertNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewUploadClusterIngressCertInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewUploadClusterIngressCertCreated creates a UploadClusterIngressCertCreated with default headers values +func NewUploadClusterIngressCertCreated() *UploadClusterIngressCertCreated { + return &UploadClusterIngressCertCreated{} +} + +/*UploadClusterIngressCertCreated handles this case with default header values. + +Success. 
+*/ +type UploadClusterIngressCertCreated struct { +} + +func (o *UploadClusterIngressCertCreated) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/uploads/ingress-cert][%d] uploadClusterIngressCertCreated ", 201) +} + +func (o *UploadClusterIngressCertCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewUploadClusterIngressCertBadRequest creates a UploadClusterIngressCertBadRequest with default headers values +func NewUploadClusterIngressCertBadRequest() *UploadClusterIngressCertBadRequest { + return &UploadClusterIngressCertBadRequest{} +} + +/*UploadClusterIngressCertBadRequest handles this case with default header values. + +Error. +*/ +type UploadClusterIngressCertBadRequest struct { + Payload *models.Error +} + +func (o *UploadClusterIngressCertBadRequest) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/uploads/ingress-cert][%d] uploadClusterIngressCertBadRequest %+v", 400, o.Payload) +} + +func (o *UploadClusterIngressCertBadRequest) GetPayload() *models.Error { + return o.Payload +} + +func (o *UploadClusterIngressCertBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewUploadClusterIngressCertNotFound creates a UploadClusterIngressCertNotFound with default headers values +func NewUploadClusterIngressCertNotFound() *UploadClusterIngressCertNotFound { + return &UploadClusterIngressCertNotFound{} +} + +/*UploadClusterIngressCertNotFound handles this case with default header values. + +Error. 
+*/ +type UploadClusterIngressCertNotFound struct { + Payload *models.Error +} + +func (o *UploadClusterIngressCertNotFound) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/uploads/ingress-cert][%d] uploadClusterIngressCertNotFound %+v", 404, o.Payload) +} + +func (o *UploadClusterIngressCertNotFound) GetPayload() *models.Error { + return o.Payload +} + +func (o *UploadClusterIngressCertNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewUploadClusterIngressCertInternalServerError creates a UploadClusterIngressCertInternalServerError with default headers values +func NewUploadClusterIngressCertInternalServerError() *UploadClusterIngressCertInternalServerError { + return &UploadClusterIngressCertInternalServerError{} +} + +/*UploadClusterIngressCertInternalServerError handles this case with default header values. + +Error. 
+*/ +type UploadClusterIngressCertInternalServerError struct { + Payload *models.Error +} + +func (o *UploadClusterIngressCertInternalServerError) Error() string { + return fmt.Sprintf("[POST /clusters/{cluster_id}/uploads/ingress-cert][%d] uploadClusterIngressCertInternalServerError %+v", 500, o.Payload) +} + +func (o *UploadClusterIngressCertInternalServerError) GetPayload() *models.Error { + return o.Payload +} + +func (o *UploadClusterIngressCertInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/client/managed_domains/list_managed_domains_parameters.go b/client/managed_domains/list_managed_domains_parameters.go new file mode 100644 index 000000000..13530dbfc --- /dev/null +++ b/client/managed_domains/list_managed_domains_parameters.go @@ -0,0 +1,112 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package managed_domains + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewListManagedDomainsParams creates a new ListManagedDomainsParams object +// with the default values initialized. 
+func NewListManagedDomainsParams() *ListManagedDomainsParams { + + return &ListManagedDomainsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewListManagedDomainsParamsWithTimeout creates a new ListManagedDomainsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewListManagedDomainsParamsWithTimeout(timeout time.Duration) *ListManagedDomainsParams { + + return &ListManagedDomainsParams{ + + timeout: timeout, + } +} + +// NewListManagedDomainsParamsWithContext creates a new ListManagedDomainsParams object +// with the default values initialized, and the ability to set a context for a request +func NewListManagedDomainsParamsWithContext(ctx context.Context) *ListManagedDomainsParams { + + return &ListManagedDomainsParams{ + + Context: ctx, + } +} + +// NewListManagedDomainsParamsWithHTTPClient creates a new ListManagedDomainsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewListManagedDomainsParamsWithHTTPClient(client *http.Client) *ListManagedDomainsParams { + + return &ListManagedDomainsParams{ + HTTPClient: client, + } +} + +/*ListManagedDomainsParams contains all the parameters to send to the API endpoint +for the list managed domains operation typically these are written to a http.Request +*/ +type ListManagedDomainsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the list managed domains params +func (o *ListManagedDomainsParams) WithTimeout(timeout time.Duration) *ListManagedDomainsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the list managed domains params +func (o *ListManagedDomainsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the list managed domains params +func (o *ListManagedDomainsParams) WithContext(ctx context.Context) 
*ListManagedDomainsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the list managed domains params +func (o *ListManagedDomainsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the list managed domains params +func (o *ListManagedDomainsParams) WithHTTPClient(client *http.Client) *ListManagedDomainsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the list managed domains params +func (o *ListManagedDomainsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ListManagedDomainsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/managed_domains/list_managed_domains_responses.go b/client/managed_domains/list_managed_domains_responses.go new file mode 100644 index 000000000..6e8a61a0f --- /dev/null +++ b/client/managed_domains/list_managed_domains_responses.go @@ -0,0 +1,106 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package managed_domains + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/filanov/bm-inventory/models" +) + +// ListManagedDomainsReader is a Reader for the ListManagedDomains structure. +type ListManagedDomainsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ListManagedDomainsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewListManagedDomainsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 500: + result := NewListManagedDomainsInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewListManagedDomainsOK creates a ListManagedDomainsOK with default headers values +func NewListManagedDomainsOK() *ListManagedDomainsOK { + return &ListManagedDomainsOK{} +} + +/*ListManagedDomainsOK handles this case with default header values. + +Success. +*/ +type ListManagedDomainsOK struct { + Payload models.ListManagedDomains +} + +func (o *ListManagedDomainsOK) Error() string { + return fmt.Sprintf("[GET /domains][%d] listManagedDomainsOK %+v", 200, o.Payload) +} + +func (o *ListManagedDomainsOK) GetPayload() models.ListManagedDomains { + return o.Payload +} + +func (o *ListManagedDomainsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewListManagedDomainsInternalServerError creates a ListManagedDomainsInternalServerError with default headers values +func NewListManagedDomainsInternalServerError() *ListManagedDomainsInternalServerError { + return &ListManagedDomainsInternalServerError{} +} + +/*ListManagedDomainsInternalServerError handles this case with default header values. + +Error. 
+*/ +type ListManagedDomainsInternalServerError struct { + Payload *models.Error +} + +func (o *ListManagedDomainsInternalServerError) Error() string { + return fmt.Sprintf("[GET /domains][%d] listManagedDomainsInternalServerError %+v", 500, o.Payload) +} + +func (o *ListManagedDomainsInternalServerError) GetPayload() *models.Error { + return o.Payload +} + +func (o *ListManagedDomainsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/client/managed_domains/managed_domains_client.go b/client/managed_domains/managed_domains_client.go new file mode 100644 index 000000000..5b14d9d78 --- /dev/null +++ b/client/managed_domains/managed_domains_client.go @@ -0,0 +1,65 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package managed_domains + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +//go:generate mockery -name API -inpkg + +// API is the interface of the managed domains client +type API interface { + /* + ListManagedDomains lists of managed DNS domains*/ + ListManagedDomains(ctx context.Context, params *ListManagedDomainsParams) (*ListManagedDomainsOK, error) +} + +// New creates a new managed domains API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry, authInfo runtime.ClientAuthInfoWriter) *Client { + return &Client{ + transport: transport, + formats: formats, + authInfo: authInfo, + } +} + +/* +Client for managed domains API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry + authInfo runtime.ClientAuthInfoWriter +} + +/* +ListManagedDomains lists of managed DNS domains +*/ +func (a *Client) ListManagedDomains(ctx context.Context, params *ListManagedDomainsParams) (*ListManagedDomainsOK, error) { + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ListManagedDomains", + Method: "GET", + PathPattern: "/domains", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ListManagedDomainsReader{formats: a.formats}, + Context: ctx, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ListManagedDomainsOK), nil + +} diff --git a/client/versions/list_component_versions_parameters.go b/client/versions/list_component_versions_parameters.go new file mode 100644 index 000000000..5c7083435 --- /dev/null +++ b/client/versions/list_component_versions_parameters.go @@ -0,0 +1,112 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package versions + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewListComponentVersionsParams creates a new ListComponentVersionsParams object +// with the default values initialized. 
+func NewListComponentVersionsParams() *ListComponentVersionsParams { + + return &ListComponentVersionsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewListComponentVersionsParamsWithTimeout creates a new ListComponentVersionsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewListComponentVersionsParamsWithTimeout(timeout time.Duration) *ListComponentVersionsParams { + + return &ListComponentVersionsParams{ + + timeout: timeout, + } +} + +// NewListComponentVersionsParamsWithContext creates a new ListComponentVersionsParams object +// with the default values initialized, and the ability to set a context for a request +func NewListComponentVersionsParamsWithContext(ctx context.Context) *ListComponentVersionsParams { + + return &ListComponentVersionsParams{ + + Context: ctx, + } +} + +// NewListComponentVersionsParamsWithHTTPClient creates a new ListComponentVersionsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewListComponentVersionsParamsWithHTTPClient(client *http.Client) *ListComponentVersionsParams { + + return &ListComponentVersionsParams{ + HTTPClient: client, + } +} + +/*ListComponentVersionsParams contains all the parameters to send to the API endpoint +for the list component versions operation typically these are written to a http.Request +*/ +type ListComponentVersionsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the list component versions params +func (o *ListComponentVersionsParams) WithTimeout(timeout time.Duration) *ListComponentVersionsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the list component versions params +func (o *ListComponentVersionsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the list component versions params +func 
(o *ListComponentVersionsParams) WithContext(ctx context.Context) *ListComponentVersionsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the list component versions params +func (o *ListComponentVersionsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the list component versions params +func (o *ListComponentVersionsParams) WithHTTPClient(client *http.Client) *ListComponentVersionsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the list component versions params +func (o *ListComponentVersionsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ListComponentVersionsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/client/versions/list_component_versions_responses.go b/client/versions/list_component_versions_responses.go new file mode 100644 index 000000000..50ebb10e1 --- /dev/null +++ b/client/versions/list_component_versions_responses.go @@ -0,0 +1,69 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package versions + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/filanov/bm-inventory/models" +) + +// ListComponentVersionsReader is a Reader for the ListComponentVersions structure. +type ListComponentVersionsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ListComponentVersionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewListComponentVersionsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + + default: + return nil, runtime.NewAPIError("unknown error", response, response.Code()) + } +} + +// NewListComponentVersionsOK creates a ListComponentVersionsOK with default headers values +func NewListComponentVersionsOK() *ListComponentVersionsOK { + return &ListComponentVersionsOK{} +} + +/*ListComponentVersionsOK handles this case with default header values. + +Success. +*/ +type ListComponentVersionsOK struct { + Payload *models.ListVersions +} + +func (o *ListComponentVersionsOK) Error() string { + return fmt.Sprintf("[GET /component_versions][%d] listComponentVersionsOK %+v", 200, o.Payload) +} + +func (o *ListComponentVersionsOK) GetPayload() *models.ListVersions { + return o.Payload +} + +func (o *ListComponentVersionsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ListVersions) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/client/versions/versions_client.go b/client/versions/versions_client.go new file mode 100644 index 000000000..0f9f1f02f --- /dev/null +++ b/client/versions/versions_client.go @@ -0,0 +1,65 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package versions + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/runtime" + + strfmt "github.com/go-openapi/strfmt" +) + +//go:generate mockery -name API -inpkg + +// API is the interface of the versions client +type API interface { + /* + ListComponentVersions lists of componenets versions*/ + ListComponentVersions(ctx context.Context, params *ListComponentVersionsParams) (*ListComponentVersionsOK, error) +} + +// New creates a new versions API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry, authInfo runtime.ClientAuthInfoWriter) *Client { + return &Client{ + transport: transport, + formats: formats, + authInfo: authInfo, + } +} + +/* +Client for versions API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry + authInfo runtime.ClientAuthInfoWriter +} + +/* +ListComponentVersions lists of componenets versions +*/ +func (a *Client) ListComponentVersions(ctx context.Context, params *ListComponentVersionsParams) (*ListComponentVersionsOK, error) { + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ListComponentVersions", + Method: "GET", + PathPattern: "/component_versions", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ListComponentVersionsReader{formats: a.formats}, + Context: ctx, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + return result.(*ListComponentVersionsOK), nil + +} diff --git a/cmd/main.go b/cmd/main.go index 1b5804d70..48bef7c6a 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -6,26 +6,42 @@ import ( "net/http" "time" - "github.com/filanov/bm-inventory/internal/events" + "github.com/prometheus/client_golang/prometheus" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/jinzhu/gorm" - _ "github.com/jinzhu/gorm/dialects/mysql" - 
"github.com/kelseyhightower/envconfig" - "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/runtime" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" + "github.com/filanov/bm-inventory/internal/connectivity" + "github.com/filanov/bm-inventory/internal/domains" + "github.com/filanov/bm-inventory/internal/versions" + "github.com/filanov/bm-inventory/internal/bminventory" "github.com/filanov/bm-inventory/internal/cluster" + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/internal/imgexpirer" + "github.com/filanov/bm-inventory/internal/metrics" + + "github.com/filanov/bm-inventory/internal/events" "github.com/filanov/bm-inventory/internal/hardware" "github.com/filanov/bm-inventory/internal/host" "github.com/filanov/bm-inventory/models" + "github.com/filanov/bm-inventory/pkg/app" + "github.com/filanov/bm-inventory/pkg/auth" + "github.com/filanov/bm-inventory/pkg/db" "github.com/filanov/bm-inventory/pkg/job" "github.com/filanov/bm-inventory/pkg/requestid" + awsS3Client "github.com/filanov/bm-inventory/pkg/s3Client" + "github.com/filanov/bm-inventory/pkg/s3wrapper" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/jinzhu/gorm" + _ "github.com/jinzhu/gorm/dialects/postgres" + "github.com/kelseyhightower/envconfig" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "github.com/filanov/bm-inventory/pkg/thread" "github.com/filanov/bm-inventory/restapi" ) @@ -36,16 +52,25 @@ func init() { var Options struct { BMConfig bminventory.Config - DBHost string `envconfig:"DB_HOST" default:"mariadb"` - DBPort string `envconfig:"DB_PORT" default:"3306"` + DBConfig db.Config HWValidatorConfig hardware.ValidatorCfg JobConfig job.Config InstructionConfig host.InstructionConfig ClusterStateMonitorInterval time.Duration 
`envconfig:"CLUSTER_MONITOR_INTERVAL" default:"10s"` + S3Config s3wrapper.Config + HostStateMonitorInterval time.Duration `envconfig:"HOST_MONITOR_INTERVAL" default:"8s"` + Versions versions.Versions + UseK8s bool `envconfig:"USE_K8S" default:"true"` // TODO remove when jobs running deprecated + CreateS3Bucket bool `envconfig:"CREATE_S3_BUCKET" default:"false"` + ImageExpirationInterval time.Duration `envconfig:"IMAGE_EXPIRATION_INTERVAL" default:"30m"` + ImageExpirationTime time.Duration `envconfig:"IMAGE_EXPIRATION_TIME" default:"60m"` + ClusterConfig cluster.Config } func main() { log := logrus.New() + log.SetReportCaller(true) + err := envconfig.Process("myapp", &Options) if err != nil { log.Fatal(err.Error()) @@ -56,50 +81,105 @@ func main() { log.Println("Starting bm service") - db, err := gorm.Open("mysql", - fmt.Sprintf("admin:admin@tcp(%s:%s)/installer?charset=utf8&parseTime=True&loc=Local", - Options.DBHost, Options.DBPort)) + var kclient client.Client + if Options.UseK8s { - if err != nil { - log.Fatal("Fail to connect to DB, ", err) - } - defer db.Close() + if Options.CreateS3Bucket { + if err = s3wrapper.CreateBucket(&Options.S3Config); err != nil { + log.Fatal(err) + } + } + + scheme := runtime.NewScheme() + if err = clientgoscheme.AddToScheme(scheme); err != nil { + log.Fatal("Failed to add K8S scheme", err) + } + + kclient, err = client.New(config.GetConfigOrDie(), client.Options{Scheme: scheme}) + if err != nil && Options.UseK8s { + log.Fatal("failed to create client:", err) + } - scheme := runtime.NewScheme() - if err = clientgoscheme.AddToScheme(scheme); err != nil { - log.Fatal("Failed to add K8S scheme", err) + } else { + log.Println("running drone test, skipping S3") + kclient = nil } - kclient, err := client.New(config.GetConfigOrDie(), client.Options{Scheme: scheme}) + // Connect to db + dbConnectionStr := fmt.Sprintf("host=%s port=%s user=%s dbname=%s password=%s sslmode=disable", + Options.DBConfig.Host, Options.DBConfig.Port, 
Options.DBConfig.User, Options.DBConfig.Name, Options.DBConfig.Pass) + db, err := gorm.Open("postgres", dbConnectionStr) if err != nil { - log.Fatal("failed to create client:", err) + log.Fatal("Fail to connect to DB, ", err) } + defer db.Close() + db.DB().SetMaxIdleConns(0) + db.DB().SetMaxOpenConns(0) + db.DB().SetConnMaxLifetime(0) - if err = db.AutoMigrate(&models.Host{}, &models.Cluster{}, &events.Event{}).Error; err != nil { + if err = db.AutoMigrate(&models.Host{}, &common.Cluster{}, &events.Event{}).Error; err != nil { log.Fatal("failed to auto migrate, ", err) } + versionHandler := versions.NewHandler(Options.Versions) + domainHandler := domains.NewHandler(Options.BMConfig.BaseDNSDomains) eventsHandler := events.New(db, log.WithField("pkg", "events")) - hwValidator := hardware.NewValidator(Options.HWValidatorConfig) - instructionApi := host.NewInstructionManager(log, db, hwValidator, Options.InstructionConfig) - hostApi := host.NewManager(log.WithField("pkg", "host-state"), db, hwValidator, instructionApi) - clusterApi := cluster.NewManager(log.WithField("pkg", "cluster-state"), db, eventsHandler) + hwValidator := hardware.NewValidator(log.WithField("pkg", "validators"), Options.HWValidatorConfig) + connectivityValidator := connectivity.NewValidator(log.WithField("pkg", "validators")) + instructionApi := host.NewInstructionManager(log.WithField("pkg", "instructions"), db, hwValidator, Options.InstructionConfig, connectivityValidator) + prometheusRegistry := prometheus.DefaultRegisterer + metricsManager := metrics.NewMetricsManager(prometheusRegistry) + hostApi := host.NewManager(log.WithField("pkg", "host-state"), db, eventsHandler, hwValidator, instructionApi, &Options.HWValidatorConfig, metricsManager) + clusterApi := cluster.NewManager(Options.ClusterConfig, log.WithField("pkg", "cluster-state"), db, + eventsHandler, hostApi, metricsManager) clusterStateMonitor := thread.New( - log.WithField("pkg", "cluster-monitor"), "State Monitor", 
Options.ClusterStateMonitorInterval, clusterApi.ClusterMonitoring) + log.WithField("pkg", "cluster-monitor"), "Cluster State Monitor", Options.ClusterStateMonitorInterval, clusterApi.ClusterMonitoring) clusterStateMonitor.Start() defer clusterStateMonitor.Stop() + hostStateMonitor := thread.New( + log.WithField("pkg", "host-monitor"), "Host State Monitor", Options.HostStateMonitorInterval, hostApi.HostMonitoring) + hostStateMonitor.Start() + defer hostStateMonitor.Stop() + + s3Client, err := awsS3Client.NewS3Client(Options.BMConfig.S3EndpointURL, Options.BMConfig.AwsAccessKeyID, Options.BMConfig.AwsSecretAccessKey, log) + if err != nil { + log.Fatal("Failed to setup S3 client", err) + } + jobApi := job.New(log.WithField("pkg", "k8s-job-wrapper"), kclient, Options.JobConfig) - bm := bminventory.NewBareMetalInventory(db, log.WithField("pkg", "Inventory"), hostApi, clusterApi, Options.BMConfig, jobApi, eventsHandler) + + bm := bminventory.NewBareMetalInventory(db, log.WithField("pkg", "Inventory"), hostApi, clusterApi, Options.BMConfig, jobApi, eventsHandler, s3Client, metricsManager) events := events.NewApi(eventsHandler, logrus.WithField("pkg", "eventsApi")) + if Options.UseK8s { + s3WrapperClient, s3Err := s3wrapper.NewS3Client(&Options.S3Config) + if s3Err != nil { + log.Fatal("failed to create S3 client, ", err) + } + expirer := imgexpirer.NewManager(log, s3WrapperClient, Options.S3Config.S3Bucket, Options.ImageExpirationTime, eventsHandler) + imageExpirationMonitor := thread.New( + log.WithField("pkg", "image-expiration-monitor"), "Image Expiration Monitor", Options.ImageExpirationInterval, expirer.ExpirationTask) + imageExpirationMonitor.Start() + defer imageExpirationMonitor.Stop() + } else { + log.Info("Disabled image expiration monitor") + } + h, err := restapi.Handler(restapi.Config{ - InstallerAPI: bm, - EventsAPI: events, - Logger: log.Printf, + InstallerAPI: bm, + EventsAPI: events, + Logger: log.Printf, + VersionsAPI: versionHandler, + 
ManagedDomainsAPI: domainHandler, + InnerMiddleware: metrics.WithMatchedRoute(log.WithField("pkg", "matched-h"), prometheusRegistry), }) + h = app.WithMetricsResponderMiddleware(h) + h = app.WithHealthMiddleware(h) + // TODO: replace this with real auth + h = auth.GetUserInfoMiddleware(h) h = requestid.Middleware(h) if err != nil { log.Fatal("Failed to init rest handler,", err) diff --git a/deploy/assisted-installer-ingress-tls.yaml b/deploy/assisted-installer-ingress-tls.yaml new file mode 100644 index 000000000..7e171d320 --- /dev/null +++ b/deploy/assisted-installer-ingress-tls.yaml @@ -0,0 +1,21 @@ +kind: Ingress +apiVersion: networking.k8s.io/v1beta1 +metadata: + name: assisted-installer + namespace: REPLACE_NAMESPACE + annotations: + haproxy.router.openshift.io/timeout: 120s + ingress.kubernetes.io/ssl-redirect: "false" # for haproxy + nginx.ingress.kubernetes.io/ssl-redirect: "false" # for nginx +spec: + tls: + - hosts: + - REPLACE_HOSTNAME + secretName: bm-inventory-tls + rules: + - host: REPLACE_HOSTNAME + http: + paths: + - backend: + serviceName: bm-inventory + servicePort: 8090 \ No newline at end of file diff --git a/deploy/assisted-installer-ingress.yaml b/deploy/assisted-installer-ingress.yaml new file mode 100644 index 000000000..260011258 --- /dev/null +++ b/deploy/assisted-installer-ingress.yaml @@ -0,0 +1,15 @@ +kind: Ingress +apiVersion: networking.k8s.io/v1beta1 +metadata: + name: assisted-installer + namespace: REPLACE_NAMESPACE + annotations: + haproxy.router.openshift.io/timeout: 120s +spec: + rules: + - host: REPLACE_HOSTNAME + http: + paths: + - backend: + serviceName: bm-inventory + servicePort: 8090 diff --git a/deploy/bm-inventory-configmap.yaml b/deploy/bm-inventory-configmap.yaml index f7ef2355f..fe8e72a1e 100644 --- a/deploy/bm-inventory-configmap.yaml +++ b/deploy/bm-inventory-configmap.yaml @@ -2,10 +2,13 @@ apiVersion: v1 kind: ConfigMap metadata: name: bm-inventory-config - namespace: assisted-installer + namespace: 
REPLACE_NAMESPACE labels: app: bm-inventory data: INVENTORY_URL: REPLACE_URL INVENTORY_PORT: REPLACE_PORT - NAMESPACE: assisted-installer + NAMESPACE: REPLACE_NAMESPACE + BASE_DNS_DOMAINS: REPLACE_DOMAINS # example: name1:id1/provider1,name2:id2/provider2 + OPENSHIFT_INSTALL_RELEASE_IMAGE: "quay.io/openshift-release-dev/ocp-release@sha256:eab93b4591699a5a4ff50ad3517892653f04fb840127895bb3609b3cc68f98f3" + CREATE_S3_BUCKET: "true" diff --git a/deploy/bm-inventory-service.yaml b/deploy/bm-inventory-service.yaml index 63a5d6d16..ac3cb0f1d 100644 --- a/deploy/bm-inventory-service.yaml +++ b/deploy/bm-inventory-service.yaml @@ -4,10 +4,11 @@ metadata: labels: app: bm-inventory name: bm-inventory - namespace: assisted-installer + namespace: REPLACE_NAMESPACE spec: ports: - - port: 8090 + - name: bm-inventory + port: 8090 protocol: TCP targetPort: 8090 selector: diff --git a/deploy/bm-inventory.yaml b/deploy/bm-inventory.yaml index eef29c821..bfd9cac09 100644 --- a/deploy/bm-inventory.yaml +++ b/deploy/bm-inventory.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 kind: Deployment metadata: name: bm-inventory - namespace: assisted-installer + namespace: REPLACE_NAMESPACE spec: selector: matchLabels: @@ -15,15 +15,77 @@ spec: spec: containers: - name: bm-inventory + resources: + limits: + cpu: 200m + memory: 500Mi + requests: + cpu: 100m + memory: 400Mi image: REPLACE_IMAGE imagePullPolicy: Always ports: - containerPort: 8090 envFrom: - - configMapRef: - name: s3-config - configMapRef: name: bm-inventory-config env: - - name: IMAGE_BUILDER_CMD - value: "" + - name: DB_HOST + valueFrom: + secretKeyRef: + name: assisted-installer-rds + key: db.host + - name: DB_NAME + valueFrom: + secretKeyRef: + name: assisted-installer-rds + key: db.name + - name: DB_PASS + valueFrom: + secretKeyRef: + name: assisted-installer-rds + key: db.password + - name: DB_PORT + valueFrom: + secretKeyRef: + name: assisted-installer-rds + key: db.port + - name: 
DB_USER + valueFrom: + secretKeyRef: + name: assisted-installer-rds + key: db.user + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: aws_secret_access_key + name: assisted-installer-s3 + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: aws_access_key_id + name: assisted-installer-s3 + - name: S3_REGION + valueFrom: + secretKeyRef: + key: aws_region + name: assisted-installer-s3 + - name: S3_BUCKET + valueFrom: + secretKeyRef: + key: bucket + name: assisted-installer-s3 + - name: S3_ENDPOINT_URL + valueFrom: + secretKeyRef: + key: endpoint + name: assisted-installer-s3 + volumeMounts: + - name: route53-creds + mountPath: "/.aws" + readOnly: true + volumes: + - name: route53-creds + secret: + secretName: route53-creds + optional: true diff --git a/deploy/mariadb/mariadb-configmap.yaml b/deploy/mariadb/mariadb-configmap.yaml deleted file mode 100644 index 7df3beb28..000000000 --- a/deploy/mariadb/mariadb-configmap.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: mariadb-config - namespace: assisted-installer - labels: - app: mariadb -data: - MYSQL_DATABASE: installer - MYSQL_USER: admin - MYSQL_PASSWORD: admin - MYSQL_ROOT_PASSWORD: root \ No newline at end of file diff --git a/deploy/mariadb/mariadb-deployment.yaml b/deploy/mariadb/mariadb-deployment.yaml deleted file mode 100644 index 1567a4803..000000000 --- a/deploy/mariadb/mariadb-deployment.yaml +++ /dev/null @@ -1,47 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: mariadb - namespace: assisted-installer -spec: - selector: - matchLabels: - app: mariadb - replicas: 1 - template: - metadata: - labels: - app: mariadb - spec: - containers: - - name: mariadb - image: mariadb - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 3306 - envFrom: - - configMapRef: - name: mariadb-config - volumeMounts: - - mountPath: /var/lib/mysql - name: mariadbvol - volumes: - - name: mariadbvol - persistentVolumeClaim: - claimName: 
mariadb-pv-claim ---- -apiVersion: v1 -kind: Service -metadata: - name: mariadb - namespace: assisted-installer - labels: - app: mariadb -spec: - type: LoadBalancer - ports: - - port: 3306 - selector: - app: mariadb -status: - loadBalancer: {} diff --git a/deploy/monitoring/grafana/assisted-installer-grafana-dashboard.yaml b/deploy/monitoring/grafana/assisted-installer-grafana-dashboard.yaml new file mode 100644 index 000000000..a49adf1d9 --- /dev/null +++ b/deploy/monitoring/grafana/assisted-installer-grafana-dashboard.yaml @@ -0,0 +1,299 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: grafana-dashboard-assisted-installer + namespace: REPLACE_NAMESPACE +data: + assisted-installer-dashboard.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 1, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "#d44a3a", + "rgba(237, 129, 40, 0.89)", + "#73BF69" + ], + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 0, + "y": 0 + }, + "id": 2, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + 
"tableColumn": "", + "targets": [ + { + "expr": "up", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "0,1", + "timeFrom": null, + "timeShift": null, + "title": "Status", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "UP", + "value": "1" + }, + { + "op": "=", + "text": "DOWN", + "value": "0" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "fill": 1, + "gridPos": { + "h": 9, + "w": 15, + "x": 3, + "y": 0 + }, + "id": 4, + "legend": { + "avg": false, + "current": true, + "max": false, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": {}, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "process_cpu_seconds_total", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Seconds Total", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "gridPos": { + "h": 5, + "w": 3, + "x": 0, + "y": 4 + }, + "id": 6, + "links": [], + "options": { + "fieldOptions": { + "calcs": [ + "last" + ], + "defaults": { + "max": 100, + "min": 0, + "title": "" + }, + "mappings": [], + "override": {}, + "thresholds": [ + 
{ + "color": "green", + "index": 0, + "value": null + }, + { + "color": "yellow", + "index": 1, + "value": 50 + }, + { + "color": "dark-red", + "index": 2, + "value": 80 + } + ], + "values": false + }, + "orientation": "auto", + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "6.2.4", + "targets": [ + { + "expr": "go_threads", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Go Threads", + "type": "gauge" + } + ], + "refresh": false, + "schemaVersion": 18, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Status", + "uid": "6iYtRpiGz", + "version": 5 + } diff --git a/deploy/monitoring/grafana/assisted-installer-k8s-grafana.yaml b/deploy/monitoring/grafana/assisted-installer-k8s-grafana.yaml new file mode 100644 index 000000000..73f787962 --- /dev/null +++ b/deploy/monitoring/grafana/assisted-installer-k8s-grafana.yaml @@ -0,0 +1,85 @@ +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + app: grafana + name: grafana + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + selector: + matchLabels: + app: grafana + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + labels: + app: grafana + spec: + containers: + - name: grafana + image: grafana/grafana:latest + imagePullPolicy: IfNotPresent + ports: + - name: grafana + protocol: TCP + containerPort: 3000 + resources: + limits: + memory: "2Gi" + cpu: "1000m" + requests: + cpu: "100m" + memory: "100Mi" + volumeMounts: + - mountPath: /var/lib/grafana + name: grafana-storage + - mountPath: 
/etc/grafana/provisioning/datasources + name: grafana-datasources + - mountPath: /etc/grafana/provisioning/dashboards + name: grafana-dashboards + - mountPath: /grafana-dashboard-definitions/0/assistedinstaller + name: grafana-dashboard-assisted-installer + - mountPath: /etc/grafana + name: grafana-config + volumes: + - name: grafana-storage + emptyDir: {} + - name: grafana-datasources + secret: + defaultMode: 420 + secretName: grafana-datasources + - name: grafana-dashboards + configMap: + defaultMode: 420 + name: grafana-dashboards + - name: grafana-dashboard-assisted-installer + configMap: + defaultMode: 420 + name: grafana-dashboard-assisted-installer + - name: grafana-config + secret: + defaultMode: 420 + secretName: grafana-config +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: grafana + name: grafana + namespace: REPLACE_NAMESPACE +spec: + ports: + - name: grafana + port: 3000 + protocol: TCP + targetPort: grafana + selector: + app: grafana + type: ClusterIP diff --git a/deploy/monitoring/grafana/assisted-installer-ocp-grafana-route.yaml b/deploy/monitoring/grafana/assisted-installer-ocp-grafana-route.yaml new file mode 100644 index 000000000..95f5f5bc4 --- /dev/null +++ b/deploy/monitoring/grafana/assisted-installer-ocp-grafana-route.yaml @@ -0,0 +1,20 @@ +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + annotations: + openshift.io/host.generated: "true" + name: grafana + namespace: REPLACE_NAMESPACE +spec: + host: grafana-assisted-installer.INGRESS_DOMAIN + port: + targetPort: web-proxy + tls: + insecureEdgeTerminationPolicy: Redirect + termination: reencrypt + to: + kind: Service + name: grafana + weight: 100 + wildcardPolicy: None + diff --git a/deploy/monitoring/grafana/assisted-installer-ocp-grafana.yaml b/deploy/monitoring/grafana/assisted-installer-ocp-grafana.yaml new file mode 100644 index 000000000..a4d48658b --- /dev/null +++ b/deploy/monitoring/grafana/assisted-installer-ocp-grafana.yaml @@ -0,0 +1,133 @@ +--- +kind: 
Deployment +apiVersion: apps/v1 +metadata: + labels: + app: grafana + name: grafana + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + selector: + matchLabels: + app: grafana + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + labels: + app: grafana + spec: + containers: + - args: + - -config=/etc/grafana/grafana.ini + image: quay.io/openshift/origin-grafana:latest + imagePullPolicy: IfNotPresent + name: grafana + ports: + - containerPort: 3001 + name: http + protocol: TCP + resources: + limits: + memory: "2Gi" + cpu: "1000m" + requests: + cpu: "100m" + memory: "100Mi" + volumeMounts: + - mountPath: /var/lib/grafana + name: grafana-storage + - mountPath: /etc/grafana/provisioning/datasources + name: grafana-datasources + - mountPath: /etc/grafana/provisioning/dashboards + name: grafana-dashboards + - mountPath: /grafana-dashboard-definitions/0/assistedinstaller + name: grafana-dashboard-assisted-installer + - mountPath: /etc/grafana + name: grafana-config + - args: + - -provider=openshift + - -https-address=:3000 + - -http-address= + - -email-domain=* + - -upstream=http://localhost:3001 + - '-openshift-sar={"resource":"namespaces","resourceName":"assisted-installer","namespace":"REPLACE_NAMESPACE","verb":"get"}' + - -tls-cert=/etc/tls/private/tls.crt + - -tls-key=/etc/tls/private/tls.key + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret-file=/etc/proxy/secrets/session_secret + - -openshift-service-account=grafana + - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - -openshift-ca=/etc/proxy/custom-ca/ca.crt + - -skip-auth-regex=^/metrics + image: quay.io/openshift/origin-oauth-proxy:4.7 + imagePullPolicy: IfNotPresent + name: oauth-proxy + ports: + - containerPort: 3000 + name: web-proxy + protocol: TCP + volumeMounts: + - mountPath: /etc/tls/private + name: secret-grafana-tls + - mountPath: /etc/proxy/secrets + name: 
secret-grafana-proxy + - mountPath: /etc/proxy/custom-ca + name: secret-openshift-custom-ca + restartPolicy: Always + serviceAccountName: grafana + terminationGracePeriodSeconds: 30 + volumes: + - name: grafana-storage + emptyDir: {} + - name: grafana-datasources + secret: + defaultMode: 420 + secretName: grafana-datasources + - name: secret-openshift-custom-ca + secret: + defaultMode: 420 + secretName: openshift-custom-ca + - name: grafana-dashboards + configMap: + defaultMode: 420 + name: grafana-dashboards + - name: grafana-dashboard-assisted-installer + configMap: + defaultMode: 420 + name: grafana-dashboard-assisted-installer + - name: grafana-config + secret: + defaultMode: 420 + secretName: grafana-config + - name: secret-grafana-tls + secret: + defaultMode: 420 + secretName: grafana-tls + - name: secret-grafana-proxy + secret: + defaultMode: 420 + secretName: grafana-proxy +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.openshift.io/serving-cert-secret-name: grafana-tls + labels: + app: grafana + name: grafana + namespace: REPLACE_NAMESPACE +spec: + ports: + - name: web-proxy + port: 3000 + protocol: TCP + targetPort: web-proxy + selector: + app: grafana + type: ClusterIP diff --git a/deploy/monitoring/grafana/grafana-dashboards.yaml b/deploy/monitoring/grafana/grafana-dashboards.yaml new file mode 100644 index 000000000..5dd5e6fe8 --- /dev/null +++ b/deploy/monitoring/grafana/grafana-dashboards.yaml @@ -0,0 +1,21 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: grafana-dashboards + namespace: REPLACE_NAMESPACE +data: + dashboards.yaml: |- + { + "apiVersion": 1, + "providers": [ + { + "folder": "", + "name": "0", + "options": { + "path": "/grafana-dashboard-definitions/0" + }, + "orgId": 1, + "type": "file" + } + ] + } diff --git a/deploy/monitoring/grafana/grafana-k8s.ini b/deploy/monitoring/grafana/grafana-k8s.ini new file mode 100644 index 000000000..9d7dbf0b0 --- /dev/null +++ 
b/deploy/monitoring/grafana/grafana-k8s.ini @@ -0,0 +1,19 @@ +[auth] +disable_login_form = false +disable_signout_menu = false +[auth.basic] +enabled = true +admin_user = admin +admin_password = admin +[users] +allow_sign_up = false +auto_assign_org = true +auto_assign_org_role = Admin +[paths] +data = /var/lib/grafana +logs = /var/lib/grafana/logs +plugins = /var/lib/grafana/plugins +provisioning = /etc/grafana/provisioning +[server] +http_addr = 127.0.0.1 +http_port = 3000 diff --git a/deploy/monitoring/grafana/grafana.ini b/deploy/monitoring/grafana/grafana.ini new file mode 100644 index 000000000..e68a643b6 --- /dev/null +++ b/deploy/monitoring/grafana/grafana.ini @@ -0,0 +1,21 @@ +[auth] +disable_login_form = true +disable_signout_menu = true +[auth.basic] +enabled = false +[users] +allow_sign_up = false +auto_assign_org = true +auto_assign_org_role = Admin +[auth.proxy] +auto_sign_up = true +enabled = true +header_name = X-Forwarded-User +[paths] +data = /var/lib/grafana +logs = /var/lib/grafana/logs +plugins = /var/lib/grafana/plugins +provisioning = /etc/grafana/provisioning +[server] +http_addr = 127.0.0.1 +http_port = 3001 diff --git a/deploy/monitoring/grafana/prometheus.json b/deploy/monitoring/grafana/prometheus.json new file mode 100644 index 000000000..86748f628 --- /dev/null +++ b/deploy/monitoring/grafana/prometheus.json @@ -0,0 +1,16 @@ +{ + "apiVersion": 1, + "datasources": [ + { + "access": "proxy", + "basicAuth": false, + "withCredentials": false, + "editable": false, + "name": "assisted-installer-prometheus", + "orgId": 1, + "type": "prometheus", + "url": "http://prometheus-k8s:9090", + "version": 1 + } + ] +} diff --git a/deploy/monitoring/prometheus/assisted-installer-k8s-prometheus-subscription-instance.yaml b/deploy/monitoring/prometheus/assisted-installer-k8s-prometheus-subscription-instance.yaml new file mode 100644 index 000000000..17d922764 --- /dev/null +++ 
b/deploy/monitoring/prometheus/assisted-installer-k8s-prometheus-subscription-instance.yaml @@ -0,0 +1,13 @@ +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + name: assisted-installer-prometheus + labels: + prometheus: assisted-installer-prometheus + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + version: v2.8.0 + serviceMonitorSelector: + matchLabels: + app: assisted-installer-monitor diff --git a/deploy/monitoring/prometheus/assisted-installer-k8s-prometheus-svc.yaml b/deploy/monitoring/prometheus/assisted-installer-k8s-prometheus-svc.yaml new file mode 100644 index 000000000..0c97ca5cb --- /dev/null +++ b/deploy/monitoring/prometheus/assisted-installer-k8s-prometheus-svc.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + prometheus: assisted-installer-prometheus + name: prometheus-k8s + namespace: REPLACE_NAMESPACE +spec: + ports: + - name: web + port: 9090 + protocol: TCP + targetPort: web + selector: + app: prometheus + prometheus: assisted-installer-prometheus + type: ClusterIP diff --git a/deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-custom-ca.yaml b/deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-custom-ca.yaml new file mode 100644 index 000000000..faa68fc07 --- /dev/null +++ b/deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-custom-ca.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + ca.crt: BASE64_CERT +kind: Secret +metadata: + name: openshift-custom-ca + namespace: REPLACE_NAMESPACE +type: Opaque diff --git a/deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-route.yaml b/deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-route.yaml new file mode 100644 index 000000000..9e03acab0 --- /dev/null +++ b/deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-route.yaml @@ -0,0 +1,20 @@ +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + labels: + prometheus: assisted-installer-prometheus + name: prometheus-assisted + namespace: 
REPLACE_NAMESPACE +spec: + host: monitoring-assisted-installer.INGRESS_DOMAIN + port: + targetPort: proxy + tls: + insecureEdgeTerminationPolicy: Redirect + termination: reencrypt + to: + kind: Service + name: prometheus-k8s + weight: 100 + wildcardPolicy: None + diff --git a/deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-subscription-instance.yaml b/deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-subscription-instance.yaml new file mode 100644 index 000000000..1d5044b23 --- /dev/null +++ b/deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-subscription-instance.yaml @@ -0,0 +1,52 @@ +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + name: assisted-installer-prometheus + labels: + prometheus: assisted-installer-prometheus + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + version: v2.8.0 + serviceAccountName: prometheus-k8s + resources: + limits: + memory: "2Gi" + cpu: "1000m" + requests: + cpu: "100m" + memory: "100Mi" + serviceMonitorSelector: + matchLabels: + app: assisted-installer-monitor + containers: + - args: + - -provider=openshift + - -https-address=:9091 + - -http-address= + - -email-domain=* + - -upstream=http://localhost:9090 + - -openshift-service-account=prometheus-k8s + - -openshift-ca=/etc/proxy/custom-ca/ca.crt + - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - '-openshift-sar={"resource":"namespaces","resourceName":"assisted-installer","namespace":"REPLACE_NAMESPACE","verb":"get"}' + - -tls-cert=/etc/tls/private/tls.crt + - -tls-key=/etc/tls/private/tls.key + - -cookie-secret-file=/etc/proxy/secrets/session_secret + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + image: quay.io/openshift/origin-oauth-proxy:4.7 + name: oauth-proxy + ports: + - containerPort: 9091 + name: web-proxy + volumeMounts: + - mountPath: /etc/tls/private + name: secret-prometheus-k8s-tls + - mountPath: /etc/proxy/secrets + name: secret-prometheus-k8s-proxy + - 
mountPath: /etc/proxy/custom-ca + name: secret-openshift-custom-ca + secrets: + - prometheus-k8s-tls + - prometheus-k8s-proxy + - openshift-custom-ca diff --git a/deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-svc.yaml b/deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-svc.yaml new file mode 100644 index 000000000..5d088b61b --- /dev/null +++ b/deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-svc.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.openshift.io/serving-cert-secret-name: prometheus-k8s-tls + labels: + prometheus: assisted-installer-prometheus + name: prometheus-k8s + namespace: REPLACE_NAMESPACE +spec: + ports: + - name: proxy + port: 9091 + protocol: TCP + targetPort: web-proxy + - name: grafana-access + port: 9090 + protocol: TCP + targetPort: web + selector: + app: prometheus + prometheus: assisted-installer-prometheus + type: ClusterIP diff --git a/deploy/monitoring/prometheus/assisted-installer-operator-group.yaml b/deploy/monitoring/prometheus/assisted-installer-operator-group.yaml new file mode 100644 index 000000000..5d3c451a1 --- /dev/null +++ b/deploy/monitoring/prometheus/assisted-installer-operator-group.yaml @@ -0,0 +1,8 @@ +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: monitoring-operators + namespace: REPLACE_NAMESPACE +spec: + targetNamespaces: + - REPLACE_NAMESPACE diff --git a/deploy/monitoring/prometheus/assisted-installer-operator-subscription.yaml b/deploy/monitoring/prometheus/assisted-installer-operator-subscription.yaml new file mode 100644 index 000000000..68d0f0adc --- /dev/null +++ b/deploy/monitoring/prometheus/assisted-installer-operator-subscription.yaml @@ -0,0 +1,11 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: prometheus + namespace: REPLACE_NAMESPACE +spec: + channel: beta + installPlanApproval: Automatic + name: prometheus + source: CAT_SRC + sourceNamespace: 
OLM_NAMESPACE diff --git a/deploy/monitoring/prometheus/assisted-installer-prometheus-svc-monitor.yaml b/deploy/monitoring/prometheus/assisted-installer-prometheus-svc-monitor.yaml new file mode 100644 index 000000000..4c8d30c5e --- /dev/null +++ b/deploy/monitoring/prometheus/assisted-installer-prometheus-svc-monitor.yaml @@ -0,0 +1,23 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + app: assisted-installer-monitor + name: assisted-installer + namespace: REPLACE_NAMESPACE +spec: + endpoints: + - interval: 30s + scrapeTimeout: 30s + port: bm-inventory + path: /metrics + params: + 'match[]': + - '{__name__=~".+"}' + scheme: http + namespaceSelector: + matchNames: + - REPLACE_NAMESPACE + selector: + matchLabels: + app: "bm-inventory" diff --git a/deploy/namespace/namespace.yaml b/deploy/namespace/namespace.yaml index 4d88c3131..fb2218912 100644 --- a/deploy/namespace/namespace.yaml +++ b/deploy/namespace/namespace.yaml @@ -1,6 +1,6 @@ apiVersion: v1 kind: Namespace metadata: - name: assisted-installer + name: REPLACE_NAMESPACE labels: - name: assisted-installer + name: REPLACE_NAMESPACE diff --git a/deploy/postgres/postgres-deployment.yaml b/deploy/postgres/postgres-deployment.yaml new file mode 100644 index 000000000..57c72adb0 --- /dev/null +++ b/deploy/postgres/postgres-deployment.yaml @@ -0,0 +1,67 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postgres + namespace: REPLACE_NAMESPACE +spec: + selector: + matchLabels: + app: postgres + replicas: 1 + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: centos/postgresql-12-centos7 + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 5432 + env: + - name: POSTGRESQL_DATABASE + valueFrom: + secretKeyRef: + name: assisted-installer-rds + key: db.name + - name: POSTGRESQL_USER + valueFrom: + secretKeyRef: + name: assisted-installer-rds + key: db.user + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: 
assisted-installer-rds + key: db.password + volumeMounts: + - mountPath: /var/lib/postgress + name: postgredb + resources: + limits: + cpu: 200m + memory: 500Mi + requests: + cpu: 100m + memory: 400Mi + volumes: + - name: postgredb + persistentVolumeClaim: + claimName: postgres-pv-claim +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: REPLACE_NAMESPACE + labels: + app: postgres +spec: + type: LoadBalancer + ports: + - port: 5432 + selector: + app: postgres +status: + loadBalancer: {} diff --git a/deploy/postgres/postgres-secret.yaml b/deploy/postgres/postgres-secret.yaml new file mode 100644 index 000000000..582d32f15 --- /dev/null +++ b/deploy/postgres/postgres-secret.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Secret +metadata: + name: assisted-installer-rds + namespace: REPLACE_NAMESPACE + labels: + app: postgres +type: Opaque +stringData: + db.host: "postgres" + db.name: "installer" + db.password: "admin" + db.port: "5432" + db.user: "admin" diff --git a/deploy/mariadb/mariadb-storage.yaml b/deploy/postgres/postgres-storage.yaml similarity index 57% rename from deploy/mariadb/mariadb-storage.yaml rename to deploy/postgres/postgres-storage.yaml index ccc262140..9df99a350 100644 --- a/deploy/mariadb/mariadb-storage.yaml +++ b/deploy/postgres/postgres-storage.yaml @@ -1,13 +1,13 @@ kind: PersistentVolumeClaim apiVersion: v1 metadata: - name: mariadb-pv-claim - namespace: assisted-installer + name: postgres-pv-claim + namespace: REPLACE_NAMESPACE labels: - app: mariadb + app: postgres spec: accessModes: - ReadWriteOnce resources: requests: - storage: 1Gi + storage: 10Gi diff --git a/deploy/roles/default_role.yaml b/deploy/roles/default_role.yaml new file mode 100644 index 000000000..9f5577a45 --- /dev/null +++ b/deploy/roles/default_role.yaml @@ -0,0 +1,46 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: default + namespace: REPLACE_NAMESPACE +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: 
default + namespace: REPLACE_NAMESPACE +rules: + - verbs: + - get + - watch + - list + apiGroups: + - '' + resources: + - pods + - endpoints + - services + - verbs: + - get + - watch + - list + - delete + - create + apiGroups: + - batch + resources: + - jobs +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: default + namespace: REPLACE_NAMESPACE +subjects: + - kind: ServiceAccount + name: default + namespace: REPLACE_NAMESPACE +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default diff --git a/deploy/roles/role_binding.yaml b/deploy/roles/role_binding.yaml deleted file mode 100644 index 9efbacb1c..000000000 --- a/deploy/roles/role_binding.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: fabric8-rbac -subjects: - - kind: ServiceAccount - # Reference to upper's `metadata.name` - name: default - # Reference to upper's `metadata.namespace` - namespace: assisted-installer -roleRef: - kind: ClusterRole - name: cluster-admin - apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/deploy/route53/route53-secret.yaml b/deploy/route53/route53-secret.yaml new file mode 100644 index 000000000..87781cded --- /dev/null +++ b/deploy/route53/route53-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + credentials: BASE64_CREDS +kind: Secret +metadata: + name: route53-creds + namespace: REPLACE_NAMESPACE +type: Opaque \ No newline at end of file diff --git a/deploy/s3/s3-object-expirer-cron.yaml b/deploy/s3/s3-object-expirer-cron.yaml deleted file mode 100644 index 7ca892328..000000000 --- a/deploy/s3/s3-object-expirer-cron.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: batch/v1beta1 -kind: CronJob -metadata: - name: s3-object-expirer-cron-job - namespace: assisted-installer -spec: - schedule: "@hourly" - jobTemplate: - spec: - template: - spec: - containers: - - name: s3-object-expirer-job-pod - image: REPLACE_IMAGE - 
imagePullPolicy: Always - envFrom: - - configMapRef: - name: s3-config - args: - - /bin/bash - - -c - - python ./expirer.py - restartPolicy: OnFailure - backoffLimit: 3 diff --git a/deploy/s3/scality-configmap.yaml b/deploy/s3/scality-configmap.yaml deleted file mode 100644 index 85d146e45..000000000 --- a/deploy/s3/scality-configmap.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: s3-config - namespace: assisted-installer - labels: - app: scality -data: - S3_ENDPOINT_URL: REPLACE_URL - HOST_NAME: REPLACE_HOST_NAME - S3DATAPATH: /mnt/data - S3METADATAPATH: /mnt/data diff --git a/deploy/s3/scality-deployment.yaml b/deploy/s3/scality-deployment.yaml index 760b0879b..ee27740bd 100644 --- a/deploy/s3/scality-deployment.yaml +++ b/deploy/s3/scality-deployment.yaml @@ -4,7 +4,7 @@ metadata: labels: app: scality name: scality - namespace: assisted-installer + namespace: REPLACE_NAMESPACE spec: replicas: 1 selector: @@ -19,12 +19,52 @@ spec: - image: scality/s3server imagePullPolicy: Always name: s3server - envFrom: - - configMapRef: - name: s3-config + env: + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: assisted-installer-s3 + key: aws_secret_access_key + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: assisted-installer-s3 + key: aws_access_key_id + - name: AWS_REGION + valueFrom: + secretKeyRef: + name: assisted-installer-s3 + key: aws_region + - name: BUCKET + valueFrom: + secretKeyRef: + name: assisted-installer-s3 + key: bucket + - name: S3_ENDPOINT_URL + valueFrom: + secretKeyRef: + name: assisted-installer-s3 + key: endpoint + - name: S3DATAPATH + valueFrom: + secretKeyRef: + name: assisted-installer-s3 + key: s3_data_path + - name: S3METADATAPATH + valueFrom: + secretKeyRef: + name: assisted-installer-s3 + key: s3_metadata_path volumeMounts: - mountPath: /mnt/data name: scalityvol + resources: + limits: + cpu: 500m + memory: 2000Mi + requests: + cpu: 300m + memory: 2000Mi volumes: - name: 
scalityvol persistentVolumeClaim: @@ -35,8 +75,10 @@ kind: Service metadata: labels: app: scality - name: scality - namespace: assisted-installer + # cloudserver-front supported as one of the default hostnames that can receive messages + # to support other hostnamnes they need to be defined with HOST_NAME environment variable. + name: cloudserver-front + namespace: REPLACE_NAMESPACE spec: ports: - port: 8000 @@ -44,6 +86,6 @@ spec: targetPort: 8000 selector: app: scality - type: LoadBalancer + clusterIP: None status: loadBalancer: {} diff --git a/deploy/s3/scality-secret.yaml b/deploy/s3/scality-secret.yaml new file mode 100644 index 000000000..c486f61d8 --- /dev/null +++ b/deploy/s3/scality-secret.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Secret +metadata: + name: assisted-installer-s3 + namespace: REPLACE_NAMESPACE + labels: + app: scality +type: Opaque +stringData: + aws_access_key_id: "accessKey1" + aws_region: "us-east-1" + aws_secret_access_key: "verySecretKey1" + bucket: "test" + endpoint: "http://cloudserver-front:8000" + s3_data_path: /mnt/data + s3_metadata_path: /mnt/data diff --git a/deploy/s3/scality-storage.yaml b/deploy/s3/scality-storage.yaml index f6a404271..af9c6e2c7 100644 --- a/deploy/s3/scality-storage.yaml +++ b/deploy/s3/scality-storage.yaml @@ -2,7 +2,7 @@ kind: PersistentVolumeClaim apiVersion: v1 metadata: name: scality-pv-claim - namespace: assisted-installer + namespace: REPLACE_NAMESPACE labels: app: scality spec: diff --git a/deploy/ui/ui_ingress.yaml b/deploy/ui/ui_ingress.yaml new file mode 100644 index 000000000..f8d278a04 --- /dev/null +++ b/deploy/ui/ui_ingress.yaml @@ -0,0 +1,15 @@ +kind: Ingress +apiVersion: networking.k8s.io/v1beta1 +metadata: + name: assisted-installer-ui + namespace: REPLACE_NAMESPACE + annotations: + haproxy.router.openshift.io/timeout: 120s +spec: + rules: + - host: REPLACE_HOSTNAME + http: + paths: + - backend: + serviceName: ocp-metal-ui + servicePort: 80 diff --git a/dist/favicon-16x16.png 
b/dist/favicon-16x16.png new file mode 100644 index 000000000..8b194e617 Binary files /dev/null and b/dist/favicon-16x16.png differ diff --git a/dist/favicon-32x32.png b/dist/favicon-32x32.png new file mode 100644 index 000000000..249737fe4 Binary files /dev/null and b/dist/favicon-32x32.png differ diff --git a/dist/oauth2-redirect.html b/dist/oauth2-redirect.html new file mode 100644 index 000000000..a013fc82e --- /dev/null +++ b/dist/oauth2-redirect.html @@ -0,0 +1,68 @@ + + +Swagger UI: OAuth2 Redirect + + + + diff --git a/dist/swagger-ui-bundle.js b/dist/swagger-ui-bundle.js new file mode 100644 index 000000000..8b167d662 --- /dev/null +++ b/dist/swagger-ui-bundle.js @@ -0,0 +1,134 @@ +!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t(function(){try{return require("esprima")}catch(e){}}()):"function"==typeof define&&define.amd?define(["esprima"],t):"object"==typeof exports?exports.SwaggerUIBundle=t(function(){try{return require("esprima")}catch(e){}}()):e.SwaggerUIBundle=t(e.esprima)}(window,function(e){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var o=t[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)n.d(r,o,function(t){return e[t]}.bind(null,o));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return 
Object.prototype.hasOwnProperty.call(e,t)},n.p="/dist",n(n.s=493)}([function(e,t,n){"use strict";e.exports=n(106)},function(e,t,n){e.exports=function(){"use strict";var e=Array.prototype.slice;function t(e,t){t&&(e.prototype=Object.create(t.prototype)),e.prototype.constructor=e}function n(e){return a(e)?e:J(e)}function r(e){return s(e)?e:K(e)}function o(e){return u(e)?e:Y(e)}function i(e){return a(e)&&!c(e)?e:$(e)}function a(e){return!(!e||!e[p])}function s(e){return!(!e||!e[f])}function u(e){return!(!e||!e[h])}function c(e){return s(e)||u(e)}function l(e){return!(!e||!e[d])}t(r,n),t(o,n),t(i,n),n.isIterable=a,n.isKeyed=s,n.isIndexed=u,n.isAssociative=c,n.isOrdered=l,n.Keyed=r,n.Indexed=o,n.Set=i;var p="@@__IMMUTABLE_ITERABLE__@@",f="@@__IMMUTABLE_KEYED__@@",h="@@__IMMUTABLE_INDEXED__@@",d="@@__IMMUTABLE_ORDERED__@@",m=5,v=1<>>0;if(""+n!==t||4294967295===n)return NaN;t=n}return t<0?C(e)+t:t}function O(){return!0}function A(e,t,n){return(0===e||void 0!==n&&e<=-n)&&(void 0===t||void 0!==n&&t>=n)}function T(e,t){return I(e,t,0)}function j(e,t){return I(e,t,t)}function I(e,t,n){return void 0===e?n:e<0?Math.max(0,t+e):void 0===t?e:Math.min(t,e)}var P=0,M=1,N=2,R="function"==typeof Symbol&&Symbol.iterator,D="@@iterator",L=R||D;function U(e){this.next=e}function q(e,t,n,r){var o=0===e?t:1===e?n:[t,n];return r?r.value=o:r={value:o,done:!1},r}function F(){return{value:void 0,done:!0}}function B(e){return!!H(e)}function z(e){return e&&"function"==typeof e.next}function V(e){var t=H(e);return t&&t.call(e)}function H(e){var t=e&&(R&&e[R]||e[D]);if("function"==typeof t)return t}function W(e){return e&&"number"==typeof e.length}function J(e){return null==e?ie():a(e)?e.toSeq():function(e){var t=ue(e)||"object"==typeof e&&new te(e);if(!t)throw new TypeError("Expected Array or iterable object of values, or keyed object: "+e);return t}(e)}function K(e){return null==e?ie().toKeyedSeq():a(e)?s(e)?e.toSeq():e.fromEntrySeq():ae(e)}function Y(e){return 
null==e?ie():a(e)?s(e)?e.entrySeq():e.toIndexedSeq():se(e)}function $(e){return(null==e?ie():a(e)?s(e)?e.entrySeq():e:se(e)).toSetSeq()}U.prototype.toString=function(){return"[Iterator]"},U.KEYS=P,U.VALUES=M,U.ENTRIES=N,U.prototype.inspect=U.prototype.toSource=function(){return this.toString()},U.prototype[L]=function(){return this},t(J,n),J.of=function(){return J(arguments)},J.prototype.toSeq=function(){return this},J.prototype.toString=function(){return this.__toString("Seq {","}")},J.prototype.cacheResult=function(){return!this._cache&&this.__iterateUncached&&(this._cache=this.entrySeq().toArray(),this.size=this._cache.length),this},J.prototype.__iterate=function(e,t){return ce(this,e,t,!0)},J.prototype.__iterator=function(e,t){return le(this,e,t,!0)},t(K,J),K.prototype.toKeyedSeq=function(){return this},t(Y,J),Y.of=function(){return Y(arguments)},Y.prototype.toIndexedSeq=function(){return this},Y.prototype.toString=function(){return this.__toString("Seq [","]")},Y.prototype.__iterate=function(e,t){return ce(this,e,t,!1)},Y.prototype.__iterator=function(e,t){return le(this,e,t,!1)},t($,J),$.of=function(){return $(arguments)},$.prototype.toSetSeq=function(){return this},J.isSeq=oe,J.Keyed=K,J.Set=$,J.Indexed=Y;var G,Z,X,Q="@@__IMMUTABLE_SEQ__@@";function ee(e){this._array=e,this.size=e.length}function te(e){var t=Object.keys(e);this._object=e,this._keys=t,this.size=t.length}function ne(e){this._iterable=e,this.size=e.length||e.size}function re(e){this._iterator=e,this._iteratorCache=[]}function oe(e){return!(!e||!e[Q])}function ie(){return G||(G=new ee([]))}function ae(e){var t=Array.isArray(e)?new ee(e).fromEntrySeq():z(e)?new re(e).fromEntrySeq():B(e)?new ne(e).fromEntrySeq():"object"==typeof e?new te(e):void 0;if(!t)throw new TypeError("Expected Array or iterable object of [k, v] entries, or keyed object: "+e);return t}function se(e){var t=ue(e);if(!t)throw new TypeError("Expected Array or iterable object of values: "+e);return t}function ue(e){return W(e)?new 
ee(e):z(e)?new re(e):B(e)?new ne(e):void 0}function ce(e,t,n,r){var o=e._cache;if(o){for(var i=o.length-1,a=0;a<=i;a++){var s=o[n?i-a:a];if(!1===t(s[1],r?s[0]:a,e))return a+1}return a}return e.__iterateUncached(t,n)}function le(e,t,n,r){var o=e._cache;if(o){var i=o.length-1,a=0;return new U(function(){var e=o[n?i-a:a];return a++>i?{value:void 0,done:!0}:q(t,r?e[0]:a-1,e[1])})}return e.__iteratorUncached(t,n)}function pe(e,t){return t?function e(t,n,r,o){return Array.isArray(n)?t.call(o,r,Y(n).map(function(r,o){return e(t,r,o,n)})):he(n)?t.call(o,r,K(n).map(function(r,o){return e(t,r,o,n)})):n}(t,e,"",{"":e}):fe(e)}function fe(e){return Array.isArray(e)?Y(e).map(fe).toList():he(e)?K(e).map(fe).toMap():e}function he(e){return e&&(e.constructor===Object||void 0===e.constructor)}function de(e,t){if(e===t||e!=e&&t!=t)return!0;if(!e||!t)return!1;if("function"==typeof e.valueOf&&"function"==typeof t.valueOf){if((e=e.valueOf())===(t=t.valueOf())||e!=e&&t!=t)return!0;if(!e||!t)return!1}return!("function"!=typeof e.equals||"function"!=typeof t.equals||!e.equals(t))}function me(e,t){if(e===t)return!0;if(!a(t)||void 0!==e.size&&void 0!==t.size&&e.size!==t.size||void 0!==e.__hash&&void 0!==t.__hash&&e.__hash!==t.__hash||s(e)!==s(t)||u(e)!==u(t)||l(e)!==l(t))return!1;if(0===e.size&&0===t.size)return!0;var n=!c(e);if(l(e)){var r=e.entries();return t.every(function(e,t){var o=r.next().value;return o&&de(o[1],e)&&(n||de(o[0],t))})&&r.next().done}var o=!1;if(void 0===e.size)if(void 0===t.size)"function"==typeof e.cacheResult&&e.cacheResult();else{o=!0;var i=e;e=t,t=i}var p=!0,f=t.__iterate(function(t,r){if(n?!e.has(t):o?!de(t,e.get(r,y)):!de(e.get(r,y),t))return p=!1,!1});return p&&e.size===f}function ve(e,t){if(!(this instanceof ve))return new ve(e,t);if(this._value=e,this.size=void 0===t?1/0:Math.max(0,t),0===this.size){if(Z)return Z;Z=this}}function ge(e,t){if(!e)throw new Error(t)}function ye(e,t,n){if(!(this instanceof ye))return new ye(e,t,n);if(ge(0!==n,"Cannot step a Range 
by 0"),e=e||0,void 0===t&&(t=1/0),n=void 0===n?1:Math.abs(n),tr?{value:void 0,done:!0}:q(e,o,n[t?r-o++:o++])})},t(te,K),te.prototype.get=function(e,t){return void 0===t||this.has(e)?this._object[e]:t},te.prototype.has=function(e){return this._object.hasOwnProperty(e)},te.prototype.__iterate=function(e,t){for(var n=this._object,r=this._keys,o=r.length-1,i=0;i<=o;i++){var a=r[t?o-i:i];if(!1===e(n[a],a,this))return i+1}return i},te.prototype.__iterator=function(e,t){var n=this._object,r=this._keys,o=r.length-1,i=0;return new U(function(){var a=r[t?o-i:i];return i++>o?{value:void 0,done:!0}:q(e,a,n[a])})},te.prototype[d]=!0,t(ne,Y),ne.prototype.__iterateUncached=function(e,t){if(t)return this.cacheResult().__iterate(e,t);var n=V(this._iterable),r=0;if(z(n))for(var o;!(o=n.next()).done&&!1!==e(o.value,r++,this););return r},ne.prototype.__iteratorUncached=function(e,t){if(t)return this.cacheResult().__iterator(e,t);var n=V(this._iterable);if(!z(n))return new U(F);var r=0;return new U(function(){var t=n.next();return t.done?t:q(e,r++,t.value)})},t(re,Y),re.prototype.__iterateUncached=function(e,t){if(t)return this.cacheResult().__iterate(e,t);for(var n,r=this._iterator,o=this._iteratorCache,i=0;i=r.length){var t=n.next();if(t.done)return t;r[o]=t.value}return q(e,o,r[o++])})},t(ve,Y),ve.prototype.toString=function(){return 0===this.size?"Repeat []":"Repeat [ "+this._value+" "+this.size+" times ]"},ve.prototype.get=function(e,t){return this.has(e)?this._value:t},ve.prototype.includes=function(e){return de(this._value,e)},ve.prototype.slice=function(e,t){var n=this.size;return A(e,t,n)?this:new ve(this._value,j(t,n)-T(e,n))},ve.prototype.reverse=function(){return this},ve.prototype.indexOf=function(e){return de(this._value,e)?0:-1},ve.prototype.lastIndexOf=function(e){return de(this._value,e)?this.size:-1},ve.prototype.__iterate=function(e,t){for(var n=0;n=0&&t=0&&nn?{value:void 0,done:!0}:q(e,i++,a)})},ye.prototype.equals=function(e){return e instanceof 
ye?this._start===e._start&&this._end===e._end&&this._step===e._step:me(this,e)},t(be,n),t(_e,be),t(we,be),t(xe,be),be.Keyed=_e,be.Indexed=we,be.Set=xe;var Ee="function"==typeof Math.imul&&-2===Math.imul(4294967295,2)?Math.imul:function(e,t){var n=65535&(e|=0),r=65535&(t|=0);return n*r+((e>>>16)*r+n*(t>>>16)<<16>>>0)|0};function Se(e){return e>>>1&1073741824|3221225471&e}function Ce(e){if(!1===e||null==e)return 0;if("function"==typeof e.valueOf&&(!1===(e=e.valueOf())||null==e))return 0;if(!0===e)return 1;var t=typeof e;if("number"===t){if(e!=e||e===1/0)return 0;var n=0|e;for(n!==e&&(n^=4294967295*e);e>4294967295;)n^=e/=4294967295;return Se(n)}if("string"===t)return e.length>Me?function(e){var t=De[e];return void 0===t&&(t=ke(e),Re===Ne&&(Re=0,De={}),Re++,De[e]=t),t}(e):ke(e);if("function"==typeof e.hashCode)return e.hashCode();if("object"===t)return function(e){var t;if(je&&void 0!==(t=Oe.get(e)))return t;if(void 0!==(t=e[Pe]))return t;if(!Te){if(void 0!==(t=e.propertyIsEnumerable&&e.propertyIsEnumerable[Pe]))return t;if(void 0!==(t=function(e){if(e&&e.nodeType>0)switch(e.nodeType){case 1:return e.uniqueID;case 9:return e.documentElement&&e.documentElement.uniqueID}}(e)))return t}if(t=++Ie,1073741824&Ie&&(Ie=0),je)Oe.set(e,t);else{if(void 0!==Ae&&!1===Ae(e))throw new Error("Non-extensible objects are not allowed as keys.");if(Te)Object.defineProperty(e,Pe,{enumerable:!1,configurable:!1,writable:!1,value:t});else if(void 0!==e.propertyIsEnumerable&&e.propertyIsEnumerable===e.constructor.prototype.propertyIsEnumerable)e.propertyIsEnumerable=function(){return this.constructor.prototype.propertyIsEnumerable.apply(this,arguments)},e.propertyIsEnumerable[Pe]=t;else{if(void 0===e.nodeType)throw new Error("Unable to set a non-enumerable property on object.");e[Pe]=t}}return t}(e);if("function"==typeof e.toString)return ke(e.toString());throw new Error("Value type "+t+" cannot be hashed.")}function ke(e){for(var t=0,n=0;n=t.length)throw new Error("Missing value for key: 
"+t[n]);e.set(t[n],t[n+1])}})},Ue.prototype.toString=function(){return this.__toString("Map {","}")},Ue.prototype.get=function(e,t){return this._root?this._root.get(0,void 0,e,t):t},Ue.prototype.set=function(e,t){return Qe(this,e,t)},Ue.prototype.setIn=function(e,t){return this.updateIn(e,y,function(){return t})},Ue.prototype.remove=function(e){return Qe(this,e,y)},Ue.prototype.deleteIn=function(e){return this.updateIn(e,function(){return y})},Ue.prototype.update=function(e,t,n){return 1===arguments.length?e(this):this.updateIn([e],t,n)},Ue.prototype.updateIn=function(e,t,n){n||(n=t,t=void 0);var r=function e(t,n,r,o){var i=t===y,a=n.next();if(a.done){var s=i?r:t,u=o(s);return u===s?t:u}ge(i||t&&t.set,"invalid keyPath");var c=a.value,l=i?y:t.get(c,y),p=e(l,n,r,o);return p===l?t:p===y?t.remove(c):(i?Xe():t).set(c,p)}(this,rn(e),t,n);return r===y?void 0:r},Ue.prototype.clear=function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._root=null,this.__hash=void 0,this.__altered=!0,this):Xe()},Ue.prototype.merge=function(){return rt(this,void 0,arguments)},Ue.prototype.mergeWith=function(t){var n=e.call(arguments,1);return rt(this,t,n)},Ue.prototype.mergeIn=function(t){var n=e.call(arguments,1);return this.updateIn(t,Xe(),function(e){return"function"==typeof e.merge?e.merge.apply(e,n):n[n.length-1]})},Ue.prototype.mergeDeep=function(){return rt(this,ot,arguments)},Ue.prototype.mergeDeepWith=function(t){var n=e.call(arguments,1);return rt(this,it(t),n)},Ue.prototype.mergeDeepIn=function(t){var n=e.call(arguments,1);return this.updateIn(t,Xe(),function(e){return"function"==typeof e.mergeDeep?e.mergeDeep.apply(e,n):n[n.length-1]})},Ue.prototype.sort=function(e){return Tt(Jt(this,e))},Ue.prototype.sortBy=function(e,t){return Tt(Jt(this,t,e))},Ue.prototype.withMutations=function(e){var t=this.asMutable();return e(t),t.wasAltered()?t.__ensureOwner(this.__ownerID):this},Ue.prototype.asMutable=function(){return this.__ownerID?this:this.__ensureOwner(new 
E)},Ue.prototype.asImmutable=function(){return this.__ensureOwner()},Ue.prototype.wasAltered=function(){return this.__altered},Ue.prototype.__iterator=function(e,t){return new Ye(this,e,t)},Ue.prototype.__iterate=function(e,t){var n=this,r=0;return this._root&&this._root.iterate(function(t){return r++,e(t[1],t[0],n)},t),r},Ue.prototype.__ensureOwner=function(e){return e===this.__ownerID?this:e?Ze(this.size,this._root,e,this.__hash):(this.__ownerID=e,this.__altered=!1,this)},Ue.isMap=qe;var Fe,Be="@@__IMMUTABLE_MAP__@@",ze=Ue.prototype;function Ve(e,t){this.ownerID=e,this.entries=t}function He(e,t,n){this.ownerID=e,this.bitmap=t,this.nodes=n}function We(e,t,n){this.ownerID=e,this.count=t,this.nodes=n}function Je(e,t,n){this.ownerID=e,this.keyHash=t,this.entries=n}function Ke(e,t,n){this.ownerID=e,this.keyHash=t,this.entry=n}function Ye(e,t,n){this._type=t,this._reverse=n,this._stack=e._root&&Ge(e._root)}function $e(e,t){return q(e,t[0],t[1])}function Ge(e,t){return{node:e,index:0,__prev:t}}function Ze(e,t,n,r){var o=Object.create(ze);return o.size=e,o._root=t,o.__ownerID=n,o.__hash=r,o.__altered=!1,o}function Xe(){return Fe||(Fe=Ze(0))}function Qe(e,t,n){var r,o;if(e._root){var i=w(b),a=w(_);if(r=et(e._root,e.__ownerID,0,void 0,t,n,i,a),!a.value)return e;o=e.size+(i.value?n===y?-1:1:0)}else{if(n===y)return e;o=1,r=new Ve(e.__ownerID,[[t,n]])}return e.__ownerID?(e.size=o,e._root=r,e.__hash=void 0,e.__altered=!0,e):r?Ze(o,r):Xe()}function et(e,t,n,r,o,i,a,s){return e?e.update(t,n,r,o,i,a,s):i===y?e:(x(s),x(a),new Ke(t,r,[o,i]))}function tt(e){return e.constructor===Ke||e.constructor===Je}function nt(e,t,n,r,o){if(e.keyHash===r)return new Je(t,r,[e.entry,o]);var i,a=(0===n?e.keyHash:e.keyHash>>>n)&g,s=(0===n?r:r>>>n)&g;return new He(t,1<>1&1431655765))+(e>>2&858993459))+(e>>4)&252645135,e+=e>>8,127&(e+=e>>16)}function ut(e,t,n,r){var o=r?e:S(e);return o[t]=n,o}ze[Be]=!0,ze.delete=ze.remove,ze.removeIn=ze.deleteIn,Ve.prototype.get=function(e,t,n,r){for(var 
o=this.entries,i=0,a=o.length;i=ct)return function(e,t,n,r){e||(e=new E);for(var o=new Ke(e,Ce(n),[n,r]),i=0;i>>e)&g),i=this.bitmap;return 0==(i&o)?r:this.nodes[st(i&o-1)].get(e+m,t,n,r)},He.prototype.update=function(e,t,n,r,o,i,a){void 0===n&&(n=Ce(r));var s=(0===t?n:n>>>t)&g,u=1<=lt)return function(e,t,n,r,o){for(var i=0,a=new Array(v),s=0;0!==n;s++,n>>>=1)a[s]=1&n?t[i++]:void 0;return a[r]=o,new We(e,i+1,a)}(e,f,c,s,d);if(l&&!d&&2===f.length&&tt(f[1^p]))return f[1^p];if(l&&d&&1===f.length&&tt(d))return d;var b=e&&e===this.ownerID,_=l?d?c:c^u:c|u,w=l?d?ut(f,p,d,b):function(e,t,n){var r=e.length-1;if(n&&t===r)return e.pop(),e;for(var o=new Array(r),i=0,a=0;a>>e)&g,i=this.nodes[o];return i?i.get(e+m,t,n,r):r},We.prototype.update=function(e,t,n,r,o,i,a){void 0===n&&(n=Ce(r));var s=(0===t?n:n>>>t)&g,u=o===y,c=this.nodes,l=c[s];if(u&&!l)return this;var p=et(l,e,t+m,n,r,o,i,a);if(p===l)return this;var f=this.count;if(l){if(!p&&--f0&&r=0&&e=e.size||t<0)return e.withMutations(function(e){t<0?kt(e,t).set(0,n):kt(e,0,t+1).set(t,n)});t+=e._origin;var r=e._tail,o=e._root,i=w(_);return t>=At(e._capacity)?r=Et(r,e.__ownerID,0,t,n,i):o=Et(o,e.__ownerID,e._level,t,n,i),i.value?e.__ownerID?(e._root=o,e._tail=r,e.__hash=void 0,e.__altered=!0,e):wt(e._origin,e._capacity,e._level,o,r):e}(this,e,t)},ft.prototype.remove=function(e){return this.has(e)?0===e?this.shift():e===this.size-1?this.pop():this.splice(e,1):this},ft.prototype.insert=function(e,t){return this.splice(e,0,t)},ft.prototype.clear=function(){return 0===this.size?this:this.__ownerID?(this.size=this._origin=this._capacity=0,this._level=m,this._root=this._tail=null,this.__hash=void 0,this.__altered=!0,this):xt()},ft.prototype.push=function(){var e=arguments,t=this.size;return this.withMutations(function(n){kt(n,0,t+e.length);for(var r=0;r>>t&g;if(r>=this.array.length)return new vt([],e);var o,i=0===r;if(t>0){var a=this.array[r];if((o=a&&a.removeBefore(e,t-m,n))===a&&i)return this}if(i&&!o)return this;var 
s=St(this,e);if(!i)for(var u=0;u>>t&g;if(o>=this.array.length)return this;if(t>0){var i=this.array[o];if((r=i&&i.removeAfter(e,t-m,n))===i&&o===this.array.length-1)return this}var a=St(this,e);return a.array.splice(o+1),r&&(a.array[o]=r),a};var gt,yt,bt={};function _t(e,t){var n=e._origin,r=e._capacity,o=At(r),i=e._tail;return a(e._root,e._level,0);function a(e,s,u){return 0===s?function(e,a){var s=a===o?i&&i.array:e&&e.array,u=a>n?0:n-a,c=r-a;return c>v&&(c=v),function(){if(u===c)return bt;var e=t?--c:u++;return s&&s[e]}}(e,u):function(e,o,i){var s,u=e&&e.array,c=i>n?0:n-i>>o,l=1+(r-i>>o);return l>v&&(l=v),function(){for(;;){if(s){var e=s();if(e!==bt)return e;s=null}if(c===l)return bt;var n=t?--l:c++;s=a(u&&u[n],o-m,i+(n<>>n&g,u=e&&s0){var c=e&&e.array[s],l=Et(c,t,n-m,r,o,i);return l===c?e:((a=St(e,t)).array[s]=l,a)}return u&&e.array[s]===o?e:(x(i),a=St(e,t),void 0===o&&s===a.array.length-1?a.array.pop():a.array[s]=o,a)}function St(e,t){return t&&e&&t===e.ownerID?e:new vt(e?e.array.slice():[],t)}function Ct(e,t){if(t>=At(e._capacity))return e._tail;if(t<1<0;)n=n.array[t>>>r&g],r-=m;return n}}function kt(e,t,n){void 0!==t&&(t|=0),void 0!==n&&(n|=0);var r=e.__ownerID||new E,o=e._origin,i=e._capacity,a=o+t,s=void 0===n?i:n<0?i+n:o+n;if(a===o&&s===i)return e;if(a>=s)return e.clear();for(var u=e._level,c=e._root,l=0;a+l<0;)c=new vt(c&&c.array.length?[void 0,c]:[],r),l+=1<<(u+=m);l&&(a+=l,o+=l,s+=l,i+=l);for(var p=At(i),f=At(s);f>=1<p?new vt([],r):h;if(h&&f>p&&am;y-=m){var b=p>>>y&g;v=v.array[b]=St(v.array[b],r)}v.array[p>>>m&g]=h}if(s=f)a-=f,s-=f,u=m,c=null,d=d&&d.removeBefore(r,0,a);else if(a>o||f>>u&g;if(_!==f>>>u&g)break;_&&(l+=(1<o&&(c=c.removeBefore(r,u,a-l)),c&&fi&&(i=c.size),a(u)||(c=c.map(function(e){return pe(e)})),r.push(c)}return i>e.size&&(e=e.setSize(i)),at(e,t,r)}function At(e){return e>>m<=v&&a.size>=2*i.size?(r=(o=a.filter(function(e,t){return void 0!==e&&s!==t})).toKeyedSeq().map(function(e){return 
e[0]}).flip().toMap(),e.__ownerID&&(r.__ownerID=o.__ownerID=e.__ownerID)):(r=i.remove(t),o=s===a.size-1?a.pop():a.set(s,void 0))}else if(u){if(n===a.get(s)[1])return e;r=i,o=a.set(s,[t,n])}else r=i.set(t,a.size),o=a.set(a.size,[t,n]);return e.__ownerID?(e.size=r.size,e._map=r,e._list=o,e.__hash=void 0,e):It(r,o)}function Nt(e,t){this._iter=e,this._useKeys=t,this.size=e.size}function Rt(e){this._iter=e,this.size=e.size}function Dt(e){this._iter=e,this.size=e.size}function Lt(e){this._iter=e,this.size=e.size}function Ut(e){var t=en(e);return t._iter=e,t.size=e.size,t.flip=function(){return e},t.reverse=function(){var t=e.reverse.apply(this);return t.flip=function(){return e.reverse()},t},t.has=function(t){return e.includes(t)},t.includes=function(t){return e.has(t)},t.cacheResult=tn,t.__iterateUncached=function(t,n){var r=this;return e.__iterate(function(e,n){return!1!==t(n,e,r)},n)},t.__iteratorUncached=function(t,n){if(t===N){var r=e.__iterator(t,n);return new U(function(){var e=r.next();if(!e.done){var t=e.value[0];e.value[0]=e.value[1],e.value[1]=t}return e})}return e.__iterator(t===M?P:M,n)},t}function qt(e,t,n){var r=en(e);return r.size=e.size,r.has=function(t){return e.has(t)},r.get=function(r,o){var i=e.get(r,y);return i===y?o:t.call(n,i,r,e)},r.__iterateUncached=function(r,o){var i=this;return e.__iterate(function(e,o,a){return!1!==r(t.call(n,e,o,a),o,i)},o)},r.__iteratorUncached=function(r,o){var i=e.__iterator(N,o);return new U(function(){var o=i.next();if(o.done)return o;var a=o.value,s=a[0];return q(r,s,t.call(n,a[1],s,e),o)})},r}function Ft(e,t){var n=en(e);return n._iter=e,n.size=e.size,n.reverse=function(){return e},e.flip&&(n.flip=function(){var t=Ut(e);return t.reverse=function(){return e.flip()},t}),n.get=function(n,r){return e.get(t?n:-1-n,r)},n.has=function(n){return e.has(t?n:-1-n)},n.includes=function(t){return e.includes(t)},n.cacheResult=tn,n.__iterate=function(t,n){var r=this;return e.__iterate(function(e,n){return 
t(e,n,r)},!n)},n.__iterator=function(t,n){return e.__iterator(t,!n)},n}function Bt(e,t,n,r){var o=en(e);return r&&(o.has=function(r){var o=e.get(r,y);return o!==y&&!!t.call(n,o,r,e)},o.get=function(r,o){var i=e.get(r,y);return i!==y&&t.call(n,i,r,e)?i:o}),o.__iterateUncached=function(o,i){var a=this,s=0;return e.__iterate(function(e,i,u){if(t.call(n,e,i,u))return s++,o(e,r?i:s-1,a)},i),s},o.__iteratorUncached=function(o,i){var a=e.__iterator(N,i),s=0;return new U(function(){for(;;){var i=a.next();if(i.done)return i;var u=i.value,c=u[0],l=u[1];if(t.call(n,l,c,e))return q(o,r?c:s++,l,i)}})},o}function zt(e,t,n,r){var o=e.size;if(void 0!==t&&(t|=0),void 0!==n&&(n===1/0?n=o:n|=0),A(t,n,o))return e;var i=T(t,o),a=j(n,o);if(i!=i||a!=a)return zt(e.toSeq().cacheResult(),t,n,r);var s,u=a-i;u==u&&(s=u<0?0:u);var c=en(e);return c.size=0===s?s:e.size&&s||void 0,!r&&oe(e)&&s>=0&&(c.get=function(t,n){return(t=k(this,t))>=0&&ts)return{value:void 0,done:!0};var e=o.next();return r||t===M?e:q(t,u-1,t===P?void 0:e.value[1],e)})},c}function Vt(e,t,n,r){var o=en(e);return o.__iterateUncached=function(o,i){var a=this;if(i)return this.cacheResult().__iterate(o,i);var s=!0,u=0;return e.__iterate(function(e,i,c){if(!s||!(s=t.call(n,e,i,c)))return u++,o(e,r?i:u-1,a)}),u},o.__iteratorUncached=function(o,i){var a=this;if(i)return this.cacheResult().__iterator(o,i);var s=e.__iterator(N,i),u=!0,c=0;return new U(function(){var e,i,l;do{if((e=s.next()).done)return r||o===M?e:q(o,c++,o===P?void 0:e.value[1],e);var p=e.value;i=p[0],l=p[1],u&&(u=t.call(n,l,i,a))}while(u);return o===N?e:q(o,i,l,e)})},o}function Ht(e,t){var n=s(e),o=[e].concat(t).map(function(e){return a(e)?n&&(e=r(e)):e=n?ae(e):se(Array.isArray(e)?e:[e]),e}).filter(function(e){return 0!==e.size});if(0===o.length)return e;if(1===o.length){var i=o[0];if(i===e||n&&s(i)||u(e)&&u(i))return i}var c=new ee(o);return n?c=c.toKeyedSeq():u(e)||(c=c.toSetSeq()),(c=c.flatten(!0)).size=o.reduce(function(e,t){if(void 0!==e){var n=t.size;if(void 
0!==n)return e+n}},0),c}function Wt(e,t,n){var r=en(e);return r.__iterateUncached=function(r,o){var i=0,s=!1;return function e(u,c){var l=this;u.__iterate(function(o,u){return(!t||c0}function $t(e,t,r){var o=en(e);return o.size=new ee(r).map(function(e){return e.size}).min(),o.__iterate=function(e,t){for(var n,r=this.__iterator(M,t),o=0;!(n=r.next()).done&&!1!==e(n.value,o++,this););return o},o.__iteratorUncached=function(e,o){var i=r.map(function(e){return e=n(e),V(o?e.reverse():e)}),a=0,s=!1;return new U(function(){var n;return s||(n=i.map(function(e){return e.next()}),s=n.some(function(e){return e.done})),s?{value:void 0,done:!0}:q(e,a++,t.apply(null,n.map(function(e){return e.value})))})},o}function Gt(e,t){return oe(e)?t:e.constructor(t)}function Zt(e){if(e!==Object(e))throw new TypeError("Expected [K, V] tuple: "+e)}function Xt(e){return Le(e.size),C(e)}function Qt(e){return s(e)?r:u(e)?o:i}function en(e){return Object.create((s(e)?K:u(e)?Y:$).prototype)}function tn(){return this._iter.cacheResult?(this._iter.cacheResult(),this.size=this._iter.size,this):J.prototype.cacheResult.call(this)}function nn(e,t){return e>t?1:e=0;n--)t={value:arguments[n],next:t};return this.__ownerID?(this.size=e,this._head=t,this.__hash=void 0,this.__altered=!0,this):An(e,t)},En.prototype.pushAll=function(e){if(0===(e=o(e)).size)return this;Le(e.size);var t=this.size,n=this._head;return e.reverse().forEach(function(e){t++,n={value:e,next:n}}),this.__ownerID?(this.size=t,this._head=n,this.__hash=void 0,this.__altered=!0,this):An(t,n)},En.prototype.pop=function(){return this.slice(1)},En.prototype.unshift=function(){return this.push.apply(this,arguments)},En.prototype.unshiftAll=function(e){return this.pushAll(e)},En.prototype.shift=function(){return this.pop.apply(this,arguments)},En.prototype.clear=function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._head=void 0,this.__hash=void 
0,this.__altered=!0,this):Tn()},En.prototype.slice=function(e,t){if(A(e,t,this.size))return this;var n=T(e,this.size);if(j(t,this.size)!==this.size)return we.prototype.slice.call(this,e,t);for(var r=this.size-n,o=this._head;n--;)o=o.next;return this.__ownerID?(this.size=r,this._head=o,this.__hash=void 0,this.__altered=!0,this):An(r,o)},En.prototype.__ensureOwner=function(e){return e===this.__ownerID?this:e?An(this.size,this._head,e,this.__hash):(this.__ownerID=e,this.__altered=!1,this)},En.prototype.__iterate=function(e,t){if(t)return this.reverse().__iterate(e);for(var n=0,r=this._head;r&&!1!==e(r.value,n++,this);)r=r.next;return n},En.prototype.__iterator=function(e,t){if(t)return this.reverse().__iterator(e);var n=0,r=this._head;return new U(function(){if(r){var t=r.value;return r=r.next,q(e,n++,t)}return{value:void 0,done:!0}})},En.isStack=Sn;var Cn,kn="@@__IMMUTABLE_STACK__@@",On=En.prototype;function An(e,t,n,r){var o=Object.create(On);return o.size=e,o._head=t,o.__ownerID=n,o.__hash=r,o.__altered=!1,o}function Tn(){return Cn||(Cn=An(0))}function jn(e,t){var n=function(n){e.prototype[n]=t[n]};return Object.keys(t).forEach(n),Object.getOwnPropertySymbols&&Object.getOwnPropertySymbols(t).forEach(n),e}On[kn]=!0,On.withMutations=ze.withMutations,On.asMutable=ze.asMutable,On.asImmutable=ze.asImmutable,On.wasAltered=ze.wasAltered,n.Iterator=U,jn(n,{toArray:function(){Le(this.size);var e=new Array(this.size||0);return this.valueSeq().__iterate(function(t,n){e[n]=t}),e},toIndexedSeq:function(){return new Rt(this)},toJS:function(){return this.toSeq().map(function(e){return e&&"function"==typeof e.toJS?e.toJS():e}).__toJS()},toJSON:function(){return this.toSeq().map(function(e){return e&&"function"==typeof e.toJSON?e.toJSON():e}).__toJS()},toKeyedSeq:function(){return new Nt(this,!0)},toMap:function(){return Ue(this.toKeyedSeq())},toObject:function(){Le(this.size);var e={};return this.__iterate(function(t,n){e[n]=t}),e},toOrderedMap:function(){return 
Tt(this.toKeyedSeq())},toOrderedSet:function(){return gn(s(this)?this.valueSeq():this)},toSet:function(){return cn(s(this)?this.valueSeq():this)},toSetSeq:function(){return new Dt(this)},toSeq:function(){return u(this)?this.toIndexedSeq():s(this)?this.toKeyedSeq():this.toSetSeq()},toStack:function(){return En(s(this)?this.valueSeq():this)},toList:function(){return ft(s(this)?this.valueSeq():this)},toString:function(){return"[Iterable]"},__toString:function(e,t){return 0===this.size?e+t:e+" "+this.toSeq().map(this.__toStringMapper).join(", ")+" "+t},concat:function(){var t=e.call(arguments,0);return Gt(this,Ht(this,t))},includes:function(e){return this.some(function(t){return de(t,e)})},entries:function(){return this.__iterator(N)},every:function(e,t){Le(this.size);var n=!0;return this.__iterate(function(r,o,i){if(!e.call(t,r,o,i))return n=!1,!1}),n},filter:function(e,t){return Gt(this,Bt(this,e,t,!0))},find:function(e,t,n){var r=this.findEntry(e,t);return r?r[1]:n},forEach:function(e,t){return Le(this.size),this.__iterate(t?e.bind(t):e)},join:function(e){Le(this.size),e=void 0!==e?""+e:",";var t="",n=!0;return this.__iterate(function(r){n?n=!1:t+=e,t+=null!=r?r.toString():""}),t},keys:function(){return this.__iterator(P)},map:function(e,t){return Gt(this,qt(this,e,t))},reduce:function(e,t,n){var r,o;return Le(this.size),arguments.length<2?o=!0:r=t,this.__iterate(function(t,i,a){o?(o=!1,r=t):r=e.call(n,r,t,i,a)}),r},reduceRight:function(e,t,n){var r=this.toKeyedSeq().reverse();return r.reduce.apply(r,arguments)},reverse:function(){return Gt(this,Ft(this,!0))},slice:function(e,t){return Gt(this,zt(this,e,t,!0))},some:function(e,t){return!this.every(Rn(e),t)},sort:function(e){return Gt(this,Jt(this,e))},values:function(){return this.__iterator(M)},butLast:function(){return this.slice(0,-1)},isEmpty:function(){return void 0!==this.size?0===this.size:!this.some(function(){return!0})},count:function(e,t){return 
C(e?this.toSeq().filter(e,t):this)},countBy:function(e,t){return function(e,t,n){var r=Ue().asMutable();return e.__iterate(function(o,i){r.update(t.call(n,o,i,e),0,function(e){return e+1})}),r.asImmutable()}(this,e,t)},equals:function(e){return me(this,e)},entrySeq:function(){var e=this;if(e._cache)return new ee(e._cache);var t=e.toSeq().map(Nn).toIndexedSeq();return t.fromEntrySeq=function(){return e.toSeq()},t},filterNot:function(e,t){return this.filter(Rn(e),t)},findEntry:function(e,t,n){var r=n;return this.__iterate(function(n,o,i){if(e.call(t,n,o,i))return r=[o,n],!1}),r},findKey:function(e,t){var n=this.findEntry(e,t);return n&&n[0]},findLast:function(e,t,n){return this.toKeyedSeq().reverse().find(e,t,n)},findLastEntry:function(e,t,n){return this.toKeyedSeq().reverse().findEntry(e,t,n)},findLastKey:function(e,t){return this.toKeyedSeq().reverse().findKey(e,t)},first:function(){return this.find(O)},flatMap:function(e,t){return Gt(this,function(e,t,n){var r=Qt(e);return e.toSeq().map(function(o,i){return r(t.call(n,o,i,e))}).flatten(!0)}(this,e,t))},flatten:function(e){return Gt(this,Wt(this,e,!0))},fromEntrySeq:function(){return new Lt(this)},get:function(e,t){return this.find(function(t,n){return de(n,e)},void 0,t)},getIn:function(e,t){for(var n,r=this,o=rn(e);!(n=o.next()).done;){var i=n.value;if((r=r&&r.get?r.get(i,y):y)===y)return t}return r},groupBy:function(e,t){return function(e,t,n){var r=s(e),o=(l(e)?Tt():Ue()).asMutable();e.__iterate(function(i,a){o.update(t.call(n,i,a,e),function(e){return(e=e||[]).push(r?[a,i]:i),e})});var i=Qt(e);return o.map(function(t){return Gt(e,i(t))})}(this,e,t)},has:function(e){return this.get(e,y)!==y},hasIn:function(e){return this.getIn(e,y)!==y},isSubset:function(e){return e="function"==typeof e.includes?e:n(e),this.every(function(t){return e.includes(t)})},isSuperset:function(e){return(e="function"==typeof e.isSubset?e:n(e)).isSubset(this)},keyOf:function(e){return this.findKey(function(t){return 
de(t,e)})},keySeq:function(){return this.toSeq().map(Mn).toIndexedSeq()},last:function(){return this.toSeq().reverse().first()},lastKeyOf:function(e){return this.toKeyedSeq().reverse().keyOf(e)},max:function(e){return Kt(this,e)},maxBy:function(e,t){return Kt(this,t,e)},min:function(e){return Kt(this,e?Dn(e):qn)},minBy:function(e,t){return Kt(this,t?Dn(t):qn,e)},rest:function(){return this.slice(1)},skip:function(e){return this.slice(Math.max(0,e))},skipLast:function(e){return Gt(this,this.toSeq().reverse().skip(e).reverse())},skipWhile:function(e,t){return Gt(this,Vt(this,e,t,!0))},skipUntil:function(e,t){return this.skipWhile(Rn(e),t)},sortBy:function(e,t){return Gt(this,Jt(this,t,e))},take:function(e){return this.slice(0,Math.max(0,e))},takeLast:function(e){return Gt(this,this.toSeq().reverse().take(e).reverse())},takeWhile:function(e,t){return Gt(this,function(e,t,n){var r=en(e);return r.__iterateUncached=function(r,o){var i=this;if(o)return this.cacheResult().__iterate(r,o);var a=0;return e.__iterate(function(e,o,s){return t.call(n,e,o,s)&&++a&&r(e,o,i)}),a},r.__iteratorUncached=function(r,o){var i=this;if(o)return this.cacheResult().__iterator(r,o);var a=e.__iterator(N,o),s=!0;return new U(function(){if(!s)return{value:void 0,done:!0};var e=a.next();if(e.done)return e;var o=e.value,u=o[0],c=o[1];return t.call(n,c,u,i)?r===N?e:q(r,u,c,e):(s=!1,{value:void 0,done:!0})})},r}(this,e,t))},takeUntil:function(e,t){return this.takeWhile(Rn(e),t)},valueSeq:function(){return this.toIndexedSeq()},hashCode:function(){return this.__hash||(this.__hash=function(e){if(e.size===1/0)return 0;var t=l(e),n=s(e),r=t?1:0;return function(e,t){return t=Ee(t,3432918353),t=Ee(t<<15|t>>>-15,461845907),t=Ee(t<<13|t>>>-13,5),t=Ee((t=(t+3864292196|0)^e)^t>>>16,2246822507),t=Se((t=Ee(t^t>>>13,3266489909))^t>>>16)}(e.__iterate(n?t?function(e,t){r=31*r+Fn(Ce(e),Ce(t))|0}:function(e,t){r=r+Fn(Ce(e),Ce(t))|0}:t?function(e){r=31*r+Ce(e)|0}:function(e){r=r+Ce(e)|0}),r)}(this))}});var 
In=n.prototype;In[p]=!0,In[L]=In.values,In.__toJS=In.toArray,In.__toStringMapper=Ln,In.inspect=In.toSource=function(){return this.toString()},In.chain=In.flatMap,In.contains=In.includes,jn(r,{flip:function(){return Gt(this,Ut(this))},mapEntries:function(e,t){var n=this,r=0;return Gt(this,this.toSeq().map(function(o,i){return e.call(t,[i,o],r++,n)}).fromEntrySeq())},mapKeys:function(e,t){var n=this;return Gt(this,this.toSeq().flip().map(function(r,o){return e.call(t,r,o,n)}).flip())}});var Pn=r.prototype;function Mn(e,t){return t}function Nn(e,t){return[t,e]}function Rn(e){return function(){return!e.apply(this,arguments)}}function Dn(e){return function(){return-e.apply(this,arguments)}}function Ln(e){return"string"==typeof e?JSON.stringify(e):String(e)}function Un(){return S(arguments)}function qn(e,t){return et?-1:0}function Fn(e,t){return e^t+2654435769+(e<<6)+(e>>2)|0}return Pn[f]=!0,Pn[L]=In.entries,Pn.__toJS=In.toObject,Pn.__toStringMapper=function(e,t){return JSON.stringify(t)+": "+Ln(e)},jn(o,{toKeyedSeq:function(){return new Nt(this,!1)},filter:function(e,t){return Gt(this,Bt(this,e,t,!1))},findIndex:function(e,t){var n=this.findEntry(e,t);return n?n[0]:-1},indexOf:function(e){var t=this.keyOf(e);return void 0===t?-1:t},lastIndexOf:function(e){var t=this.lastKeyOf(e);return void 0===t?-1:t},reverse:function(){return Gt(this,Ft(this,!1))},slice:function(e,t){return Gt(this,zt(this,e,t,!1))},splice:function(e,t){var n=arguments.length;if(t=Math.max(0|t,0),0===n||2===n&&!t)return this;e=T(e,e<0?this.count():this.size);var r=this.slice(0,e);return Gt(this,1===n?r:r.concat(S(arguments,2),this.slice(e+t)))},findLastIndex:function(e,t){var n=this.findLastEntry(e,t);return n?n[0]:-1},first:function(){return this.get(0)},flatten:function(e){return Gt(this,Wt(this,e,!1))},get:function(e,t){return(e=k(this,e))<0||this.size===1/0||void 0!==this.size&&e>this.size?t:this.find(function(t,n){return n===e},void 0,t)},has:function(e){return(e=k(this,e))>=0&&(void 
0!==this.size?this.size===1/0||e5e3)return e.textContent;return function(e){for(var n,r,o,i,a,s=e.textContent,u=0,c=s[0],l=1,p=e.innerHTML="",f=0;r=n,n=f<7&&"\\"==n?1:l;){if(l=c,c=s[++u],i=p.length>1,!l||f>8&&"\n"==l||[/\S/.test(l),1,1,!/[$\w]/.test(l),("/"==n||"\n"==n)&&i,'"'==n&&i,"'"==n&&i,s[u-4]+r+n=="--\x3e",r+n=="*/"][f])for(p&&(e.appendChild(a=t.createElement("span")).setAttribute("style",["color: #555; font-weight: bold;","","","color: #555;",""][f?f<3?2:f>6?4:f>3?3:+/^(a(bstract|lias|nd|rguments|rray|s(m|sert)?|uto)|b(ase|egin|ool(ean)?|reak|yte)|c(ase|atch|har|hecked|lass|lone|ompl|onst|ontinue)|de(bugger|cimal|clare|f(ault|er)?|init|l(egate|ete)?)|do|double|e(cho|ls?if|lse(if)?|nd|nsure|num|vent|x(cept|ec|p(licit|ort)|te(nds|nsion|rn)))|f(allthrough|alse|inal(ly)?|ixed|loat|or(each)?|riend|rom|unc(tion)?)|global|goto|guard|i(f|mp(lements|licit|ort)|n(it|clude(_once)?|line|out|stanceof|t(erface|ernal)?)?|s)|l(ambda|et|ock|ong)|m(icrolight|odule|utable)|NaN|n(amespace|ative|ext|ew|il|ot|ull)|o(bject|perator|r|ut|verride)|p(ackage|arams|rivate|rotected|rotocol|ublic)|r(aise|e(adonly|do|f|gister|peat|quire(_once)?|scue|strict|try|turn))|s(byte|ealed|elf|hort|igned|izeof|tatic|tring|truct|ubscript|uper|ynchronized|witch)|t(emplate|hen|his|hrows?|ransient|rue|ry|ype(alias|def|id|name|of))|u(n(checked|def(ined)?|ion|less|signed|til)|se|sing)|v(ar|irtual|oid|olatile)|w(char_t|hen|here|hile|ith)|xor|yield)$/.test(p):0]),a.appendChild(t.createTextNode(p))),o=f&&f<7?f:o,p="",f=11;![1,/[\/{}[(\-+*=<>:;|\\.,?!&@~]/.test(l),/[\])]/.test(l),/[$\w]/.test(l),"/"==l&&o<2&&"<"!=n,'"'==l,"'"==l,l+c+s[u+1]+s[u+2]=="\x3c!--",l+c=="/*",l+c=="//","#"==l][--f];);p+=l}}(e)}function re(e){var t;if([/filename\*=[^']+'\w*'"([^"]+)";?/i,/filename\*=[^']+'\w*'([^;]+);?/i,/filename="([^;]*);?"/i,/filename=([^;]*);?/i].some(function(n){return null!==(t=n.exec(e))}),null!==t&&t.length>1)try{return decodeURIComponent(t[1])}catch(e){console.error(e)}return null}function oe(e){return 
t=e.replace(/\.[^.\/]*$/,""),w()(b()(t));var t}var ie=function(e,t){if(e>t)return"Value must be less than Maximum"},ae=function(e,t){if(et)return"Value must be less than MaxLength"},me=function(e,t){if(e.length2&&void 0!==arguments[2]?arguments[2]:{},r=n.isOAS3,o=void 0!==r&&r,i=n.bypassRequiredCheck,a=void 0!==i&&i,s=[],u=e.get("required"),c=Object(R.a)(e,{isOAS3:o}),l=c.schema,p=c.parameterContentMediaType;if(!l)return s;var h=l.get("required"),m=l.get("maximum"),g=l.get("minimum"),y=l.get("type"),b=l.get("format"),_=l.get("maxLength"),w=l.get("minLength"),x=l.get("pattern");if(y&&(u||h||t)){var E="string"===y&&t,S="array"===y&&f()(t)&&t.length,C="array"===y&&v.a.List.isList(t)&&t.count(),k="array"===y&&"string"==typeof t&&t,O="file"===y&&t instanceof P.a.File,A="boolean"===y&&(t||!1===t),T="number"===y&&(t||0===t),j="integer"===y&&(t||0===t),I="object"===y&&"object"===d()(t)&&null!==t,M="object"===y&&"string"==typeof t&&t,N=[E,S,C,k,O,A,T,j,I,M],D=N.some(function(e){return!!e});if((u||h)&&!D&&!a)return s.push("Required field is not provided"),s;if("object"===y&&"string"==typeof t&&(null===p||"application/json"===p))try{JSON.parse(t)}catch(e){return s.push("Parameter string value must be valid JSON"),s}if(x){var L=ve(t,x);L&&s.push(L)}if(_||0===_){var U=de(t,_);U&&s.push(U)}if(w){var q=me(t,w);q&&s.push(q)}if(m||0===m){var F=ie(t,m);F&&s.push(F)}if(g||0===g){var B=ae(t,g);B&&s.push(B)}if("string"===y){var z;if(!(z="date-time"===b?fe(t):"uuid"===b?he(t):pe(t)))return s;s.push(z)}else if("boolean"===y){var V=le(t);if(!V)return s;s.push(V)}else if("number"===y){var H=se(t);if(!H)return s;s.push(H)}else if("integer"===y){var W=ue(t);if(!W)return s;s.push(W)}else if("array"===y){var J;if(!C||!t.count())return s;J=l.getIn(["items","type"]),t.forEach(function(e,t){var n;"number"===J?n=se(e):"integer"===J?n=ue(e):"string"===J&&(n=pe(e)),n&&s.push({index:t,error:n})})}else if("file"===y){var K=ce(t);if(!K)return s;s.push(K)}}return s},ye=function(e){var 
t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};if(/xml/.test(t)){if(!e.xml||!e.xml.name){if(e.xml=e.xml||{},!e.$$ref)return e.type||e.items||e.properties||e.additionalProperties?'\n\x3c!-- XML example cannot be generated; root element name is undefined --\x3e':null;var r=e.$$ref.match(/\S*\/(\S+)$/);e.xml.name=r[1]}return Object(j.memoizedCreateXMLExample)(e,n)}var i=Object(j.memoizedSampleFromSchema)(e,n);return"object"===d()(i)?o()(i,null,2):i},be=function(){var e={},t=P.a.location.search;if(!t)return{};if(""!=t){var n=t.substr(1).split("&");for(var r in n)n.hasOwnProperty(r)&&(r=n[r].split("="),e[decodeURIComponent(r[0])]=r[1]&&decodeURIComponent(r[1])||"")}return e},_e=function(t){return(t instanceof e?t:new e(t.toString(),"utf-8")).toString("base64")},we={operationsSorter:{alpha:function(e,t){return e.get("path").localeCompare(t.get("path"))},method:function(e,t){return e.get("method").localeCompare(t.get("method"))}},tagsSorter:{alpha:function(e,t){return e.localeCompare(t)}}},xe=function(e){var t=[];for(var n in e){var r=e[n];void 0!==r&&""!==r&&t.push([n,"=",encodeURIComponent(r).replace(/%20/g,"+")].join(""))}return t.join("&")},Ee=function(e,t,n){return!!C()(n,function(n){return O()(e[n],t[n])})};function Se(e){return"string"!=typeof e||""===e?"":Object(g.sanitizeUrl)(e)}function Ce(e){if(!v.a.OrderedMap.isOrderedMap(e))return null;if(!e.size)return null;var t=e.find(function(e,t){return t.startsWith("2")&&u()(e.get("content")||{}).length>0}),n=e.get("default")||v.a.OrderedMap(),r=(n.get("content")||v.a.OrderedMap()).keySeq().toJS().length?n:null;return t||r}var ke=function(e){return"string"==typeof e||e instanceof String?e.trim().replace(/\s/g,"%20"):""},Oe=function(e){return N()(ke(e).replace(/%20/g,"_"))},Ae=function(e){return e.filter(function(e,t){return/^x-/.test(t)})},Te=function(e){return 
e.filter(function(e,t){return/^pattern|maxLength|minLength|maximum|minimum/.test(t)})};function je(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:function(){return!0};if("object"!==d()(e)||f()(e)||null===e||!t)return e;var r=a()({},e);return u()(r).forEach(function(e){e===t&&n(r[e],e)?delete r[e]:r[e]=je(r[e],t,n)}),r}function Ie(e){if("string"==typeof e)return e;if(e&&e.toJS&&(e=e.toJS()),"object"===d()(e)&&null!==e)try{return o()(e,null,2)}catch(t){return String(e)}return null==e?"":e.toString()}function Pe(e){return"number"==typeof e?e.toString():e}function Me(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.returnAll,r=void 0!==n&&n,o=t.allowHashes,i=void 0===o||o;if(!v.a.Map.isMap(e))throw new Error("paramToIdentifier: received a non-Im.Map parameter as input");var a=e.get("name"),s=e.get("in"),u=[];return e&&e.hashCode&&s&&a&&i&&u.push("".concat(s,".").concat(a,".hash-").concat(e.hashCode())),s&&a&&u.push("".concat(s,".").concat(a)),u.push(a),r?u:u[0]||""}function Ne(e,t){return Me(e,{returnAll:!0}).map(function(e){return t[e]}).filter(function(e){return void 0!==e})[0]}function Re(){return Le(L()(32).toString("base64"))}function De(e){return Le(q()("sha256").update(e).digest("base64"))}function Le(e){return e.replace(/\+/g,"-").replace(/\//g,"_").replace(/=/g,"")}}).call(this,n(66).Buffer)},function(e,t){e.exports=function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}},function(e,t,n){var r=n(54);function o(e,t){for(var n=0;n1?t-1:0),r=1;r1&&void 0!==arguments[1]?arguments[1]:r,n=null,i=null;return function(){return o(t,n,arguments)||(i=e.apply(null,arguments)),n=arguments,i}})},function(e,t,n){var r=n(757),o=n(758),i=n(764);e.exports=function(e){return r(e)||o(e)||i()}},function(e,t,n){var r=n(600),o=n(601),i=n(604);e.exports=function(e,t){return r(e)||o(e,t)||i()}},function(e,t,n){e.exports=n(574)},function(e,t,n){"use strict";var 
r=function(e){};e.exports=function(e,t,n,o,i,a,s,u){if(r(t),!e){var c;if(void 0===t)c=new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var l=[n,o,i,a,s,u],p=0;(c=new Error(t.replace(/%s/g,function(){return l[p++]}))).name="Invariant Violation"}throw c.framesToPop=1,c}}},function(e,t,n){e.exports=n(558)},function(e,t,n){e.exports=n(578)},function(e,t){e.exports=function(){var e={location:{},history:{},open:function(){},close:function(){},File:function(){}};if("undefined"==typeof window)return e;try{e=window;for(var t=0,n=["File","Blob","FormData"];t>",i={listOf:function(e){return c(e,"List",r.List.isList)},mapOf:function(e,t){return l(e,t,"Map",r.Map.isMap)},orderedMapOf:function(e,t){return l(e,t,"OrderedMap",r.OrderedMap.isOrderedMap)},setOf:function(e){return c(e,"Set",r.Set.isSet)},orderedSetOf:function(e){return c(e,"OrderedSet",r.OrderedSet.isOrderedSet)},stackOf:function(e){return c(e,"Stack",r.Stack.isStack)},iterableOf:function(e){return c(e,"Iterable",r.Iterable.isIterable)},recordOf:function(e){return s(function(t,n,o,i,s){for(var u=arguments.length,c=Array(u>5?u-5:0),l=5;l6?u-6:0),l=6;l5?c-5:0),p=5;p5?i-5:0),s=5;s key("+l[p]+")"].concat(a));if(h instanceof Error)return h}})).apply(void 0,i);var u})}function p(e){var t=void 0===arguments[1]?"Iterable":arguments[1],n=void 0===arguments[2]?r.Iterable.isIterable:arguments[2];return s(function(r,o,i,s,u){for(var c=arguments.length,l=Array(c>5?c-5:0),p=5;p4)}function u(e){var t=e.get("swagger");return"string"==typeof t&&t.startsWith("2.0")}function c(e){return function(t,n){return function(r){return n&&n.specSelectors&&n.specSelectors.specJson?s(n.specSelectors.specJson())?a.a.createElement(e,o()({},r,n,{Ori:t})):a.a.createElement(t,r):(console.warn("OAS3 wrapper: couldn't get spec"),null)}}}},function(e,t,n){"use strict"; +/* +object-assign +(c) Sindre Sorhus +@license MIT +*/var 
r=Object.getOwnPropertySymbols,o=Object.prototype.hasOwnProperty,i=Object.prototype.propertyIsEnumerable;function a(e){if(null==e)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(e)}e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},r)).join("")}catch(e){return!1}}()?Object.assign:function(e,t){for(var n,s,u=a(e),c=1;c0){var o=n.map(function(e){return console.error(e),e.line=e.fullPath?g(y,e.fullPath):null,e.path=e.fullPath?e.fullPath.join("."):null,e.level="error",e.type="thrown",e.source="resolver",A()(e,"message",{enumerable:!0,value:e.message}),e});i.newThrownErrBatch(o)}return r.updateResolved(t)})}},_e=[],we=V()(k()(S.a.mark(function e(){var t,n,r,o,i,a,s,u,c,l,p,f,h,d,m,v,g;return S.a.wrap(function(e){for(;;)switch(e.prev=e.next){case 0:if(t=_e.system){e.next=4;break}return console.error("debResolveSubtrees: don't have a system to operate on, aborting."),e.abrupt("return");case 4:if(n=t.errActions,r=t.errSelectors,o=t.fn,i=o.resolveSubtree,a=o.AST,s=void 0===a?{}:a,u=t.specSelectors,c=t.specActions,i){e.next=8;break}return console.error("Error: Swagger-Client did not provide a `resolveSubtree` method, doing nothing."),e.abrupt("return");case 8:return l=s.getLineNumberForPath?s.getLineNumberForPath:function(){},p=u.specStr(),f=t.getConfigs(),h=f.modelPropertyMacro,d=f.parameterMacro,m=f.requestInterceptor,v=f.responseInterceptor,e.prev=11,e.next=14,_e.reduce(function(){var e=k()(S.a.mark(function e(t,o){var a,s,c,f,g,y,b;return S.a.wrap(function(e){for(;;)switch(e.prev=e.next){case 0:return e.next=2,t;case 2:return 
a=e.sent,s=a.resultMap,c=a.specWithCurrentSubtrees,e.next=7,i(c,o,{baseDoc:u.url(),modelPropertyMacro:h,parameterMacro:d,requestInterceptor:m,responseInterceptor:v});case 7:return f=e.sent,g=f.errors,y=f.spec,r.allErrors().size&&n.clearBy(function(e){return"thrown"!==e.get("type")||"resolver"!==e.get("source")||!e.get("fullPath").every(function(e,t){return e===o[t]||void 0===o[t]})}),j()(g)&&g.length>0&&(b=g.map(function(e){return e.line=e.fullPath?l(p,e.fullPath):null,e.path=e.fullPath?e.fullPath.join("."):null,e.level="error",e.type="thrown",e.source="resolver",A()(e,"message",{enumerable:!0,value:e.message}),e}),n.newThrownErrBatch(b)),W()(s,o,y),W()(c,o,y),e.abrupt("return",{resultMap:s,specWithCurrentSubtrees:c});case 15:case"end":return e.stop()}},e)}));return function(t,n){return e.apply(this,arguments)}}(),x.a.resolve({resultMap:(u.specResolvedSubtree([])||Object(R.Map)()).toJS(),specWithCurrentSubtrees:u.specJson().toJS()}));case 14:g=e.sent,delete _e.system,_e=[],e.next=22;break;case 19:e.prev=19,e.t0=e.catch(11),console.error(e.t0);case 22:c.updateResolvedSubtree([],g.resultMap);case 23:case"end":return e.stop()}},e,null,[[11,19]])})),35),xe=function(e){return function(t){_e.map(function(e){return e.join("@@")}).indexOf(e.join("@@"))>-1||(_e.push(e),_e.system=t,we())}};function Ee(e,t,n,r,o){return{type:X,payload:{path:e,value:r,paramName:t,paramIn:n,isXml:o}}}function Se(e,t,n,r){return{type:X,payload:{path:e,param:t,value:n,isXml:r}}}var Ce=function(e,t){return{type:le,payload:{path:e,value:t}}},ke=function(){return{type:le,payload:{path:[],value:Object(R.Map)()}}},Oe=function(e,t){return{type:ee,payload:{pathMethod:e,isOAS3:t}}},Ae=function(e,t,n,r){return{type:Q,payload:{pathMethod:e,paramName:t,paramIn:n,includeEmptyValue:r}}};function Te(e){return{type:se,payload:{pathMethod:e}}}function je(e,t){return{type:ue,payload:{path:e,value:t,key:"consumes_value"}}}function Ie(e,t){return{type:ue,payload:{path:e,value:t,key:"produces_value"}}}var 
Pe=function(e,t,n){return{payload:{path:e,method:t,res:n},type:te}},Me=function(e,t,n){return{payload:{path:e,method:t,req:n},type:ne}},Ne=function(e,t,n){return{payload:{path:e,method:t,req:n},type:re}},Re=function(e){return{payload:e,type:oe}},De=function(e){return function(t){var n=t.fn,r=t.specActions,o=t.specSelectors,i=t.getConfigs,a=t.oas3Selectors,s=e.pathName,u=e.method,c=e.operation,l=i(),p=l.requestInterceptor,f=l.responseInterceptor,h=c.toJS();if(c&&c.get("parameters")&&c.get("parameters").filter(function(e){return e&&!0===e.get("allowEmptyValue")}).forEach(function(t){if(o.parameterInclusionSettingFor([s,u],t.get("name"),t.get("in"))){e.parameters=e.parameters||{};var n=Object(J.C)(t,e.parameters);(!n||n&&0===n.size)&&(e.parameters[t.get("name")]="")}}),e.contextUrl=L()(o.url()).toString(),h&&h.operationId?e.operationId=h.operationId:h&&s&&u&&(e.operationId=n.opId(h,s,u)),o.isOAS3()){var d="".concat(s,":").concat(u);e.server=a.selectedServer(d)||a.selectedServer();var m=a.serverVariables({server:e.server,namespace:d}).toJS(),g=a.serverVariables({server:e.server}).toJS();e.serverVariables=_()(m).length?m:g,e.requestContentType=a.requestContentType(s,u),e.responseContentType=a.responseContentType(s,u)||"*/*";var b=a.requestBodyValue(s,u);Object(J.t)(b)?e.requestBody=JSON.parse(b):b&&b.toJS?e.requestBody=b.toJS():e.requestBody=b}var w=y()({},e);w=n.buildRequest(w),r.setRequest(e.pathName,e.method,w);e.requestInterceptor=function(t){var n=p.apply(this,[t]),o=y()({},n);return r.setMutatedRequest(e.pathName,e.method,o),n},e.responseInterceptor=f;var x=v()();return n.execute(e).then(function(t){t.duration=v()()-x,r.setResponse(e.pathName,e.method,t)}).catch(function(t){console.error(t),r.setResponse(e.pathName,e.method,{error:!0,err:q()(t)})})}},Le=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.path,n=e.method,r=d()(e,["path","method"]);return function(e){var 
o=e.fn.fetch,i=e.specSelectors,a=e.specActions,s=i.specJsonWithResolvedSubtrees().toJS(),u=i.operationScheme(t,n),c=i.contentTypeValues([t,n]).toJS(),l=c.requestContentType,p=c.responseContentType,f=/xml/i.test(l),h=i.parameterValues([t,n],f).toJS();return a.executeRequest(Y({},r,{fetch:o,spec:s,pathName:t,method:n,parameters:h,requestContentType:l,scheme:u,responseContentType:p}))}};function Ue(e,t){return{type:ie,payload:{path:e,method:t}}}function qe(e,t){return{type:ae,payload:{path:e,method:t}}}function Fe(e,t,n){return{type:pe,payload:{scheme:e,path:t,method:n}}}},function(e,t){var n=e.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=n)},function(e,t,n){"use strict";var r=n(142),o=["kind","resolve","construct","instanceOf","predicate","represent","defaultStyle","styleAliases"],i=["scalar","sequence","mapping"];e.exports=function(e,t){var n,a;if(t=t||{},Object.keys(t).forEach(function(t){if(-1===o.indexOf(t))throw new r('Unknown option "'+t+'" is met in definition of "'+e+'" YAML type.')}),this.tag=e,this.kind=t.kind||null,this.resolve=t.resolve||function(){return!0},this.construct=t.construct||function(e){return e},this.instanceOf=t.instanceOf||null,this.predicate=t.predicate||null,this.represent=t.represent||null,this.defaultStyle=t.defaultStyle||null,this.styleAliases=(n=t.styleAliases||null,a={},null!==n&&Object.keys(n).forEach(function(e){n[e].forEach(function(t){a[String(t)]=e})}),a),-1===i.indexOf(this.kind))throw new r('Unknown kind "'+this.kind+'" is specified for "'+e+'" YAML type.')}},function(e,t,n){var r=n(199)("wks"),o=n(201),i=n(43).Symbol,a="function"==typeof i;(e.exports=function(e){return r[e]||(r[e]=a&&i[e]||(a?i:o)("Symbol."+e))}).store=r},function(e,t){e.exports=function(e){return"object"==typeof e?null!==e:"function"==typeof e}},function(e,t,n){var r=n(216)("wks"),o=n(163),i=n(31).Symbol,a="function"==typeof 
i;(e.exports=function(e){return r[e]||(r[e]=a&&i[e]||(a?i:o)("Symbol."+e))}).store=r},function(e,t,n){var r=n(43),o=n(74),i=n(83),a=n(98),s=n(157),u=function(e,t,n){var c,l,p,f,h=e&u.F,d=e&u.G,m=e&u.S,v=e&u.P,g=e&u.B,y=d?r:m?r[t]||(r[t]={}):(r[t]||{}).prototype,b=d?o:o[t]||(o[t]={}),_=b.prototype||(b.prototype={});for(c in d&&(n=t),n)p=((l=!h&&y&&void 0!==y[c])?y:n)[c],f=g&&l?s(p,r):v&&"function"==typeof p?s(Function.call,p):p,y&&a(y,c,p,e&u.U),b[c]!=p&&i(b,c,f),v&&_[c]!=p&&(_[c]=p)};r.core=o,u.F=1,u.G=2,u.S=4,u.P=8,u.B=16,u.W=32,u.U=64,u.R=128,e.exports=u},function(e,t,n){var r=n(34);e.exports=function(e){if(!r(e))throw TypeError(e+" is not an object!");return e}},function(e,t){var n;n=function(){return this}();try{n=n||new Function("return this")()}catch(e){"object"==typeof window&&(n=window)}e.exports=n},function(e,t){var n=Array.isArray;e.exports=n},function(e,t,n){"use strict";var r=!("undefined"==typeof window||!window.document||!window.document.createElement),o={canUseDOM:r,canUseWorkers:"undefined"!=typeof Worker,canUseEventListeners:r&&!(!window.addEventListener&&!window.attachEvent),canUseViewport:r&&!!window.screen,isInWorker:!r};e.exports=o},function(e,t,n){"use strict";var r=Object.prototype.hasOwnProperty;function o(e,t){return!!e&&r.call(e,t)}var i=/\\([\\!"#$%&'()*+,.\/:;<=>?@[\]^_`{|}~-])/g;function a(e){return!(e>=55296&&e<=57343)&&(!(e>=64976&&e<=65007)&&(65535!=(65535&e)&&65534!=(65535&e)&&(!(e>=0&&e<=8)&&(11!==e&&(!(e>=14&&e<=31)&&(!(e>=127&&e<=159)&&!(e>1114111)))))))}function s(e){if(e>65535){var t=55296+((e-=65536)>>10),n=56320+(1023&e);return String.fromCharCode(t,n)}return String.fromCharCode(e)}var u=/&([a-z#][a-z0-9]{1,31});/gi,c=/^#((?:x[a-f0-9]{1,8}|[0-9]{1,8}))/i,l=n(469);function p(e,t){var n=0;return o(l,t)?l[t]:35===t.charCodeAt(0)&&c.test(t)&&a(n="x"===t[1].toLowerCase()?parseInt(t.slice(2),16):parseInt(t.slice(1),10))?s(n):e}var f=/[&<>"]/,h=/[&<>"]/g,d={"&":"&","<":"<",">":">",'"':"""};function m(e){return 
d[e]}t.assign=function(e){return[].slice.call(arguments,1).forEach(function(t){if(t){if("object"!=typeof t)throw new TypeError(t+"must be object");Object.keys(t).forEach(function(n){e[n]=t[n]})}}),e},t.isString=function(e){return"[object String]"===function(e){return Object.prototype.toString.call(e)}(e)},t.has=o,t.unescapeMd=function(e){return e.indexOf("\\")<0?e:e.replace(i,"$1")},t.isValidEntityCode=a,t.fromCodePoint=s,t.replaceEntities=function(e){return e.indexOf("&")<0?e:e.replace(u,p)},t.escapeHtml=function(e){return f.test(e)?e.replace(h,m):e}},function(e,t,n){var r=n(56),o=n(773);e.exports=function(e,t){if(null==e)return{};var n,i,a=o(e,t);if(r){var s=r(e);for(i=0;i=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}},function(e,t){var n=e.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=n)},function(e,t,n){var r=n(36),o=n(100),i=n(75),a=/"/g,s=function(e,t,n,r){var o=String(i(e)),s="<"+t;return""!==n&&(s+=" "+n+'="'+String(r).replace(a,""")+'"'),s+">"+o+""};e.exports=function(e,t){var n={};n[e]=t(s),r(r.P+r.F*o(function(){var t=""[e]('"');return t!==t.toLowerCase()||t.split('"').length>3}),"String",n)}},function(e,t,n){"use strict";n.r(t),n.d(t,"NEW_THROWN_ERR",function(){return i}),n.d(t,"NEW_THROWN_ERR_BATCH",function(){return a}),n.d(t,"NEW_SPEC_ERR",function(){return s}),n.d(t,"NEW_SPEC_ERR_BATCH",function(){return u}),n.d(t,"NEW_AUTH_ERR",function(){return c}),n.d(t,"CLEAR",function(){return l}),n.d(t,"CLEAR_BY",function(){return p}),n.d(t,"newThrownErr",function(){return f}),n.d(t,"newThrownErrBatch",function(){return h}),n.d(t,"newSpecErr",function(){return d}),n.d(t,"newSpecErrBatch",function(){return m}),n.d(t,"newAuthErr",function(){return v}),n.d(t,"clear",function(){return g}),n.d(t,"clearBy",function(){return y});var 
r=n(122),o=n.n(r),i="err_new_thrown_err",a="err_new_thrown_err_batch",s="err_new_spec_err",u="err_new_spec_err_batch",c="err_new_auth_err",l="err_clear",p="err_clear_by";function f(e){return{type:i,payload:o()(e)}}function h(e){return{type:a,payload:e}}function d(e){return{type:s,payload:e}}function m(e){return{type:u,payload:e}}function v(e){return{type:c,payload:e}}function g(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return{type:l,payload:e}}function y(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:function(){return!0};return{type:p,payload:e}}},function(e,t,n){var r=n(99);e.exports=function(e){if(!r(e))throw TypeError(e+" is not an object!");return e}},function(e,t){"function"==typeof Object.create?e.exports=function(e,t){e.super_=t,e.prototype=Object.create(t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}})}:e.exports=function(e,t){e.super_=t;var n=function(){};n.prototype=t.prototype,e.prototype=new n,e.prototype.constructor=e}},function(e,t,n){var r=n(66),o=r.Buffer;function i(e,t){for(var n in e)t[n]=e[n]}function a(e,t,n){return o(e,t,n)}o.from&&o.alloc&&o.allocUnsafe&&o.allocUnsafeSlow?e.exports=r:(i(r,t),t.Buffer=a),i(o,a),a.from=function(e,t,n){if("number"==typeof e)throw new TypeError("Argument must not be a number");return o(e,t,n)},a.alloc=function(e,t,n){if("number"!=typeof e)throw new TypeError("Argument must be a number");var r=o(e);return void 0!==t?"string"==typeof n?r.fill(t,n):r.fill(t):r.fill(0),r},a.allocUnsafe=function(e){if("number"!=typeof e)throw new TypeError("Argument must be a number");return o(e)},a.allocUnsafeSlow=function(e){if("number"!=typeof e)throw new TypeError("Argument must be a number");return r.SlowBuffer(e)}},function(e,t,n){var r=n(37),o=n(353),i=n(220),a=Object.defineProperty;t.f=n(50)?Object.defineProperty:function(e,t,n){if(r(e),t=i(t,!0),r(n),o)try{return a(e,t,n)}catch(e){}if("get"in n||"set"in n)throw TypeError("Accessors not 
supported!");return"value"in n&&(e[t]=n.value),e}},function(e,t,n){e.exports=!n(78)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},function(e,t,n){var r=n(371),o="object"==typeof self&&self&&self.Object===Object&&self,i=r||o||Function("return this")();e.exports=i},function(e,t){e.exports=function(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)}},function(e,t,n){"use strict";e.exports={debugTool:null}},function(e,t,n){e.exports=n(576)},function(e,t,n){e.exports=n(770)},function(e,t,n){e.exports=n(772)},function(e,t,n){e.exports=function(e){var t={};function n(r){if(t[r])return t[r].exports;var o=t[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)n.d(r,o,function(t){return e[t]}.bind(null,o));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return 
Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=57)}([function(e,t){e.exports=n(16)},function(e,t){e.exports=n(14)},function(e,t){e.exports=n(27)},function(e,t){e.exports=n(17)},function(e,t){e.exports=n(61)},function(e,t){e.exports=n(55)},function(e,t){e.exports=n(56)},function(e,t){e.exports=n(126)},function(e,t){e.exports=n(2)},function(e,t){e.exports=n(54)},function(e,t){e.exports=n(95)},function(e,t){e.exports=n(29)},function(e,t){e.exports=n(931)},function(e,t){e.exports=n(12)},function(e,t){e.exports=n(193)},function(e,t){e.exports=n(937)},function(e,t){e.exports=n(94)},function(e,t){e.exports=n(121)},function(e,t){e.exports=n(410)},function(e,t){e.exports=n(194)},function(e,t){e.exports=n(4)},function(e,t){e.exports=n(5)},function(e,t){e.exports=n(13)},function(e,t){e.exports=n(940)},function(e,t){e.exports=n(944)},function(e,t){e.exports=n(945)},function(e,t){e.exports=n(947)},function(e,t){e.exports=n(150)},function(e,t){e.exports=n(80)},function(e,t){e.exports=n(354)},function(e,t){e.exports=n(358)},function(e,t){e.exports=n(951)},function(e,t){e.exports=n(6)},function(e,t){e.exports=n(427)},function(e,t){e.exports=n(954)},function(e,t){e.exports=n(955)},function(e,t){e.exports=n(52)},function(e,t){e.exports=n(66)},function(e,t){e.exports=n(956)},function(e,t){e.exports=n(286)},function(e,t){e.exports=n(960)},function(e,t){e.exports=n(7)},function(e,t){e.exports=n(8)},function(e,t){e.exports=n(965)},function(e,t){e.exports=n(275)},function(e,t){e.exports=n(966)},function(e,t){e.exports=n(149)},function(e,t){e.exports=n(967)},function(e,t){e.exports=n(975)},function(e,t){e.exports=n(976)},function(e,t){e.exports=n(977)},function(e,t){e.exports=n(42)},function(e,t){e.exports=n(267)},function(e,t){e.exports=n(39)},function(e,t){e.exports=n(980)},function(e,t){e.exports=n(981)},function(e,t){e.exports=n(982)},function(e,t,n){e.exports=n(61)},function(e,t){e.exports=n(983)},function(e,t){e.exports=n(984)},function(e,t){e.exports=n(985)},function(e,t,n)
{"use strict";n.r(t);var r={};n.r(r),n.d(r,"path",function(){return Jn}),n.d(r,"query",function(){return Kn}),n.d(r,"header",function(){return $n}),n.d(r,"cookie",function(){return Gn});var o=n(9),i=n.n(o),a=n(10),s=n.n(a),u=n(4),c=n.n(u),l=n(5),p=n.n(l),f=n(6),h=n.n(f),d=n(0),m=n.n(d),v=n(8),g=n.n(v),y=(n(58),n(15)),b=n.n(y),_=n(25),w=n.n(_),x=n(12),E=n.n(x),S=n(7),C=n.n(S),k=n(38),O=n.n(k),A=n(1),T=n.n(A),j=n(11),I=n.n(j),P=n(2),M=n.n(P),N=n(22),R=n.n(N),D=n(18),L=n.n(D),U=n(19),q=n.n(U),F=(n(59),n(31)),B=n.n(F),z=n(27),V=n.n(z),H=n(39),W=n.n(H),J=n(17),K=n.n(J),Y=n(26),$=n.n(Y),G=n(20),Z=n.n(G),X=n(21),Q=n.n(X),ee=n(40),te=n.n(ee),ne=n(41),re=n.n(ne),oe=n(32),ie=n.n(oe),ae=n(42),se=n.n(ae),ue=n(43);function ce(){if("undefined"==typeof Reflect||!$.a)return!1;if($.a.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call($()(Date,[],function(){})),!0}catch(e){return!1}}var le=function(e){var t=function(e,t){return{name:e,value:t}};return K()(e.prototype.set)||K()(e.prototype.get)||K()(e.prototype.getAll)||K()(e.prototype.has)?e:function(e){se()(o,e);var n,r=(n=o,function(){var e,t=ie()(n);if(ce()){var r=ie()(this).constructor;e=$()(t,arguments,r)}else e=t.apply(this,arguments);return re()(this,e)});function o(e){var t;return Z()(this,o),(t=r.call(this,e))._entryList=[],t}return Q()(o,[{key:"append",value:function(e,n,r){return this._entryList.push(t(e,n)),te()(ie()(o.prototype),"append",this).call(this,e,n,r)}},{key:"set",value:function(e,n){var r=t(e,n);this._entryList=this._entryList.filter(function(t){return t.name!==e}),this._entryList.push(r)}},{key:"get",value:function(e){var t=this._entryList.find(function(t){return t.name===e});return void 0===t?null:t}},{key:"getAll",value:function(e){return this._entryList.filter(function(t){return t.name===e}).map(function(e){return e.value})}},{key:"has",value:function(e){return this._entryList.some(function(t){return t.name===e})}}]),o}(e)}(n.n(ue).a);function pe(e,t){var 
n=m()(e);if(h.a){var r=h()(e);t&&(r=r.filter(function(t){return p()(e,t).enumerable})),n.push.apply(n,r)}return n}var fe={serializeRes:ve,mergeInQueryOrForm:xe};function he(e){return de.apply(this,arguments)}function de(){return(de=q()(C.a.mark(function e(t){var n,r,o,i,a,s=arguments;return C.a.wrap(function(e){for(;;)switch(e.prev=e.next){case 0:if(n=s.length>1&&void 0!==s[1]?s[1]:{},"object"===M()(t)&&(t=(n=t).url),n.headers=n.headers||{},fe.mergeInQueryOrForm(n),n.headers&&m()(n.headers).forEach(function(e){var t=n.headers[e];"string"==typeof t&&(n.headers[e]=t.replace(/\n+/g," "))}),!n.requestInterceptor){e.next=12;break}return e.next=8,n.requestInterceptor(n);case 8:if(e.t0=e.sent,e.t0){e.next=11;break}e.t0=n;case 11:n=e.t0;case 12:return r=n.headers["content-type"]||n.headers["Content-Type"],/multipart\/form-data/i.test(r)&&(delete n.headers["content-type"],delete n.headers["Content-Type"]),e.prev=14,e.next=17,(n.userFetch||fetch)(n.url,n);case 17:return o=e.sent,e.next=20,fe.serializeRes(o,t,n);case 20:if(o=e.sent,!n.responseInterceptor){e.next=28;break}return e.next=24,n.responseInterceptor(o);case 24:if(e.t1=e.sent,e.t1){e.next=27;break}e.t1=o;case 27:o=e.t1;case 28:e.next=38;break;case 30:if(e.prev=30,e.t2=e.catch(14),o){e.next=34;break}throw e.t2;case 34:throw(i=new Error(o.statusText)).statusCode=i.status=o.status,i.responseError=e.t2,i;case 38:if(o.ok){e.next=43;break}throw(a=new Error(o.statusText)).statusCode=a.status=o.status,a.response=o,a;case 43:return e.abrupt("return",o);case 44:case"end":return e.stop()}},e,null,[[14,30]])}))).apply(this,arguments)}var me=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";return/(json|xml|yaml|text)\b/.test(e)};function ve(e,t){var n=(arguments.length>2&&void 0!==arguments[2]?arguments[2]:{}).loadSpec,r=void 
0!==n&&n,o={ok:e.ok,url:e.url||t,status:e.status,statusText:e.statusText,headers:ge(e.headers)},i=o.headers["content-type"],a=r||me(i);return(a?e.text:e.blob||e.buffer).call(e).then(function(e){if(o.text=e,o.data=e,a)try{var t=function(e,t){return t&&(0===t.indexOf("application/json")||t.indexOf("+json")>0)?JSON.parse(e):V.a.safeLoad(e)}(e,i);o.body=t,o.obj=t}catch(e){o.parseError=e}return o})}function ge(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return K()(e.entries)?L()(e.entries()).reduce(function(e,t){var n=R()(t,2),r=n[0],o=n[1];return e[r]=function(e){return e.includes(", ")?e.split(", "):e}(o),e},{}):{}}function ye(e,t){return t||"undefined"==typeof navigator||(t=navigator),t&&"ReactNative"===t.product?!(!e||"object"!==M()(e)||"string"!=typeof e.uri):"undefined"!=typeof File?e instanceof File:null!==e&&"object"===M()(e)&&"function"==typeof e.pipe}function be(e,t){var n=e.collectionFormat,r=e.allowEmptyValue,o="object"===M()(e)?e.value:e;if(void 0===o&&r)return"";if(ye(o)||"boolean"==typeof o)return o;var i=encodeURIComponent;return t&&(i=W()(o)?function(e){return e}:function(e){return I()(e)}),"object"!==M()(o)||T()(o)?T()(o)?T()(o)&&!n?o.map(i).join(","):"multi"===n?o.map(i):o.map(i).join({csv:",",ssv:"%20",tsv:"%09",pipes:"|"}[n]):i(o):""}function _e(e){return O()(e).reduce(function(e,t){var n=R()(t,2),r=n[0],o=n[1];return o.collectionFormat||o.isOAS3formatArray?o.value.forEach(function(t){return e.append(r,be(function(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{},t=e.url,n=void 0===t?"":t,r=e.query,o=e.form;if(o){var i=m()(o).some(function(e){return ye(o[e].value)}),a=e.headers["content-type"]||e.headers["Content-Type"];i||/multipart\/form-data/i.test(a)?e.body=_e(e.form):e.body=we(o),delete e.form}if(r){var s=n.split("?"),u=R()(s,2),c=u[0],l=u[1],p="";if(l){var f=B.a.parse(l);m()(r).forEach(function(e){return delete f[e]}),p=B.a.stringify(f,{encode:!0})}var h=function(){for(var e=arguments.length,t=new 
Array(e),n=0;n0){var o=t(e,n[n.length-1],n);o&&(r=r.concat(o))}if(T()(e)){var i=e.map(function(e,r){return Xe(e,t,n.concat(r))});i&&(r=r.concat(i))}else if(nt(e)){var a=m()(e).map(function(r){return Xe(e[r],t,n.concat(r))});a&&(r=r.concat(a))}return r=et(r)}function Qe(e){return T()(e)?e:[e]}function et(e){var t;return(t=[]).concat.apply(t,Fe()(e.map(function(e){return T()(e)?et(e):e})))}function tt(e){return e.filter(function(e){return void 0!==e})}function nt(e){return e&&"object"===M()(e)}function rt(e){return e&&"function"==typeof e}function ot(e){if(st(e)){var t=e.op;return"add"===t||"remove"===t||"replace"===t}return!1}function it(e){return ot(e)||st(e)&&"mutation"===e.type}function at(e){return it(e)&&("add"===e.op||"replace"===e.op||"merge"===e.op||"mergeDeep"===e.op)}function st(e){return e&&"object"===M()(e)}function ut(e,t){try{return ze.a.getValueByPointer(e,t)}catch(e){return console.error(e),{}}}var ct=n(47),lt=n.n(ct),pt=n(48),ft=n(35),ht=n.n(ft);function dt(e,t){function n(){Error.captureStackTrace?Error.captureStackTrace(this,this.constructor):this.stack=(new Error).stack;for(var e=arguments.length,n=new Array(e),r=0;r-1&&-1===yt.indexOf(n)||bt.indexOf(r)>-1||_t.some(function(e){return r.indexOf(e)>-1})}function xt(e,t){var n=e.split("#"),r=R()(n,2),o=r[0],i=r[1],a=E.a.resolve(o||"",t||"");return i?"".concat(a,"#").concat(i):a}var Et="application/json, application/yaml",St=new RegExp("^([a-z]+://|//)","i"),Ct=dt("JSONRefError",function(e,t,n){this.originalError=n,Ne()(this,t||{})}),kt={},Ot=new lt.a,At=[function(e){return"paths"===e[0]&&"responses"===e[3]&&"content"===e[5]&&"example"===e[7]},function(e){return"paths"===e[0]&&"requestBody"===e[3]&&"content"===e[4]&&"example"===e[6]}],Tt={key:"$ref",plugin:function(e,t,n,r){var o=r.getInstance(),i=n.slice(0,-1);if(!wt(i)&&(a=i,!At.some(function(e){return e(a)}))){var a,s=r.getContext(n).baseDoc;if("string"!=typeof e)return new Ct("$ref: must be a string (JSON-Ref)",{$ref:e,baseDoc:s,fullPath:n});var 
u,c,l,p=Nt(e),f=p[0],h=p[1]||"";try{u=s||f?Pt(f,s):null}catch(t){return Mt(t,{pointer:h,$ref:e,basePath:u,fullPath:n})}if(function(e,t,n,r){var o=Ot.get(r);o||(o={},Ot.set(r,o));var i=function(e){if(0===e.length)return"";return"/".concat(e.map(Ft).join("/"))}(n),a="".concat(t||"","#").concat(e),s=i.replace(/allOf\/\d+\/?/g,""),u=r.contextTree.get([]).baseDoc;if(t==u&&zt(s,e))return!0;var c="";if(n.some(function(e){return c="".concat(c,"/").concat(Ft(e)),o[c]&&o[c].some(function(e){return zt(e,a)||zt(a,e)})}))return!0;o[s]=(o[s]||[]).concat(a)}(h,u,i,r)&&!o.useCircularStructures){var d=xt(e,u);return e===d?null:Ke.replace(n,d)}if(null==u?(l=Ut(h),void 0===(c=r.get(l))&&(c=new Ct("Could not resolve reference: ".concat(e),{pointer:h,$ref:e,baseDoc:s,fullPath:n}))):c=null!=(c=Rt(u,h)).__value?c.__value:c.catch(function(t){throw Mt(t,{pointer:h,$ref:e,baseDoc:s,fullPath:n})}),c instanceof Error)return[Ke.remove(n),c];var v=xt(e,u),g=Ke.replace(i,c,{$$ref:v});if(u&&u!==s)return[g,Ke.context(i,{baseDoc:u})];try{if(!function(e,t){var n=[e];return t.path.reduce(function(e,t){return n.push(e[t]),e[t]},e),function e(t){return Ke.isObject(t)&&(n.indexOf(t)>=0||m()(t).some(function(n){return e(t[n])}))}(t.value)}(r.state,g)||o.useCircularStructures)return g}catch(e){return null}}}},jt=Ne()(Tt,{docCache:kt,absoluteify:Pt,clearCache:function(e){void 0!==e?delete kt[e]:m()(kt).forEach(function(e){delete kt[e]})},JSONRefError:Ct,wrapError:Mt,getDoc:Dt,split:Nt,extractFromDoc:Rt,fetchJSON:function(e){return Object(pt.fetch)(e,{headers:{Accept:Et},loadSpec:!0}).then(function(e){return e.text()}).then(function(e){return V.a.safeLoad(e)})},extract:Lt,jsonPointerToArray:Ut,unescapeJsonPointerToken:qt}),It=jt;function Pt(e,t){if(!St.test(e)){if(!t)throw new Ct("Tried to resolve a relative URL, without having a basePath. 
path: '".concat(e,"' basePath: '").concat(t,"'"));return E.a.resolve(t,e)}return e}function Mt(e,t){var n;return n=e&&e.response&&e.response.body?"".concat(e.response.body.code," ").concat(e.response.body.message):e.message,new Ct("Could not resolve reference: ".concat(n),t,e)}function Nt(e){return(e+"").split("#")}function Rt(e,t){var n=kt[e];if(n&&!Ke.isPromise(n))try{var r=Lt(t,n);return Ne()(je.a.resolve(r),{__value:r})}catch(e){return je.a.reject(e)}return Dt(e).then(function(e){return Lt(t,e)})}function Dt(e){var t=kt[e];return t?Ke.isPromise(t)?t:je.a.resolve(t):(kt[e]=jt.fetchJSON(e).then(function(t){return kt[e]=t,t}),kt[e])}function Lt(e,t){var n=Ut(e);if(n.length<1)return t;var r=Ke.getIn(t,n);if(void 0===r)throw new Ct("Could not resolve pointer: ".concat(e," does not exist in document"),{pointer:e});return r}function Ut(e){if("string"!=typeof e)throw new TypeError("Expected a string, got a ".concat(M()(e)));return"/"===e[0]&&(e=e.substr(1)),""===e?[]:e.split("/").map(qt)}function qt(e){return"string"!=typeof e?e:ht.a.unescape(e.replace(/~1/g,"/").replace(/~0/g,"~"))}function Ft(e){return ht.a.escape(e.replace(/~/g,"~0").replace(/\//g,"~1"))}var Bt=function(e){return!e||"/"===e||"#"===e};function zt(e,t){if(Bt(t))return!0;var n=e.charAt(t.length),r=t.slice(-1);return 0===e.indexOf(t)&&(!n||"/"===n||"#"===n)&&"#"!==r}var Vt={key:"allOf",plugin:function(e,t,n,r,o){if(!o.meta||!o.meta.$$ref){var i=n.slice(0,-1);if(!wt(i)){if(!T()(e)){var a=new TypeError("allOf must be an array");return a.fullPath=n,a}var s=!1,u=o.value;i.forEach(function(e){u&&(u=u[e])}),delete(u=Ne()({},u)).allOf;var c=[];return c.push(r.replace(i,{})),e.forEach(function(e,t){if(!r.isObject(e)){if(s)return null;s=!0;var o=new TypeError("Elements in allOf must be objects");return o.fullPath=n,c.push(o)}c.push(r.mergeDeep(i,e));var a=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=n.specmap,o=n.getBaseUrlForNodePath,i=void 0===o?function(e){return 
r.getContext([].concat(Fe()(t),Fe()(e))).baseDoc}:o,a=n.targetKeys,s=void 0===a?["$ref","$$ref"]:a,u=[];return vt()(e).forEach(function(){if(s.indexOf(this.key)>-1){var e=this.path,n=t.concat(this.path),o=xt(this.node,i(e));u.push(r.replace(n,o))}}),u}(e,n.slice(0,-1),{getBaseUrlForNodePath:function(e){return r.getContext([].concat(Fe()(n),[t],Fe()(e))).baseDoc},specmap:r});c.push.apply(c,Fe()(a))}),c.push(r.mergeDeep(i,u)),u.$$ref||c.push(r.remove([].concat(i,"$$ref"))),c}}}},Ht={key:"parameters",plugin:function(e,t,n,r,o){if(T()(e)&&e.length){var i=Ne()([],e),a=n.slice(0,-1),s=Ne()({},Ke.getIn(r.spec,a));return e.forEach(function(e,t){try{i[t].default=r.parameterMacro(s,e)}catch(e){var o=new Error(e);return o.fullPath=n,o}}),Ke.replace(n,i)}return Ke.replace(n,e)}},Wt={key:"properties",plugin:function(e,t,n,r){var o=Ne()({},e);for(var i in e)try{o[i].default=r.modelPropertyMacro(o[i])}catch(e){var a=new Error(e);return a.fullPath=n,a}return Ke.replace(n,o)}};function Jt(e,t){var n=m()(e);if(h.a){var r=h()(e);t&&(r=r.filter(function(t){return p()(e,t).enumerable})),n.push.apply(n,r)}return n}var Kt=function(){function e(t){Z()(this,e),this.root=Yt(t||{})}return Q()(e,[{key:"set",value:function(e,t){var n=this.getParent(e,!0);if(n){var r=e[e.length-1],o=n.children;o[r]?$t(o[r],t,n):o[r]=Yt(t,n)}else $t(this.root,t,null)}},{key:"get",value:function(e){if((e=e||[]).length<1)return this.root.value;for(var t,n,r=this.root,o=0;o=e.length?{done:!0}:{done:!1,value:e[t++]}},e:function(e){throw e},f:n}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var r,o,i=!0,a=!1;return{s:function(){r=Se()(e)},n:function(){var e=r.next();return i=e.done,e},e:function(e){a=!0,o=e},f:function(){try{i||null==r.return||r.return()}finally{if(a)throw o}}}}function Zt(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new 
Array(t);n1?n-1:0),o=1;o1?n-1:0),o=1;o0})}},{key:"nextPromisedPatch",value:function(){if(this.promisedPatches.length>0)return je.a.race(this.promisedPatches.map(function(e){return e.value}))}},{key:"getPluginHistory",value:function(e){var t=this.getPluginName(e);return this.pluginHistory[t]||[]}},{key:"getPluginRunCount",value:function(e){return this.getPluginHistory(e).length}},{key:"getPluginHistoryTip",value:function(e){var t=this.getPluginHistory(e);return t&&t[t.length-1]||{}}},{key:"getPluginMutationIndex",value:function(e){var t=this.getPluginHistoryTip(e).mutationIndex;return"number"!=typeof t?-1:t}},{key:"getPluginName",value:function(e){return e.pluginName}},{key:"updatePluginHistory",value:function(e,t){var n=this.getPluginName(e);(this.pluginHistory[n]=this.pluginHistory[n]||[]).push(t)}},{key:"updatePatches",value:function(e,t){var n=this;Ke.normalizeArray(e).forEach(function(e){if(e instanceof Error)n.errors.push(e);else try{if(!Ke.isObject(e))return void n.debug("updatePatches","Got a non-object patch",e);if(n.showDebug&&n.allPatches.push(e),Ke.isPromise(e.value))return n.promisedPatches.push(e),void n.promisedPatchThen(e);if(Ke.isContextPatch(e))return void n.setContext(e.path,e.value);if(Ke.isMutation(e))return void n.updateMutations(e)}catch(e){console.error(e),n.errors.push(e)}})}},{key:"updateMutations",value:function(e){"object"===M()(e.value)&&!T()(e.value)&&this.allowMetaPatches&&(e.value=Ne()({},e.value));var t=Ke.applyPatch(this.state,e,{allowMetaPatches:this.allowMetaPatches});t&&(this.mutations.push(e),this.state=t)}},{key:"removePromisedPatch",value:function(e){var t=this.promisedPatches.indexOf(e);t<0?this.debug("Tried to remove a promisedPatch that isn't there!"):this.promisedPatches.splice(t,1)}},{key:"promisedPatchThen",value:function(e){var t=this;return e.value=e.value.then(function(n){var 
r=Ne()({},e,{value:n});t.removePromisedPatch(e),t.updatePatches(r)}).catch(function(n){t.removePromisedPatch(e),t.updatePatches(n)})}},{key:"getMutations",value:function(e,t){return e=e||0,"number"!=typeof t&&(t=this.mutations.length),this.mutations.slice(e,t)}},{key:"getCurrentMutations",value:function(){return this.getMutationsForPlugin(this.getCurrentPlugin())}},{key:"getMutationsForPlugin",value:function(e){var t=this.getPluginMutationIndex(e);return this.getMutations(t+1)}},{key:"getCurrentPlugin",value:function(){return this.currentPlugin}},{key:"getPatchesOfType",value:function(e,t){return e.filter(t)}},{key:"getLib",value:function(){return this.libMethods}},{key:"_get",value:function(e){return Ke.getIn(this.state,e)}},{key:"_getContext",value:function(e){return this.contextTree.get(e)}},{key:"setContext",value:function(e,t){return this.contextTree.set(e,t)}},{key:"_hasRun",value:function(e){return this.getPluginRunCount(this.getCurrentPlugin())>(e||0)}},{key:"_clone",value:function(e){return JSON.parse(I()(e))}},{key:"dispatch",value:function(){var e=this,t=this,n=this.nextPlugin();if(!n){var r=this.nextPromisedPatch();if(r)return r.then(function(){return e.dispatch()}).catch(function(){return e.dispatch()});var o={spec:this.state,errors:this.errors};return this.showDebug&&(o.patches=this.allPatches),je.a.resolve(o)}if(t.pluginCount=t.pluginCount||{},t.pluginCount[n]=(t.pluginCount[n]||0)+1,t.pluginCount[n]>100)return je.a.resolve({spec:t.state,errors:t.errors.concat(new Error("We've reached a hard limit of ".concat(100," plugin runs")))});if(n!==this.currentPlugin&&this.promisedPatches.length){var i=this.promisedPatches.map(function(e){return e.value});return je.a.all(i.map(function(e){return e.then(Ue.a,Ue.a)})).then(function(){return e.dispatch()})}return function(){t.currentPlugin=n;var e=t.getCurrentMutations(),r=t.mutations.length-1;try{if(n.isGenerator){var 
o,i=Gt(n(e,t.getLib()));try{for(i.s();!(o=i.n()).done;){a(o.value)}}catch(e){i.e(e)}finally{i.f()}}else{a(n(e,t.getLib()))}}catch(e){console.error(e),a([Ne()(Pe()(e),{plugin:n})])}finally{t.updatePluginHistory(n,{mutationIndex:r})}return t.dispatch()}();function a(e){e&&(e=Ke.fullyNormalizeArray(e),t.updatePatches(e,n))}}}]),e}();var Qt={refs:It,allOf:Vt,parameters:Ht,properties:Wt},en=n(36),tn=n.n(en);function nn(e){if(void 0===Ae.a||null==e[ke.a]){if(T()(e)||(e=function(e,t){if(!e)return;if("string"==typeof e)return rn(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);"Object"===n&&e.constructor&&(n=e.constructor.name);if("Map"===n||"Set"===n)return L()(n);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return rn(e,t)}(e))){var t=0,n=function(){};return{s:n,n:function(){return t>=e.length?{done:!0}:{done:!1,value:e[t++]}},e:function(e){throw e},f:n}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var r,o,i=!0,a=!1;return{s:function(){r=Se()(e)},n:function(){var e=r.next();return i=e.done,e},e:function(e){a=!0,o=e},f:function(){try{i||null==r.return||r.return()}finally{if(a)throw o}}}}function rn(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n2&&void 0!==arguments[2]?arguments[2]:"",r=(arguments.length>3&&void 0!==arguments[3]?arguments[3]:{}).v2OperationIdCompatibilityMode;return e&&"object"===M()(e)?(e.operationId||"").replace(/\s/g,"").length?an(e.operationId):function(e,t){if((arguments.length>2&&void 0!==arguments[2]?arguments[2]:{}).v2OperationIdCompatibilityMode){var n="".concat(t.toLowerCase(),"_").concat(e).replace(/[\s!@#$%^&*()_+=[{\]};:<>|.\/?,\\'""-]/g,"_");return(n=n||"".concat(e.substring(1),"_").concat(t)).replace(/((_){2,})/g,"_").replace(/^(_)*/g,"").replace(/([_])*$/g,"")}return"".concat(on(t)).concat(an(e))}(t,n,{v2OperationIdCompatibilityMode:r}):null}function 
cn(e,t){return"".concat(on(t),"-").concat(e)}function ln(e,t){return e&&e.paths?function(e,t){return pn(e,t,!0)||null}(e,function(e){var n=e.pathName,r=e.method,o=e.operation;if(!o||"object"!==M()(o))return!1;var i=o.operationId;return[un(o,n,r),cn(n,r),i].some(function(e){return e&&e===t})}):null}function pn(e,t,n){if(!e||"object"!==M()(e)||!e.paths||"object"!==M()(e.paths))return null;var r=e.paths;for(var o in r)for(var i in r[o])if("PARAMETERS"!==i.toUpperCase()){var a=r[o][i];if(a&&"object"===M()(a)){var s={spec:e,pathName:o,method:i.toUpperCase(),operation:a},u=t(s);if(n&&u)return s}}}function fn(e){var t=e.spec,n=t.paths,r={};if(!n||t.$$normalized)return e;for(var o in n){var i=n[o];if(tn()(i)){var a=i.parameters,s=function(e){var n=i[e];if(!tn()(n))return"continue";var s=un(n,o,e);if(s){r[s]?r[s].push(n):r[s]=[n];var u=r[s];if(u.length>1)u.forEach(function(e,t){e.__originalOperationId=e.__originalOperationId||e.operationId,e.operationId="".concat(s).concat(t+1)});else if(void 0!==n.operationId){var c=u[0];c.__originalOperationId=c.__originalOperationId||n.operationId,c.operationId=s}}if("parameters"!==e){var l=[],p={};for(var f in t)"produces"!==f&&"consumes"!==f&&"security"!==f||(p[f]=t[f],l.push(p));if(a&&(p.parameters=a,l.push(p)),l.length){var h,d=nn(l);try{for(d.s();!(h=d.n()).done;){var m=h.value;for(var v in m)if(n[v]){if("parameters"===v){var g,y=nn(m[v]);try{var b=function(){var e=g.value;n[v].some(function(t){return t.name&&t.name===e.name||t.$ref&&t.$ref===e.$ref||t.$$ref&&t.$$ref===e.$$ref||t===e})||n[v].push(e)};for(y.s();!(g=y.n()).done;)b()}catch(e){y.e(e)}finally{y.f()}}}else n[v]=m[v]}}catch(e){d.e(e)}finally{d.f()}}}};for(var u in i)s(u)}}return t.$$normalized=!0,e}function hn(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.requestInterceptor,r=t.responseInterceptor,o=e.withCredentials?"include":"same-origin";return function(t){return 
e({url:t,loadSpec:!0,requestInterceptor:n,responseInterceptor:r,headers:{Accept:Et},credentials:o}).then(function(e){return e.body})}}function dn(e){var t=e.fetch,n=e.spec,r=e.url,o=e.mode,i=e.allowMetaPatches,a=void 0===i||i,s=e.pathDiscriminator,u=e.modelPropertyMacro,c=e.parameterMacro,l=e.requestInterceptor,p=e.responseInterceptor,f=e.skipNormalization,h=e.useCircularStructures,d=e.http,m=e.baseDoc;return m=m||r,d=t||d||he,n?v(n):hn(d,{requestInterceptor:l,responseInterceptor:p})(m).then(v);function v(e){m&&(Qt.refs.docCache[m]=e),Qt.refs.fetchJSON=hn(d,{requestInterceptor:l,responseInterceptor:p});var t,n=[Qt.refs];return"function"==typeof c&&n.push(Qt.parameters),"function"==typeof u&&n.push(Qt.properties),"strict"!==o&&n.push(Qt.allOf),(t={spec:e,context:{baseDoc:m},plugins:n,allowMetaPatches:a,pathDiscriminator:s,parameterMacro:c,modelPropertyMacro:u,useCircularStructures:h},new Xt(t).dispatch()).then(f?function(){var e=q()(C.a.mark(function e(t){return C.a.wrap(function(e){for(;;)switch(e.prev=e.next){case 0:return e.abrupt("return",t);case 1:case"end":return e.stop()}},e)}));return function(t){return e.apply(this,arguments)}}():fn)}}var mn=n(16),vn=n.n(mn);function gn(e,t){var n=m()(e);if(h.a){var r=h()(e);t&&(r=r.filter(function(t){return p()(e,t).enumerable})),n.push.apply(n,r)}return n}function yn(e){for(var t=1;t2&&void 0!==m[2]?m[2]:{},o=r.returnEntireTree,i=r.baseDoc,a=r.requestInterceptor,s=r.responseInterceptor,u=r.parameterMacro,c=r.modelPropertyMacro,l=r.useCircularStructures,p={pathDiscriminator:n,baseDoc:i,requestInterceptor:a,responseInterceptor:s,parameterMacro:u,modelPropertyMacro:c,useCircularStructures:l},f=fn({spec:t}),h=f.spec,e.next=6,dn(yn({},p,{spec:h,allowMetaPatches:!0,skipNormalization:!0}));case 6:return d=e.sent,!o&&T()(n)&&n.length&&(d.spec=vn()(d.spec,n)||null),e.abrupt("return",d);case 9:case"end":return e.stop()}},e)}))).apply(this,arguments)}var _n=n(50),wn=n.n(_n);function xn(e,t){var n=m()(e);if(h.a){var 
r=h()(e);t&&(r=r.filter(function(t){return p()(e,t).enumerable})),n.push.apply(n,r)}return n}function En(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{};return function(t){var n=t.pathName,r=t.method,o=t.operationId;return function(t){var i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return e.execute(En({spec:e.spec},wn()(e,"requestInterceptor","responseInterceptor","userFetch"),{pathName:n,method:r,parameters:t,operationId:o},i))}}}};var On=n(51),An=n.n(On),Tn=n(52),jn=n.n(Tn),In=n(53),Pn=n.n(In),Mn=n(24),Nn=n.n(Mn),Rn=n(54),Dn=n.n(Rn),Ln={body:function(e){var t=e.req,n=e.value;t.body=n},header:function(e){var t=e.req,n=e.parameter,r=e.value;t.headers=t.headers||{},void 0!==r&&(t.headers[n.name]=r)},query:function(e){var t=e.req,n=e.value,r=e.parameter;t.query=t.query||{},!1===n&&"boolean"===r.type&&(n="false");0===n&&["number","integer"].indexOf(r.type)>-1&&(n="0");if(n)t.query[r.name]={collectionFormat:r.collectionFormat,value:n};else if(r.allowEmptyValue&&void 0!==n){var o=r.name;t.query[o]=t.query[o]||{},t.query[o].allowEmptyValue=!0}},path:function(e){var t=e.req,n=e.value,r=e.parameter;t.url=t.url.split("{".concat(r.name,"}")).join(encodeURIComponent(n))},formData:function(e){var t=e.req,n=e.value,r=e.parameter;(n||r.allowEmptyValue)&&(t.form=t.form||{},t.form[r.name]={value:n,allowEmptyValue:r.allowEmptyValue,collectionFormat:r.collectionFormat})}};n(60);var Un=n(55),qn=n.n(Un),Fn=n(56),Bn=function(e){return":/?#[]@!$&'()*+,;=".indexOf(e)>-1},zn=function(e){return/^[a-z0-9\-._~]+$/i.test(e)};function Vn(e){var t=(arguments.length>1&&void 0!==arguments[1]?arguments[1]:{}).escape,n=arguments.length>2?arguments[2]:void 0;return"number"==typeof e&&(e=e.toString()),"string"==typeof e&&e.length&&t?n?JSON.parse(e):Object(Fn.stringToCharArray)(e).map(function(e){return 
zn(e)?e:Bn(e)&&"unsafe"===t?e:(qn()(e)||[]).map(function(e){return"0".concat(e.toString(16).toUpperCase()).slice(-2)}).map(function(e){return"%".concat(e)}).join("")}).join(""):e}function Hn(e){var t=e.value;return T()(t)?function(e){var t=e.key,n=e.value,r=e.style,o=e.explode,i=e.escape,a=function(e){return Vn(e,{escape:i})};if("simple"===r)return n.map(function(e){return a(e)}).join(",");if("label"===r)return".".concat(n.map(function(e){return a(e)}).join("."));if("matrix"===r)return n.map(function(e){return a(e)}).reduce(function(e,n){return!e||o?"".concat(e||"",";").concat(t,"=").concat(n):"".concat(e,",").concat(n)},"");if("form"===r){var s=o?"&".concat(t,"="):",";return n.map(function(e){return a(e)}).join(s)}if("spaceDelimited"===r){var u=o?"".concat(t,"="):"";return n.map(function(e){return a(e)}).join(" ".concat(u))}if("pipeDelimited"===r){var c=o?"".concat(t,"="):"";return n.map(function(e){return a(e)}).join("|".concat(c))}}(e):"object"===M()(t)?function(e){var t=e.key,n=e.value,r=e.style,o=e.explode,i=e.escape,a=function(e){return Vn(e,{escape:i})},s=m()(n);if("simple"===r)return s.reduce(function(e,t){var r=a(n[t]),i=o?"=":",",s=e?"".concat(e,","):"";return"".concat(s).concat(t).concat(i).concat(r)},"");if("label"===r)return s.reduce(function(e,t){var r=a(n[t]),i=o?"=":".",s=e?"".concat(e,"."):".";return"".concat(s).concat(t).concat(i).concat(r)},"");if("matrix"===r&&o)return s.reduce(function(e,t){var r=a(n[t]),o=e?"".concat(e,";"):";";return"".concat(o).concat(t,"=").concat(r)},"");if("matrix"===r)return s.reduce(function(e,r){var o=a(n[r]),i=e?"".concat(e,","):";".concat(t,"=");return"".concat(i).concat(r,",").concat(o)},"");if("form"===r)return s.reduce(function(e,t){var r=a(n[t]),i=e?"".concat(e).concat(o?"&":","):"",s=o?"=":",";return"".concat(i).concat(t).concat(s).concat(r)},"")}(e):function(e){var t=e.key,n=e.value,r=e.style,o=e.escape,i=function(e){return Vn(e,{escape:o})};if("simple"===r)return 
i(n);if("label"===r)return".".concat(i(n));if("matrix"===r)return";".concat(t,"=").concat(i(n));if("form"===r)return i(n);if("deepObject"===r)return i(n)}(e)}function Wn(e,t){return t.includes("application/json")?"string"==typeof e?e:I()(e):e.toString()}function Jn(e){var t=e.req,n=e.value,r=e.parameter,o=r.name,i=r.style,a=r.explode,s=r.content;if(s){var u=m()(s)[0];t.url=t.url.split("{".concat(o,"}")).join(Vn(Wn(n,u),{escape:!0}))}else{var c=Hn({key:r.name,value:n,style:i||"simple",explode:a||!1,escape:!0});t.url=t.url.split("{".concat(o,"}")).join(c)}}function Kn(e){var t=e.req,n=e.value,r=e.parameter;if(t.query=t.query||{},r.content){var o=m()(r.content)[0];t.query[r.name]=Wn(n,o)}else if(!1===n&&(n="false"),0===n&&(n="0"),n){var i=M()(n);if("deepObject"===r.style)m()(n).forEach(function(e){var o=n[e];t.query["".concat(r.name,"[").concat(e,"]")]={value:Hn({key:e,value:o,style:"deepObject",escape:r.allowReserved?"unsafe":"reserved"}),skipEncoding:!0}});else if("object"!==i||T()(n)||"form"!==r.style&&r.style||!r.explode&&void 0!==r.explode){var a=encodeURIComponent(r.name);t.query[a]={value:Hn({key:a,value:n,style:r.style||"form",explode:void 0===r.explode||r.explode,escape:r.allowReserved?"unsafe":"reserved"}),skipEncoding:!0}}else{m()(n).forEach(function(e){var o=n[e];t.query[e]={value:Hn({key:e,value:o,style:r.style||"form",escape:r.allowReserved?"unsafe":"reserved"}),skipEncoding:!0}})}}else if(r.allowEmptyValue&&void 0!==n){var s=r.name;t.query[s]=t.query[s]||{},t.query[s].allowEmptyValue=!0}}var Yn=["accept","authorization","content-type"];function $n(e){var t=e.req,n=e.parameter,r=e.value;if(t.headers=t.headers||{},!(Yn.indexOf(n.name.toLowerCase())>-1))if(n.content){var o=m()(n.content)[0];t.headers[n.name]=Wn(r,o)}else void 0!==r&&(t.headers[n.name]=Hn({key:n.name,value:r,style:n.style||"simple",explode:void 0!==n.explode&&n.explode,escape:!1}))}function Gn(e){var t=e.req,n=e.parameter,r=e.value;t.headers=t.headers||{};var o=M()(r);if(n.content){var 
i=m()(n.content)[0];t.headers.Cookie="".concat(n.name,"=").concat(Wn(r,i))}else if("undefined"!==o){var a="object"===o&&!T()(r)&&n.explode?"":"".concat(n.name,"=");t.headers.Cookie=a+Hn({key:n.name,value:r,escape:!1,style:n.style||"form",explode:void 0!==n.explode&&n.explode})}}var Zn=n(37),Xn=function(e,t){var n=e.operation,r=e.requestBody,o=e.securities,i=e.spec,a=e.attachContentTypeForEmptyPayload,s=e.requestContentType;t=function(e){var t=e.request,n=e.securities,r=void 0===n?{}:n,o=e.operation,i=void 0===o?{}:o,a=e.spec,s=b()({},t),u=r.authorized,c=void 0===u?{}:u,l=i.security||a.security||[],p=c&&!!m()(c).length,f=vn()(a,["components","securitySchemes"])||{};if(s.headers=s.headers||{},s.query=s.query||{},!m()(r).length||!p||!l||T()(i.security)&&!i.security.length)return t;return l.forEach(function(e,t){for(var n in e){var r=c[n],o=f[n];if(r){var i=r.value||r,a=o.type;if(r)if("apiKey"===a)"query"===o.in&&(s.query[o.name]=i),"header"===o.in&&(s.headers[o.name]=i),"cookie"===o.in&&(s.cookies[o.name]=i);else if("http"===a){if("basic"===o.scheme){var u=i.username||"",l=i.password||"",p=Nn()("".concat(u,":").concat(l));s.headers.Authorization="Basic ".concat(p)}"bearer"===o.scheme&&(s.headers.Authorization="Bearer ".concat(i))}else if("oauth2"===a){var h=r.token||{},d=h[o["x-tokenName"]||"access_token"],m=h.token_type;m&&"bearer"!==m.toLowerCase()||(m="Bearer"),s.headers.Authorization="".concat(m," ").concat(d)}}}}),s}({request:t,securities:o,operation:n,spec:i});var u=n.requestBody||{},c=m()(u.content||{}),l=s&&c.indexOf(s)>-1;if(r||a){if(s&&l)t.headers["Content-Type"]=s;else if(!s){var p=c[0];p&&(t.headers["Content-Type"]=p,s=p)}}else s&&l&&(t.headers["Content-Type"]=s);return r&&(s?c.indexOf(s)>-1&&("application/x-www-form-urlencoded"===s||0===s.indexOf("multipart/")?"object"===M()(r)?(t.form={},m()(r).forEach(function(e){var n,o,i=r[e],a=!1;"undefined"!=typeof File&&(o=i instanceof File),"undefined"!=typeof Blob&&(o=o||i instanceof Blob),void 
0!==Zn.Buffer&&(o=o||Zn.Buffer.isBuffer(i)),"object"!==M()(i)||o?n=i:T()(i)?"application/x-www-form-urlencoded"===s?n=i.toString():(n=i,a=!0):n=I()(i),t.form[e]={value:n,isOAS3formatArray:a}})):t.form=r:t.body=r):t.body=r),t};var Qn=function(e,t){var n=e.spec,r=e.operation,o=e.securities,i=e.requestContentType,a=e.attachContentTypeForEmptyPayload;if((t=function(e){var t=e.request,n=e.securities,r=void 0===n?{}:n,o=e.operation,i=void 0===o?{}:o,a=e.spec,s=b()({},t),u=r.authorized,c=void 0===u?{}:u,l=r.specSecurity,p=void 0===l?[]:l,f=i.security||p,h=c&&!!m()(c).length,d=a.securityDefinitions;if(s.headers=s.headers||{},s.query=s.query||{},!m()(r).length||!h||!f||T()(i.security)&&!i.security.length)return t;return f.forEach(function(e,t){for(var n in e){var r=c[n];if(r){var o=r.token,i=r.value||r,a=d[n],u=a.type,l=a["x-tokenName"]||"access_token",p=o&&o[l],f=o&&o.token_type;if(r)if("apiKey"===u){var h="query"===a.in?"query":"headers";s[h]=s[h]||{},s[h][a.name]=i}else if("basic"===u)if(i.header)s.headers.authorization=i.header;else{var m=i.username||"",v=i.password||"";i.base64=Nn()("".concat(m,":").concat(v)),s.headers.authorization="Basic ".concat(i.base64)}else"oauth2"===u&&p&&(f=f&&"bearer"!==f.toLowerCase()?f:"Bearer",s.headers.authorization="".concat(f," ").concat(p))}}}),s}({request:t,securities:o,operation:r,spec:n})).body||t.form||a)i?t.headers["Content-Type"]=i:T()(r.consumes)?t.headers["Content-Type"]=r.consumes[0]:T()(n.consumes)?t.headers["Content-Type"]=n.consumes[0]:r.parameters&&r.parameters.filter(function(e){return"file"===e.type}).length?t.headers["Content-Type"]="multipart/form-data":r.parameters&&r.parameters.filter(function(e){return"formData"===e.in}).length&&(t.headers["Content-Type"]="application/x-www-form-urlencoded");else if(i){var s=r.parameters&&r.parameters.filter(function(e){return"body"===e.in}).length>0,u=r.parameters&&r.parameters.filter(function(e){return"formData"===e.in}).length>0;(s||u)&&(t.headers["Content-Type"]=i)}return 
t};function er(e,t){var n=m()(e);if(h.a){var r=h()(e);t&&(r=r.filter(function(t){return p()(e,t).enumerable})),n.push.apply(n,r)}return n}function tr(e){for(var t=1;t1&&console.warn("Parameter '".concat(e.name,"' is ambiguous because the defined spec has more than one parameter with the name: '").concat(e.name,"' and the passed-in parameter values did not define an 'in' value.")),null!==n){if(void 0!==e.default&&void 0===n&&(n=e.default),void 0===n&&e.required&&!e.allowEmptyValue)throw new Error("Required parameter ".concat(e.name," is not provided"));if(v&&e.schema&&"object"===e.schema.type&&"string"==typeof n)try{n=JSON.parse(n)}catch(e){throw new Error("Could not parse object parameter value string as JSON")}r&&r({req:g,parameter:e,value:n,operation:_,spec:t})}});var C=tr({},e,{operation:_});if((g=v?Xn(C,g):Qn(C,g)).cookies&&m()(g.cookies).length){var k=m()(g.cookies).reduce(function(e,t){var n=g.cookies[t];return e+(e?"&":"")+Dn.a.serialize(t,n)},"");g.headers.Cookie=k}return g.cookies&&delete g.cookies,xe(g),g}var ur=function(e){return e?e.replace(/\W/g,""):null};function cr(e){return sn(e.spec)?function(e){var t=e.spec,n=e.pathName,r=e.method,o=e.server,i=e.contextUrl,a=e.serverVariables,s=void 0===a?{}:a,u=vn()(t,["paths",n,(r||"").toLowerCase(),"servers"])||vn()(t,["paths",n,"servers"])||vn()(t,["servers"]),c="",l=null;if(o&&u&&u.length){var p=u.map(function(e){return e.url});p.indexOf(o)>-1&&(c=o,l=u[p.indexOf(o)])}!c&&u&&u.length&&(c=u[0].url,l=u[0]);if(c.indexOf("{")>-1){(function(e){var t,n=[],r=/{([^}]+)}/g;for(;t=r.exec(e);)n.push(t[1]);return n})(c).forEach(function(e){if(l.variables&&l.variables[e]){var t=l.variables[e],n=s[e]||t.default,r=new RegExp("{".concat(e,"}"),"g");c=c.replace(r,n)}})}return function(){var e,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"",n=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:"",r=E.a.parse(t),o=E.a.parse(n),i=ur(r.protocol)||ur(o.protocol)||"",a=r.host||o.host,s=r.pathname||"";e=i&&a?"".concat(i,"://").concat(a+s):s;return"/"===e[e.length-1]?e.slice(0,-1):e}(c,i)}(e):function(e){var t,n=e.spec,r=e.scheme,o=e.contextUrl,i=void 0===o?"":o,a=E.a.parse(i),s=T()(n.schemes)?n.schemes[0]:null,u=r||s||ur(a.protocol)||"http",c=n.host||a.host||"",l=n.basePath||"";t=u&&c?"".concat(u,"://").concat(c+l):l;return"/"===t[t.length-1]?t.slice(0,-1):t}(e)}function lr(e,t){var n=m()(e);if(h.a){var r=h()(e);t&&(r=r.filter(function(t){return p()(e,t).enumerable})),n.push.apply(n,r)}return n}function pr(e){var t=this,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if("string"==typeof e?n.url=e:n=e,!(this instanceof pr))return new pr(n);b()(this,n);var r=this.resolve().then(function(){return t.disableInterfaces||b()(t,pr.makeApisTagOperation(t)),t});return r.client=this,r}pr.http=he,pr.makeHttp=function(e,t,n){return n=n||function(e){return e},t=t||function(e){return e},function(r){return"string"==typeof r&&(r={url:r}),fe.mergeInQueryOrForm(r),r=t(r),n(e(r))}}.bind(null,pr.http),pr.resolve=dn,pr.resolveSubtree=function(e,t){return bn.apply(this,arguments)},pr.execute=function(e){var t=e.http,n=e.fetch,r=e.spec,o=e.operationId,i=e.pathName,a=e.method,s=e.parameters,u=e.securities,c=An()(e,["http","fetch","spec","operationId","pathName","method","parameters","securities"]),l=t||n||he;i&&a&&!o&&(o=cn(i,a));var p=ar.buildRequest(tr({spec:r,operationId:o,parameters:s,securities:u,http:l},c));return p.body&&(jn()(p.body)||Pn()(p.body))&&(p.body=I()(p.body)),l(p)},pr.serializeRes=ve,pr.serializeHeaders=ge,pr.clearCache=function(){Qt.refs.clearCache()},pr.makeApisTagOperation=function(){var e=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:{},t=kn.makeExecute(e);return{apis:kn.mapTagOperations({v2OperationIdCompatibilityMode:e.v2OperationIdCompatibilityMode,spec:e.spec,cb:t})}},pr.buildRequest=sr,pr.helpers={opId:un},pr.getBaseUrl=cr,pr.prototype={http:he,execute:function(e){return this.applyDefaults(),pr.execute(function(e){for(var t=1;t + * @license MIT + */ +var r=n(572),o=n(573),i=n(360);function a(){return u.TYPED_ARRAY_SUPPORT?2147483647:1073741823}function s(e,t){if(a()=a())throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+a().toString(16)+" bytes");return 0|e}function d(e,t){if(u.isBuffer(e))return e.length;if("undefined"!=typeof ArrayBuffer&&"function"==typeof ArrayBuffer.isView&&(ArrayBuffer.isView(e)||e instanceof ArrayBuffer))return e.byteLength;"string"!=typeof e&&(e=""+e);var n=e.length;if(0===n)return 0;for(var r=!1;;)switch(t){case"ascii":case"latin1":case"binary":return n;case"utf8":case"utf-8":case void 0:return B(e).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*n;case"hex":return n>>>1;case"base64":return z(e).length;default:if(r)return B(e).length;t=(""+t).toLowerCase(),r=!0}}function m(e,t,n){var r=!1;if((void 0===t||t<0)&&(t=0),t>this.length)return"";if((void 0===n||n>this.length)&&(n=this.length),n<=0)return"";if((n>>>=0)<=(t>>>=0))return"";for(e||(e="utf8");;)switch(e){case"hex":return j(this,t,n);case"utf8":case"utf-8":return k(this,t,n);case"ascii":return A(this,t,n);case"latin1":case"binary":return T(this,t,n);case"base64":return C(this,t,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return I(this,t,n);default:if(r)throw new TypeError("Unknown encoding: "+e);e=(e+"").toLowerCase(),r=!0}}function v(e,t,n){var r=e[t];e[t]=e[n],e[n]=r}function g(e,t,n,r,o){if(0===e.length)return-1;if("string"==typeof n?(r=n,n=0):n>2147483647?n=2147483647:n<-2147483648&&(n=-2147483648),n=+n,isNaN(n)&&(n=o?0:e.length-1),n<0&&(n=e.length+n),n>=e.length){if(o)return-1;n=e.length-1}else 
if(n<0){if(!o)return-1;n=0}if("string"==typeof t&&(t=u.from(t,r)),u.isBuffer(t))return 0===t.length?-1:y(e,t,n,r,o);if("number"==typeof t)return t&=255,u.TYPED_ARRAY_SUPPORT&&"function"==typeof Uint8Array.prototype.indexOf?o?Uint8Array.prototype.indexOf.call(e,t,n):Uint8Array.prototype.lastIndexOf.call(e,t,n):y(e,[t],n,r,o);throw new TypeError("val must be string, number or Buffer")}function y(e,t,n,r,o){var i,a=1,s=e.length,u=t.length;if(void 0!==r&&("ucs2"===(r=String(r).toLowerCase())||"ucs-2"===r||"utf16le"===r||"utf-16le"===r)){if(e.length<2||t.length<2)return-1;a=2,s/=2,u/=2,n/=2}function c(e,t){return 1===a?e[t]:e.readUInt16BE(t*a)}if(o){var l=-1;for(i=n;is&&(n=s-u),i=n;i>=0;i--){for(var p=!0,f=0;fo&&(r=o):r=o;var i=t.length;if(i%2!=0)throw new TypeError("Invalid hex string");r>i/2&&(r=i/2);for(var a=0;a>8,o=n%256,i.push(o),i.push(r);return i}(t,e.length-n),e,n,r)}function C(e,t,n){return 0===t&&n===e.length?r.fromByteArray(e):r.fromByteArray(e.slice(t,n))}function k(e,t,n){n=Math.min(e.length,n);for(var r=[],o=t;o239?4:c>223?3:c>191?2:1;if(o+p<=n)switch(p){case 1:c<128&&(l=c);break;case 2:128==(192&(i=e[o+1]))&&(u=(31&c)<<6|63&i)>127&&(l=u);break;case 3:i=e[o+1],a=e[o+2],128==(192&i)&&128==(192&a)&&(u=(15&c)<<12|(63&i)<<6|63&a)>2047&&(u<55296||u>57343)&&(l=u);break;case 4:i=e[o+1],a=e[o+2],s=e[o+3],128==(192&i)&&128==(192&a)&&128==(192&s)&&(u=(15&c)<<18|(63&i)<<12|(63&a)<<6|63&s)>65535&&u<1114112&&(l=u)}null===l?(l=65533,p=1):l>65535&&(l-=65536,r.push(l>>>10&1023|55296),l=56320|1023&l),r.push(l),o+=p}return function(e){var t=e.length;if(t<=O)return String.fromCharCode.apply(String,e);var n="",r=0;for(;r0&&(e=this.toString("hex",0,n).match(/.{2}/g).join(" "),this.length>n&&(e+=" ... 
")),""},u.prototype.compare=function(e,t,n,r,o){if(!u.isBuffer(e))throw new TypeError("Argument must be a Buffer");if(void 0===t&&(t=0),void 0===n&&(n=e?e.length:0),void 0===r&&(r=0),void 0===o&&(o=this.length),t<0||n>e.length||r<0||o>this.length)throw new RangeError("out of range index");if(r>=o&&t>=n)return 0;if(r>=o)return-1;if(t>=n)return 1;if(this===e)return 0;for(var i=(o>>>=0)-(r>>>=0),a=(n>>>=0)-(t>>>=0),s=Math.min(i,a),c=this.slice(r,o),l=e.slice(t,n),p=0;po)&&(n=o),e.length>0&&(n<0||t<0)||t>this.length)throw new RangeError("Attempt to write outside buffer bounds");r||(r="utf8");for(var i=!1;;)switch(r){case"hex":return b(this,e,t,n);case"utf8":case"utf-8":return _(this,e,t,n);case"ascii":return w(this,e,t,n);case"latin1":case"binary":return x(this,e,t,n);case"base64":return E(this,e,t,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return S(this,e,t,n);default:if(i)throw new TypeError("Unknown encoding: "+r);r=(""+r).toLowerCase(),i=!0}},u.prototype.toJSON=function(){return{type:"Buffer",data:Array.prototype.slice.call(this._arr||this,0)}};var O=4096;function A(e,t,n){var r="";n=Math.min(e.length,n);for(var o=t;or)&&(n=r);for(var o="",i=t;in)throw new RangeError("Trying to access beyond buffer length")}function M(e,t,n,r,o,i){if(!u.isBuffer(e))throw new TypeError('"buffer" argument must be a Buffer instance');if(t>o||te.length)throw new RangeError("Index out of range")}function N(e,t,n,r){t<0&&(t=65535+t+1);for(var o=0,i=Math.min(e.length-n,2);o>>8*(r?o:1-o)}function R(e,t,n,r){t<0&&(t=4294967295+t+1);for(var o=0,i=Math.min(e.length-n,4);o>>8*(r?o:3-o)&255}function D(e,t,n,r,o,i){if(n+r>e.length)throw new RangeError("Index out of range");if(n<0)throw new RangeError("Index out of range")}function L(e,t,n,r,i){return i||D(e,0,n,4),o.write(e,t,n,r,23,4),n+4}function U(e,t,n,r,i){return i||D(e,0,n,8),o.write(e,t,n,r,52,8),n+8}u.prototype.slice=function(e,t){var n,r=this.length;if((e=~~e)<0?(e+=r)<0&&(e=0):e>r&&(e=r),(t=void 
0===t?r:~~t)<0?(t+=r)<0&&(t=0):t>r&&(t=r),t0&&(o*=256);)r+=this[e+--t]*o;return r},u.prototype.readUInt8=function(e,t){return t||P(e,1,this.length),this[e]},u.prototype.readUInt16LE=function(e,t){return t||P(e,2,this.length),this[e]|this[e+1]<<8},u.prototype.readUInt16BE=function(e,t){return t||P(e,2,this.length),this[e]<<8|this[e+1]},u.prototype.readUInt32LE=function(e,t){return t||P(e,4,this.length),(this[e]|this[e+1]<<8|this[e+2]<<16)+16777216*this[e+3]},u.prototype.readUInt32BE=function(e,t){return t||P(e,4,this.length),16777216*this[e]+(this[e+1]<<16|this[e+2]<<8|this[e+3])},u.prototype.readIntLE=function(e,t,n){e|=0,t|=0,n||P(e,t,this.length);for(var r=this[e],o=1,i=0;++i=(o*=128)&&(r-=Math.pow(2,8*t)),r},u.prototype.readIntBE=function(e,t,n){e|=0,t|=0,n||P(e,t,this.length);for(var r=t,o=1,i=this[e+--r];r>0&&(o*=256);)i+=this[e+--r]*o;return i>=(o*=128)&&(i-=Math.pow(2,8*t)),i},u.prototype.readInt8=function(e,t){return t||P(e,1,this.length),128&this[e]?-1*(255-this[e]+1):this[e]},u.prototype.readInt16LE=function(e,t){t||P(e,2,this.length);var n=this[e]|this[e+1]<<8;return 32768&n?4294901760|n:n},u.prototype.readInt16BE=function(e,t){t||P(e,2,this.length);var n=this[e+1]|this[e]<<8;return 32768&n?4294901760|n:n},u.prototype.readInt32LE=function(e,t){return t||P(e,4,this.length),this[e]|this[e+1]<<8|this[e+2]<<16|this[e+3]<<24},u.prototype.readInt32BE=function(e,t){return t||P(e,4,this.length),this[e]<<24|this[e+1]<<16|this[e+2]<<8|this[e+3]},u.prototype.readFloatLE=function(e,t){return t||P(e,4,this.length),o.read(this,e,!0,23,4)},u.prototype.readFloatBE=function(e,t){return t||P(e,4,this.length),o.read(this,e,!1,23,4)},u.prototype.readDoubleLE=function(e,t){return t||P(e,8,this.length),o.read(this,e,!0,52,8)},u.prototype.readDoubleBE=function(e,t){return t||P(e,8,this.length),o.read(this,e,!1,52,8)},u.prototype.writeUIntLE=function(e,t,n,r){(e=+e,t|=0,n|=0,r)||M(this,e,t,n,Math.pow(2,8*n)-1,0);var 
o=1,i=0;for(this[t]=255&e;++i=0&&(i*=256);)this[t+o]=e/i&255;return t+n},u.prototype.writeUInt8=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,1,255,0),u.TYPED_ARRAY_SUPPORT||(e=Math.floor(e)),this[t]=255&e,t+1},u.prototype.writeUInt16LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,65535,0),u.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8):N(this,e,t,!0),t+2},u.prototype.writeUInt16BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,65535,0),u.TYPED_ARRAY_SUPPORT?(this[t]=e>>>8,this[t+1]=255&e):N(this,e,t,!1),t+2},u.prototype.writeUInt32LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,4,4294967295,0),u.TYPED_ARRAY_SUPPORT?(this[t+3]=e>>>24,this[t+2]=e>>>16,this[t+1]=e>>>8,this[t]=255&e):R(this,e,t,!0),t+4},u.prototype.writeUInt32BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,4,4294967295,0),u.TYPED_ARRAY_SUPPORT?(this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e):R(this,e,t,!1),t+4},u.prototype.writeIntLE=function(e,t,n,r){if(e=+e,t|=0,!r){var o=Math.pow(2,8*n-1);M(this,e,t,n,o-1,-o)}var i=0,a=1,s=0;for(this[t]=255&e;++i>0)-s&255;return t+n},u.prototype.writeIntBE=function(e,t,n,r){if(e=+e,t|=0,!r){var o=Math.pow(2,8*n-1);M(this,e,t,n,o-1,-o)}var i=n-1,a=1,s=0;for(this[t+i]=255&e;--i>=0&&(a*=256);)e<0&&0===s&&0!==this[t+i+1]&&(s=1),this[t+i]=(e/a>>0)-s&255;return t+n},u.prototype.writeInt8=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,1,127,-128),u.TYPED_ARRAY_SUPPORT||(e=Math.floor(e)),e<0&&(e=255+e+1),this[t]=255&e,t+1},u.prototype.writeInt16LE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,32767,-32768),u.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8):N(this,e,t,!0),t+2},u.prototype.writeInt16BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,2,32767,-32768),u.TYPED_ARRAY_SUPPORT?(this[t]=e>>>8,this[t+1]=255&e):N(this,e,t,!1),t+2},u.prototype.writeInt32LE=function(e,t,n){return 
e=+e,t|=0,n||M(this,e,t,4,2147483647,-2147483648),u.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8,this[t+2]=e>>>16,this[t+3]=e>>>24):R(this,e,t,!0),t+4},u.prototype.writeInt32BE=function(e,t,n){return e=+e,t|=0,n||M(this,e,t,4,2147483647,-2147483648),e<0&&(e=4294967295+e+1),u.TYPED_ARRAY_SUPPORT?(this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e):R(this,e,t,!1),t+4},u.prototype.writeFloatLE=function(e,t,n){return L(this,e,t,!0,n)},u.prototype.writeFloatBE=function(e,t,n){return L(this,e,t,!1,n)},u.prototype.writeDoubleLE=function(e,t,n){return U(this,e,t,!0,n)},u.prototype.writeDoubleBE=function(e,t,n){return U(this,e,t,!1,n)},u.prototype.copy=function(e,t,n,r){if(n||(n=0),r||0===r||(r=this.length),t>=e.length&&(t=e.length),t||(t=0),r>0&&r=this.length)throw new RangeError("sourceStart out of bounds");if(r<0)throw new RangeError("sourceEnd out of bounds");r>this.length&&(r=this.length),e.length-t=0;--o)e[o+t]=this[o+n];else if(i<1e3||!u.TYPED_ARRAY_SUPPORT)for(o=0;o>>=0,n=void 0===n?this.length:n>>>0,e||(e=0),"number"==typeof e)for(i=t;i55295&&n<57344){if(!o){if(n>56319){(t-=3)>-1&&i.push(239,191,189);continue}if(a+1===r){(t-=3)>-1&&i.push(239,191,189);continue}o=n;continue}if(n<56320){(t-=3)>-1&&i.push(239,191,189),o=n;continue}n=65536+(o-55296<<10|n-56320)}else o&&(t-=3)>-1&&i.push(239,191,189);if(o=null,n<128){if((t-=1)<0)break;i.push(n)}else if(n<2048){if((t-=2)<0)break;i.push(n>>6|192,63&n|128)}else if(n<65536){if((t-=3)<0)break;i.push(n>>12|224,n>>6&63|128,63&n|128)}else{if(!(n<1114112))throw new Error("Invalid code point");if((t-=4)<0)break;i.push(n>>18|240,n>>12&63|128,n>>6&63|128,63&n|128)}}return i}function z(e){return r.toByteArray(function(e){if((e=function(e){return e.trim?e.trim():e.replace(/^\s+|\s+$/g,"")}(e).replace(q,"")).length<2)return"";for(;e.length%4!=0;)e+="=";return e}(e))}function V(e,t,n,r){for(var o=0;o=t.length||o>=e.length);++o)t[o+n]=e[o];return o}}).call(this,n(38))},function(e,t,n){"use 
strict";e.exports={current:null}},function(e,t){e.exports=function(e){return null!=e&&"object"==typeof e}},function(e,t){var n,r,o=e.exports={};function i(){throw new Error("setTimeout has not been defined")}function a(){throw new Error("clearTimeout has not been defined")}function s(e){if(n===setTimeout)return setTimeout(e,0);if((n===i||!n)&&setTimeout)return n=setTimeout,setTimeout(e,0);try{return n(e,0)}catch(t){try{return n.call(null,e,0)}catch(t){return n.call(this,e,0)}}}!function(){try{n="function"==typeof setTimeout?setTimeout:i}catch(e){n=i}try{r="function"==typeof clearTimeout?clearTimeout:a}catch(e){r=a}}();var u,c=[],l=!1,p=-1;function f(){l&&u&&(l=!1,u.length?c=u.concat(c):p=-1,c.length&&h())}function h(){if(!l){var e=s(f);l=!0;for(var t=c.length;t;){for(u=c,c=[];++p1)for(var n=1;n0&&"/"!==t[0]});function oe(e,t,n){return t=t||[],te.apply(void 0,[e].concat(u()(t))).get("parameters",Object(p.List)()).reduce(function(e,t){var r=n&&"body"===t.get("in")?t.get("value_xml"):t.get("value");return e.set(Object(l.B)(t,{allowHashes:!1}),r)},Object(p.fromJS)({}))}function ie(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";if(p.List.isList(e))return e.some(function(e){return p.Map.isMap(e)&&e.get("in")===t})}function ae(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";if(p.List.isList(e))return e.some(function(e){return p.Map.isMap(e)&&e.get("type")===t})}function se(e,t){t=t||[];var n=x(e).getIn(["paths"].concat(u()(t)),Object(p.fromJS)({})),r=e.getIn(["meta","paths"].concat(u()(t)),Object(p.fromJS)({})),o=ue(e,t),i=n.get("parameters")||new p.List,a=r.get("consumes_value")?r.get("consumes_value"):ae(i,"file")?"multipart/form-data":ae(i,"formData")?"application/x-www-form-urlencoded":void 0;return Object(p.fromJS)({requestContentType:a,responseContentType:o})}function ue(e,t){t=t||[];var n=x(e).getIn(["paths"].concat(u()(t)),null);if(null!==n){var 
r=e.getIn(["meta","paths"].concat(u()(t),["produces_value"]),null),o=n.getIn(["produces",0],null);return r||o||"application/json"}}function ce(e,t){t=t||[];var n=x(e),r=n.getIn(["paths"].concat(u()(t)),null);if(null!==r){var o=t,i=a()(o,1)[0],s=r.get("produces",null),c=n.getIn(["paths",i,"produces"],null),l=n.getIn(["produces"],null);return s||c||l}}function le(e,t){t=t||[];var n=x(e),r=n.getIn(["paths"].concat(u()(t)),null);if(null!==r){var o=t,i=a()(o,1)[0],s=r.get("consumes",null),c=n.getIn(["paths",i,"consumes"],null),l=n.getIn(["consumes"],null);return s||c||l}}var pe=function(e,t,n){var r=e.get("url").match(/^([a-z][a-z0-9+\-.]*):/),i=o()(r)?r[1]:null;return e.getIn(["scheme",t,n])||e.getIn(["scheme","_defaultScheme"])||i||""},fe=function(e,t,n){return["http","https"].indexOf(pe(e,t,n))>-1},he=function(e,t){t=t||[];var n=e.getIn(["meta","paths"].concat(u()(t),["parameters"]),Object(p.fromJS)([])),r=!0;return n.forEach(function(e){var t=e.get("errors");t&&t.count()&&(r=!1)}),r};function de(e){return p.Map.isMap(e)?e:new p.Map}},function(e,t,n){"use strict";n.r(t),n.d(t,"SHOW_AUTH_POPUP",function(){return d}),n.d(t,"AUTHORIZE",function(){return m}),n.d(t,"LOGOUT",function(){return v}),n.d(t,"PRE_AUTHORIZE_OAUTH2",function(){return g}),n.d(t,"AUTHORIZE_OAUTH2",function(){return y}),n.d(t,"VALIDATE",function(){return b}),n.d(t,"CONFIGURE_AUTH",function(){return _}),n.d(t,"showDefinitions",function(){return w}),n.d(t,"authorize",function(){return x}),n.d(t,"logout",function(){return E}),n.d(t,"preAuthorizeImplicit",function(){return S}),n.d(t,"authorizeOauth2",function(){return C}),n.d(t,"authorizePassword",function(){return k}),n.d(t,"authorizeApplication",function(){return O}),n.d(t,"authorizeAccessCodeWithFormParams",function(){return A}),n.d(t,"authorizeAccessCodeWithBasicAuthentication",function(){return T}),n.d(t,"authorizeRequest",function(){return j}),n.d(t,"configureAuth",function(){return I});var 
r=n(27),o=n.n(r),i=n(17),a=n.n(i),s=n(29),u=n.n(s),c=n(96),l=n.n(c),p=n(18),f=n.n(p),h=n(3),d="show_popup",m="authorize",v="logout",g="pre_authorize_oauth2",y="authorize_oauth2",b="validate",_="configure_auth";function w(e){return{type:d,payload:e}}function x(e){return{type:m,payload:e}}function E(e){return{type:v,payload:e}}var S=function(e){return function(t){var n=t.authActions,r=t.errActions,o=e.auth,i=e.token,a=e.isValid,s=o.schema,c=o.name,l=s.get("flow");delete f.a.swaggerUIRedirectOauth2,"accessCode"===l||a||r.newAuthErr({authId:c,source:"auth",level:"warning",message:"Authorization may be unsafe, passed state was changed in server Passed state wasn't returned from auth server"}),i.error?r.newAuthErr({authId:c,source:"auth",level:"error",message:u()(i)}):n.authorizeOauth2({auth:o,token:i})}};function C(e){return{type:y,payload:e}}var k=function(e){return function(t){var n=t.authActions,r=e.schema,o=e.name,i=e.username,s=e.password,u=e.passwordType,c=e.clientId,l=e.clientSecret,p={grant_type:"password",scope:e.scopes.join(" "),username:i,password:s},f={};switch(u){case"request-body":!function(e,t,n){t&&a()(e,{client_id:t});n&&a()(e,{client_secret:n})}(p,c,l);break;case"basic":f.Authorization="Basic "+Object(h.a)(c+":"+l);break;default:console.warn("Warning: invalid passwordType ".concat(u," was passed, not including client id and secret"))}return n.authorizeRequest({body:Object(h.b)(p),url:r.get("tokenUrl"),name:o,headers:f,query:{},auth:e})}};var O=function(e){return function(t){var n=t.authActions,r=e.schema,o=e.scopes,i=e.name,a=e.clientId,s=e.clientSecret,u={Authorization:"Basic "+Object(h.a)(a+":"+s)},c={grant_type:"client_credentials",scope:o.join(" ")};return n.authorizeRequest({body:Object(h.b)(c),name:i,url:r.get("tokenUrl"),auth:e,headers:u})}},A=function(e){var t=e.auth,n=e.redirectUrl;return function(e){var 
r=e.authActions,o=t.schema,i=t.name,a=t.clientId,s=t.clientSecret,u=t.codeVerifier,c={grant_type:"authorization_code",code:t.code,client_id:a,client_secret:s,redirect_uri:n,code_verifier:u};return r.authorizeRequest({body:Object(h.b)(c),name:i,url:o.get("tokenUrl"),auth:t})}},T=function(e){var t=e.auth,n=e.redirectUrl;return function(e){var r=e.authActions,o=t.schema,i=t.name,a=t.clientId,s=t.clientSecret,u={Authorization:"Basic "+Object(h.a)(a+":"+s)},c={grant_type:"authorization_code",code:t.code,client_id:a,redirect_uri:n};return r.authorizeRequest({body:Object(h.b)(c),name:i,url:o.get("tokenUrl"),auth:t,headers:u})}},j=function(e){return function(t){var n,r=t.fn,i=t.getConfigs,s=t.authActions,c=t.errActions,p=t.oas3Selectors,f=t.specSelectors,h=t.authSelectors,d=e.body,m=e.query,v=void 0===m?{}:m,g=e.headers,y=void 0===g?{}:g,b=e.name,_=e.url,w=e.auth,x=(h.getConfigs()||{}).additionalQueryStringParams;n=f.isOAS3()?l()(_,p.selectedServer(),!0):l()(_,f.url(),!0),"object"===o()(x)&&(n.query=a()({},n.query,x));var E=n.toString(),S=a()({Accept:"application/json, text/plain, */*","Content-Type":"application/x-www-form-urlencoded","X-Requested-With":"XMLHttpRequest"},y);r.fetch({url:E,method:"post",headers:S,query:v,body:d,requestInterceptor:i().requestInterceptor,responseInterceptor:i().responseInterceptor}).then(function(e){var t=JSON.parse(e.data),n=t&&(t.error||""),r=t&&(t.parseError||"");e.ok?n||r?c.newAuthErr({authId:b,level:"error",source:"auth",message:u()(t)}):s.authorizeOauth2({auth:w,token:t}):c.newAuthErr({authId:b,level:"error",source:"auth",message:e.statusText})}).catch(function(e){var t=new Error(e).message;if(e.response&&e.response.data){var n=e.response.data;try{var r="string"==typeof n?JSON.parse(n):n;r.error&&(t+=", error: ".concat(r.error)),r.error_description&&(t+=", description: ".concat(r.error_description))}catch(e){}}c.newAuthErr({authId:b,level:"error",source:"auth",message:t})})}};function I(e){return{type:_,payload:e}}},function(e,t){var 
n=e.exports={version:"2.6.5"};"number"==typeof __e&&(__e=n)},function(e,t){e.exports=function(e){if(null==e)throw TypeError("Can't call method on "+e);return e}},function(e,t,n){var r=n(130),o=Math.min;e.exports=function(e){return e>0?o(r(e),9007199254740991):0}},function(e,t,n){var r=n(49),o=n(134);e.exports=n(50)?function(e,t,n){return r.f(e,t,o(1,n))}:function(e,t,n){return e[t]=n,e}},function(e,t){e.exports=function(e){try{return!!e()}catch(e){return!0}}},function(e,t,n){"use strict";e.exports=function(e){if("function"!=typeof e)throw new TypeError(e+" is not a function");return e}},function(e,t,n){e.exports=n(602)},function(e,t,n){"use strict";n.r(t),n.d(t,"UPDATE_LAYOUT",function(){return o}),n.d(t,"UPDATE_FILTER",function(){return i}),n.d(t,"UPDATE_MODE",function(){return a}),n.d(t,"SHOW",function(){return s}),n.d(t,"updateLayout",function(){return u}),n.d(t,"updateFilter",function(){return c}),n.d(t,"show",function(){return l}),n.d(t,"changeMode",function(){return p});var r=n(3),o="layout_update_layout",i="layout_update_filter",a="layout_update_mode",s="layout_show";function u(e){return{type:o,payload:e}}function c(e){return{type:i,payload:e}}function l(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];return e=Object(r.w)(e),{type:s,payload:{thing:e,shown:t}}}function p(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";return e=Object(r.w)(e),{type:a,payload:{thing:e,mode:t}}}},function(e,t,n){"use strict";(function(t){ +/*! 
+ * @description Recursive object extending + * @author Viacheslav Lotsmanov + * @license MIT + * + * The MIT License (MIT) + * + * Copyright (c) 2013-2018 Viacheslav Lotsmanov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +function n(e){return e instanceof t||e instanceof Date||e instanceof RegExp}function r(e){if(e instanceof t){var n=t.alloc?t.alloc(e.length):new t(e.length);return e.copy(n),n}if(e instanceof Date)return new Date(e.getTime());if(e instanceof RegExp)return new RegExp(e);throw new Error("Unexpected situation")}function o(e){var t=[];return e.forEach(function(e,i){"object"==typeof e&&null!==e?Array.isArray(e)?t[i]=o(e):n(e)?t[i]=r(e):t[i]=a({},e):t[i]=e}),t}function i(e,t){return"__proto__"===t?void 0:e[t]}var a=e.exports=function(){if(arguments.length<1||"object"!=typeof arguments[0])return!1;if(arguments.length<2)return arguments[0];var e,t,s=arguments[0],u=Array.prototype.slice.call(arguments,1);return u.forEach(function(u){"object"!=typeof u||null===u||Array.isArray(u)||Object.keys(u).forEach(function(c){return t=i(s,c),(e=i(u,c))===s?void 0:"object"!=typeof e||null===e?void(s[c]=e):Array.isArray(e)?void(s[c]=o(e)):n(e)?void(s[c]=r(e)):"object"!=typeof t||null===t||Array.isArray(t)?void(s[c]=a({},e)):void(s[c]=a(t,e))})}),s}}).call(this,n(66).Buffer)},function(e,t,n){var r=n(155),o=n(340);e.exports=n(129)?function(e,t,n){return r.f(e,t,o(1,n))}:function(e,t,n){return e[t]=n,e}},function(e,t){e.exports=function(e){if("function"!=typeof e)throw TypeError(e+" is not a function!");return e}},function(e,t,n){var r=n(108),o=n(606),i=n(607),a="[object Null]",s="[object Undefined]",u=r?r.toStringTag:void 0;e.exports=function(e){return null==e?void 0===e?s:a:u&&u in Object(e)?o(e):i(e)}},function(e,t,n){var r=n(624),o=n(627);e.exports=function(e,t){var n=o(e,t);return r(n)?n:void 0}},function(e,t,n){var r=n(384),o=n(664),i=n(109);e.exports=function(e){return i(e)?r(e):o(e)}},function(e,t,n){"use strict";var r=n(179),o=Object.keys||function(e){var t=[];for(var n in e)t.push(n);return t};e.exports=p;var i=n(141);i.inherits=n(47);var a=n(394),s=n(243);i.inherits(p,a);for(var u=o(s.prototype),c=0;c=t.length?{value:void 
0,done:!0}:(e=r(t,n),this._i+=e.length,{value:e,done:!1})})},function(e,t){e.exports={}},function(e,t,n){n(565);for(var r=n(31),o=n(77),i=n(104),a=n(35)("toStringTag"),s="CSSRuleList,CSSStyleDeclaration,CSSValueList,ClientRectList,DOMRectList,DOMStringList,DOMTokenList,DataTransferItemList,FileList,HTMLAllCollection,HTMLCollection,HTMLFormElement,HTMLSelectElement,MediaList,MimeTypeArray,NamedNodeMap,NodeList,PaintRequestList,Plugin,PluginArray,SVGLengthList,SVGNumberList,SVGPathSegList,SVGPointList,SVGStringList,SVGTransformList,SourceBufferList,StyleSheetList,TextTrackCueList,TextTrackList,TouchList".split(","),u=0;u1){for(var d=Array(h),m=0;m1){for(var g=Array(v),y=0;y=this._finalSize&&(this._update(this._block),this._block.fill(0));var n=8*this._len;if(n<=4294967295)this._block.writeUInt32BE(n,this._blockSize-4);else{var r=(4294967295&n)>>>0,o=(n-r)/4294967296;this._block.writeUInt32BE(o,this._blockSize-8),this._block.writeUInt32BE(r,this._blockSize-4)}this._update(this._block);var i=this._hash();return e?i.toString(e):i},o.prototype._update=function(){throw new Error("_update must be implemented by subclass")},e.exports=o},function(e,t,n){var r=n(65),o=n(411),i=n(412),a=n(37),s=n(162),u=n(228),c={},l={};(t=e.exports=function(e,t,n,p,f){var h,d,m,v,g=f?function(){return e}:u(e),y=r(n,p,t?2:1),b=0;if("function"!=typeof g)throw TypeError(e+" is not iterable!");if(i(g)){for(h=s(e.length);h>b;b++)if((v=t?y(a(d=e[b])[0],d[1]):y(e[b]))===c||v===l)return v}else for(m=g.call(e);!(d=m.next()).done;)if((v=o(m,y,d.value,t))===c||v===l)return v}).BREAK=c,t.RETURN=l},function(e,t,n){"use strict";function r(e){return null==e}e.exports.isNothing=r,e.exports.isObject=function(e){return"object"==typeof e&&null!==e},e.exports.toArray=function(e){return Array.isArray(e)?e:r(e)?[]:[e]},e.exports.repeat=function(e,t){var n,r="";for(n=0;n1&&void 
0!==arguments[1]?arguments[1]:{},r=Object(i.A)(t),a=r.type,s=r.example,u=r.properties,c=r.additionalProperties,l=r.items,p=n.includeReadOnly,f=n.includeWriteOnly;if(void 0!==s)return Object(i.e)(s,"$$ref",function(e){return"string"==typeof e&&e.indexOf("#")>-1});if(!a)if(u)a="object";else{if(!l)return;a="array"}if("object"===a){var d=Object(i.A)(u),m={};for(var v in d)d[v]&&d[v].deprecated||d[v]&&d[v].readOnly&&!p||d[v]&&d[v].writeOnly&&!f||(m[v]=e(d[v],n));if(!0===c)m.additionalProp1={};else if(c)for(var g=Object(i.A)(c),y=e(g,n),b=1;b<4;b++)m["additionalProp"+b]=y;return m}return"array"===a?o()(l.anyOf)?l.anyOf.map(function(t){return e(t,n)}):o()(l.oneOf)?l.oneOf.map(function(t){return e(t,n)}):[e(l,n)]:t.enum?t.default?t.default:Object(i.w)(t.enum)[0]:"file"!==a?h(t):void 0},m=function(e){return e.schema&&(e=e.schema),e.properties&&(e.type="object"),e},v=function e(t){var n,r,a=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},s=p()({},Object(i.A)(t)),u=s.type,c=s.properties,l=s.additionalProperties,f=s.items,d=s.example,m=a.includeReadOnly,v=a.includeWriteOnly,g=s.default,y={},b={},_=t.xml,w=_.name,x=_.prefix,E=_.namespace,S=s.enum;if(!u)if(c||l)u="object";else{if(!f)return;u="array"}if(n=(x?x+":":"")+(w=w||"notagname"),E){var C=x?"xmlns:"+x:"xmlns";b[C]=E}if("array"===u&&f){if(f.xml=f.xml||_||{},f.xml.name=f.xml.name||_.name,_.wrapped)return y[n]=[],o()(d)?d.forEach(function(t){f.example=t,y[n].push(e(f,a))}):o()(g)?g.forEach(function(t){f.default=t,y[n].push(e(f,a))}):y[n]=[e(f,a)],b&&y[n].push({_attr:b}),y;var k=[];return o()(d)?(d.forEach(function(t){f.example=t,k.push(e(f,a))}),k):o()(g)?(g.forEach(function(t){f.default=t,k.push(e(f,a))}),k):e(f,a)}if("object"===u){var O=Object(i.A)(c);for(var A in y[n]=[],d=d||{},O)if(O.hasOwnProperty(A)&&(!O[A].readOnly||m)&&(!O[A].writeOnly||v))if(O[A].xml=O[A].xml||{},O[A].xml.attribute){var T=o()(O[A].enum)&&O[A].enum[0],j=O[A].example,I=O[A].default;b[O[A].xml.name||A]=void 0!==j&&j||void 0!==d[A]&&d[A]||void 
0!==I&&I||T||h(O[A])}else{O[A].xml.name=O[A].xml.name||A,void 0===O[A].example&&void 0!==d[A]&&(O[A].example=d[A]);var P=e(O[A]);o()(P)?y[n]=y[n].concat(P):y[n].push(P)}return!0===l?y[n].push({additionalProp:"Anything can be here"}):l&&y[n].push({additionalProp:h(l)}),b&&y[n].push({_attr:b}),y}return r=void 0!==d?d:void 0!==g?g:o()(S)?S[0]:h(t),y[n]=b?[{_attr:b},r]:r,y};function g(e,t){var n=v(e,t);if(n)return s()(n,{declaration:!0,indent:"\t"})}var y=c()(g),b=c()(d)},function(e,t,n){"use strict";n.r(t),n.d(t,"UPDATE_CONFIGS",function(){return i}),n.d(t,"TOGGLE_CONFIGS",function(){return a}),n.d(t,"update",function(){return s}),n.d(t,"toggle",function(){return u}),n.d(t,"loaded",function(){return c});var r=n(2),o=n.n(r),i="configs_update",a="configs_toggle";function s(e,t){return{type:i,payload:o()({},e,t)}}function u(e){return{type:a,payload:e}}var c=function(){return function(){}}},function(e,t,n){"use strict";n.d(t,"a",function(){return a});var r=n(1),o=n.n(r),i=o.a.Set.of("type","format","items","default","maximum","exclusiveMaximum","minimum","exclusiveMinimum","maxLength","minLength","pattern","maxItems","minItems","uniqueItems","enum","multipleOf");function a(e){var t=(arguments.length>1&&void 0!==arguments[1]?arguments[1]:{}).isOAS3;if(!o.a.Map.isMap(e))return{schema:o.a.Map(),parameterContentMediaType:null};if(!t)return"body"===e.get("in")?{schema:e.get("schema",o.a.Map()),parameterContentMediaType:null}:{schema:e.filter(function(e,t){return i.includes(t)}),parameterContentMediaType:null};if(e.get("content")){var n=e.get("content",o.a.Map({})).keySeq().first();return{schema:e.getIn(["content",n,"schema"],o.a.Map()),parameterContentMediaType:n}}return{schema:e.get("schema",o.a.Map()),parameterContentMediaType:null}}},function(e,t,n){e.exports=n(782)},function(e,t,n){"use strict";n.r(t);var r=n(475),o="object"==typeof self&&self&&self.Object===Object&&self,i=(r.a||o||Function("return 
this")()).Symbol,a=Object.prototype,s=a.hasOwnProperty,u=a.toString,c=i?i.toStringTag:void 0;var l=function(e){var t=s.call(e,c),n=e[c];try{e[c]=void 0;var r=!0}catch(e){}var o=u.call(e);return r&&(t?e[c]=n:delete e[c]),o},p=Object.prototype.toString;var f=function(e){return p.call(e)},h="[object Null]",d="[object Undefined]",m=i?i.toStringTag:void 0;var v=function(e){return null==e?void 0===e?d:h:m&&m in Object(e)?l(e):f(e)};var g=function(e,t){return function(n){return e(t(n))}}(Object.getPrototypeOf,Object);var y=function(e){return null!=e&&"object"==typeof e},b="[object Object]",_=Function.prototype,w=Object.prototype,x=_.toString,E=w.hasOwnProperty,S=x.call(Object);var C=function(e){if(!y(e)||v(e)!=b)return!1;var t=g(e);if(null===t)return!0;var n=E.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&x.call(n)==S},k=n(333),O={INIT:"@@redux/INIT"};function A(e,t,n){var r;if("function"==typeof t&&void 0===n&&(n=t,t=void 0),void 0!==n){if("function"!=typeof n)throw new Error("Expected the enhancer to be a function.");return n(A)(e,t)}if("function"!=typeof e)throw new Error("Expected the reducer to be a function.");var o=e,i=t,a=[],s=a,u=!1;function c(){s===a&&(s=a.slice())}function l(){return i}function p(e){if("function"!=typeof e)throw new Error("Expected listener to be a function.");var t=!0;return c(),s.push(e),function(){if(t){t=!1,c();var n=s.indexOf(e);s.splice(n,1)}}}function f(e){if(!C(e))throw new Error("Actions must be plain objects. Use custom middleware for async actions.");if(void 0===e.type)throw new Error('Actions may not have an undefined "type" property. 
Have you misspelled a constant?');if(u)throw new Error("Reducers may not dispatch actions.");try{u=!0,i=o(i,e)}finally{u=!1}for(var t=a=s,n=0;n0&&void 0!==arguments[0]?arguments[0]:{},t=arguments[1];if(a)throw a;for(var r=!1,o={},s=0;s0?r:n)(e)}},function(e,t){e.exports={}},function(e,t){var n={}.toString;e.exports=function(e){return n.call(e).slice(8,-1)}},function(e,t){e.exports=!0},function(e,t){e.exports=function(e,t){return{enumerable:!(1&e),configurable:!(2&e),writable:!(4&e),value:t}}},function(e,t,n){var r=n(37),o=n(355),i=n(217),a=n(215)("IE_PROTO"),s=function(){},u=function(){var e,t=n(219)("iframe"),r=i.length;for(t.style.display="none",n(356).appendChild(t),t.src="javascript:",(e=t.contentWindow.document).open(),e.write(" + + + + diff --git a/internal/bminventory/inventory.go b/internal/bminventory/inventory.go index 0fb1fdaf9..6c4fc1349 100644 --- a/internal/bminventory/inventory.go +++ b/internal/bminventory/inventory.go @@ -3,28 +3,42 @@ package bminventory import ( "bytes" "context" + "crypto/x509" "encoding/json" "fmt" "io/ioutil" + "net" "net/http" + "net/url" + "regexp" + "sort" + "strconv" "strings" "sync" "text/template" "time" - "github.com/filanov/bm-inventory/restapi" - - "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/pkg/auth" - "github.com/filanov/bm-inventory/internal/events" + "github.com/filanov/bm-inventory/internal/identity" + "github.com/danielerez/go-dns-client/pkg/dnsproviders" "github.com/filanov/bm-inventory/internal/cluster" + "github.com/filanov/bm-inventory/internal/cluster/validations" + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/internal/events" "github.com/filanov/bm-inventory/internal/host" "github.com/filanov/bm-inventory/internal/installcfg" + "github.com/filanov/bm-inventory/internal/metrics" + "github.com/filanov/bm-inventory/internal/network" "github.com/filanov/bm-inventory/models" "github.com/filanov/bm-inventory/pkg/filemiddleware" 
"github.com/filanov/bm-inventory/pkg/job" logutil "github.com/filanov/bm-inventory/pkg/log" + "github.com/filanov/bm-inventory/pkg/requestid" + awsS3CLient "github.com/filanov/bm-inventory/pkg/s3Client" + "github.com/filanov/bm-inventory/pkg/transaction" + "github.com/filanov/bm-inventory/restapi" "github.com/filanov/bm-inventory/restapi/operations/installer" "github.com/go-openapi/runtime/middleware" "github.com/go-openapi/strfmt" @@ -33,44 +47,63 @@ import ( "github.com/jinzhu/gorm" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "github.com/thoas/go-funk" batch "k8s.io/api/batch/v1" core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/clientcmd" ) const kubeconfigPrefix = "generate-kubeconfig" - -const ( - ClusterStatusReady = "ready" - ClusterStatusInstalling = "installing" - ClusterStatusInstalled = "installed" - ClusterStatusError = "error" -) +const kubeconfig = "kubeconfig" const ( ResourceKindHost = "Host" ResourceKindCluster = "Cluster" ) -const defaultUser = "kubeadmin" +const DefaultUser = "kubeadmin" +const ConsoleUrlPrefix = "https://console-openshift-console.apps" + +var ( + DefaultClusterNetworkCidr = "10.128.0.0/14" + DefaultClusterNetworkHostPrefix = int64(23) + DefaultServiceNetworkCidr = "172.30.0.0/16" +) type Config struct { - ImageBuilder string `envconfig:"IMAGE_BUILDER" default:"quay.io/oscohen/installer-image-build"` - ImageBuilderCmd string `envconfig:"IMAGE_BUILDER_CMD" default:"echo hello"` - AgentDockerImg string `envconfig:"AGENT_DOCKER_IMAGE" default:"quay.io/oamizur/agent:latest"` - KubeconfigGenerator string `envconfig:"KUBECONFIG_GENERATE_IMAGE" default:"quay.io/ocpmetal/ignition-manifests-and-kubeconfig-generate:stable"` - KubeconfigGenerator4_4 string `envconfig:"KUBECONFIG_GENERATE_IMAGE" default:"quay.io/oscohen/ignition-manifests-and-kubeconfig-generate"` - InventoryURL string `envconfig:"INVENTORY_URL" default:"10.35.59.36"` - 
InventoryPort string `envconfig:"INVENTORY_PORT" default:"30485"` - S3EndpointURL string `envconfig:"S3_ENDPOINT_URL" default:"http://10.35.59.36:30925"` - S3Bucket string `envconfig:"S3_BUCKET" default:"test"` - AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID" default:"accessKey1"` - AwsSecretAccessKey string `envconfig:"AWS_SECRET_ACCESS_KEY" default:"verySecretKey1"` - Namespace string `envconfig:"NAMESPACE" default:"assisted-installer"` + ImageBuilder string `envconfig:"IMAGE_BUILDER" default:"quay.io/ocpmetal/installer-image-build:latest"` + AgentDockerImg string `envconfig:"AGENT_DOCKER_IMAGE" default:"quay.io/ocpmetal/agent:latest"` + KubeconfigGenerator string `envconfig:"KUBECONFIG_GENERATE_IMAGE" default:"quay.io/ocpmetal/ignition-manifests-and-kubeconfig-generate:latest"` // TODO: update the latest once the repository has git workflow + //[TODO] - change the default of Releae image to "", once everyine wll update their environment + ReleaseImage string `envconfig:"OPENSHIFT_INSTALL_RELEASE_IMAGE" default:"quay.io/openshift-release-dev/ocp-release@sha256:eab93b4591699a5a4ff50ad3517892653f04fb840127895bb3609b3cc68f98f3"` + InventoryURL string `envconfig:"INVENTORY_URL" default:"10.35.59.36"` + InventoryPort string `envconfig:"INVENTORY_PORT" default:"30485"` + S3EndpointURL string `envconfig:"S3_ENDPOINT_URL" default:"http://10.35.59.36:30925"` + S3Bucket string `envconfig:"S3_BUCKET" default:"test"` + AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID" default:"accessKey1"` + AwsSecretAccessKey string `envconfig:"AWS_SECRET_ACCESS_KEY" default:"verySecretKey1"` + Namespace string `envconfig:"NAMESPACE" default:"assisted-installer"` + UseK8s bool `envconfig:"USE_K8S" default:"true"` // TODO remove when jobs running deprecated + BaseDNSDomains map[string]string `envconfig:"BASE_DNS_DOMAINS" default:""` + JobCPULimit string `envconfig:"JOB_CPU_LIMIT" default:"500m"` + JobMemoryLimit string `envconfig:"JOB_MEMORY_LIMIT" default:"1000Mi"` + JobCPURequests 
string `envconfig:"JOB_CPU_REQUESTS" default:"300m"` + JobMemoryRequests string `envconfig:"JOB_MEMORY_REQUESTS" default:"400Mi"` } +const agentMessageOfTheDay = ` +** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** +This is a host being installed by the OpenShift Assisted Installer. +It will be installed from scratch during the installation. +The primary service is agent.service. To watch its status run e.g +sudo journalctl -u agent.service +** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** +` + const ignitionConfigFormat = `{ -"ignition": { "version": "3.0.0" }, +"ignition": { "version": "2.2.0" }, "passwd": { "users": [ {{.userSshKey}} @@ -80,11 +113,30 @@ const ignitionConfigFormat = `{ "units": [{ "name": "agent.service", "enabled": true, -"contents": "[Service]\nType=simple\nRestart=always\nEnvironment=HTTPS_PROXY={{.ProxyURL}}\nEnvironment=HTTP_PROXY={{.ProxyURL}}\nEnvironment=http_proxy={{.ProxyURL}}\nEnvironment=https_proxy={{.ProxyURL}}\nExecStartPre=docker run --privileged --rm -v /usr/local/bin:/hostbin {{.AgentDockerImg}} cp /usr/bin/agent /hostbin\nExecStart=/usr/local/bin/agent --host {{.InventoryURL}} --port {{.InventoryPort}} --cluster-id {{.clusterId}}\n\n[Install]\nWantedBy=multi-user.target" +"contents": "[Service]\nType=simple\nRestart=always\nRestartSec=3\nStartLimitIntervalSec=0\nEnvironment=HTTPS_PROXY={{.ProxyURL}}\nEnvironment=HTTP_PROXY={{.ProxyURL}}\nEnvironment=http_proxy={{.ProxyURL}}\nEnvironment=https_proxy={{.ProxyURL}}\nEnvironment=PULL_SECRET_TOKEN={{.PullSecretToken}}\nExecStartPre=podman run --privileged --rm -v /usr/local/bin:/hostbin {{.AgentDockerImg}} cp /usr/bin/agent /hostbin\nExecStart=/usr/local/bin/agent --host {{.InventoryURL}} --port {{.InventoryPort}} --cluster-id {{.clusterId}} --agent-version {{.AgentDockerImg}}\n\n[Install]\nWantedBy=multi-user.target" }] -} +}, +"storage": { + "files": [{ + "filesystem": "root", + "path": "/etc/motd", + "mode": 644, + "contents": { 
"source": "data:,{{.AGENT_MOTD}}" } + }] + } }` +var clusterFileNames = []string{ + "kubeconfig", + "bootstrap.ign", + "master.ign", + "worker.ign", + "metadata.json", + "kubeadmin-password", + "kubeconfig-noingress", + "install-config.yaml", +} + type debugCmd struct { cmd string stepID string @@ -92,7 +144,6 @@ type debugCmd struct { type bareMetalInventory struct { Config - imageBuildCmd []string db *gorm.DB debugCmdMap map[strfmt.UUID]debugCmd debugCmdMux sync.Mutex @@ -101,6 +152,8 @@ type bareMetalInventory struct { hostApi host.API clusterApi cluster.API eventsHandler events.Handler + s3Client awsS3CLient.S3Client + metricApi metrics.API } var _ restapi.InstallerAPI = &bareMetalInventory{} @@ -113,6 +166,8 @@ func NewBareMetalInventory( cfg Config, jobApi job.API, eventsHandler events.Handler, + s3Client awsS3CLient.S3Client, + metricApi metrics.API, ) *bareMetalInventory { b := &bareMetalInventory{ @@ -124,16 +179,43 @@ func NewBareMetalInventory( clusterApi: clusterApi, job: jobApi, eventsHandler: eventsHandler, + s3Client: s3Client, + metricApi: metricApi, } - if cfg.ImageBuilderCmd != "" { - b.imageBuildCmd = strings.Split(cfg.ImageBuilderCmd, " ") + if b.Config.UseK8s { + //Run first ISO dummy for image pull, this is done so that the image will be pulled and the api will take less time. 
+ b.generateDummyISOImage() } return b } +func (b *bareMetalInventory) generateDummyISOImage() { + var ( + dummyId = "00000000-0000-0000-0000-000000000000" + jobName = fmt.Sprintf("dummyimage-%s-%s", dummyId, time.Now().Format("20060102150405")) + imgName = fmt.Sprintf("discovery-image-%s", dummyId) + requestID = requestid.NewID() + log = requestid.RequestIDLogger(b.log, requestID) + ) + // create dummy job without uploading to s3, we just need to pull the image + if err := b.job.Create(requestid.ToContext(context.Background(), requestID), + b.createImageJob(jobName, imgName, "Dummy", false)); err != nil { + log.WithError(err).Errorf("failed to generate dummy ISO image") + } +} + +func getQuantity(s string) resource.Quantity { + reply, _ := resource.ParseQuantity(s) + return reply +} + // create discovery image generation job, return job name and error -func (b *bareMetalInventory) createImageJob(cluster *models.Cluster, jobName, imgName, ignitionConfig string) *batch.Job { +func (b *bareMetalInventory) createImageJob(jobName, imgName, ignitionConfig string, performUpload bool) *batch.Job { + var command []string + if !performUpload { + command = []string{"echo", "pass"} + } return &batch.Job{ TypeMeta: meta.TypeMeta{ Kind: "Job", @@ -153,9 +235,19 @@ func (b *bareMetalInventory) createImageJob(cluster *models.Cluster, jobName, im Spec: core.PodSpec{ Containers: []core.Container{ { + Resources: core.ResourceRequirements{ + Limits: core.ResourceList{ + "cpu": getQuantity(b.JobCPULimit), + "memory": getQuantity(b.JobMemoryLimit), + }, + Requests: core.ResourceList{ + "cpu": getQuantity(b.JobCPURequests), + "memory": getQuantity(b.JobMemoryRequests), + }, + }, + Command: command, Name: "image-creator", Image: b.Config.ImageBuilder, - Command: b.imageBuildCmd, ImagePullPolicy: "IfNotPresent", Env: []core.EnvVar{ { @@ -192,21 +284,32 @@ func (b *bareMetalInventory) createImageJob(cluster *models.Cluster, jobName, im } } -func (b *bareMetalInventory) 
formatIgnitionFile(cluster *models.Cluster, params installer.GenerateClusterISOParams) (string, error) { +func (b *bareMetalInventory) formatIgnitionFile(cluster *common.Cluster, params installer.GenerateClusterISOParams) (string, error) { + creds, err := validations.ParsePullSecret(cluster.PullSecret) + if err != nil { + return "", err + } + r, ok := creds["cloud.openshift.com"] + if !ok { + return "", fmt.Errorf("Pull secret does not contain auth for cloud.openshift.com") + } + var ignitionParams = map[string]string{ - "userSshKey": b.getUserSshKey(params), - "AgentDockerImg": b.AgentDockerImg, - "InventoryURL": b.InventoryURL, - "InventoryPort": b.InventoryPort, - "clusterId": cluster.ID.String(), - "ProxyURL": params.ImageCreateParams.ProxyURL, + "userSshKey": b.getUserSshKey(params), + "AgentDockerImg": b.AgentDockerImg, + "InventoryURL": strings.TrimSpace(b.InventoryURL), + "InventoryPort": strings.TrimSpace(b.InventoryPort), + "clusterId": cluster.ID.String(), + "ProxyURL": params.ImageCreateParams.ProxyURL, + "PullSecretToken": r.AuthRaw, + "AGENT_MOTD": url.PathEscape(agentMessageOfTheDay), } tmpl, err := template.New("ignitionConfig").Parse(ignitionConfigFormat) if err != nil { return "", err } buf := &bytes.Buffer{} - if err := tmpl.Execute(buf, ignitionParams); err != nil { + if err = tmpl.Execute(buf, ignitionParams); err != nil { return "", err } return buf.String(), nil @@ -230,22 +333,44 @@ func (b *bareMetalInventory) RegisterCluster(ctx context.Context, params install id := strfmt.UUID(uuid.New().String()) url := installer.GetClusterURL{ClusterID: id} log.Infof("Register cluster: %s with id %s", swag.StringValue(params.NewClusterParams.Name), id) - cluster := models.Cluster{ + + if params.NewClusterParams.ClusterNetworkCidr == nil { + params.NewClusterParams.ClusterNetworkCidr = &DefaultClusterNetworkCidr + } + if params.NewClusterParams.ClusterNetworkHostPrefix == 0 { + params.NewClusterParams.ClusterNetworkHostPrefix = 
DefaultClusterNetworkHostPrefix + } + if params.NewClusterParams.ServiceNetworkCidr == nil { + params.NewClusterParams.ServiceNetworkCidr = &DefaultServiceNetworkCidr + } + + cluster := common.Cluster{Cluster: models.Cluster{ ID: &id, Href: swag.String(url.String()), Kind: swag.String(ResourceKindCluster), - APIVip: params.NewClusterParams.APIVip, BaseDNSDomain: params.NewClusterParams.BaseDNSDomain, - ClusterNetworkCidr: params.NewClusterParams.ClusterNetworkCidr, + ClusterNetworkCidr: swag.StringValue(params.NewClusterParams.ClusterNetworkCidr), ClusterNetworkHostPrefix: params.NewClusterParams.ClusterNetworkHostPrefix, - DNSVip: params.NewClusterParams.DNSVip, IngressVip: params.NewClusterParams.IngressVip, Name: swag.StringValue(params.NewClusterParams.Name), OpenshiftVersion: swag.StringValue(params.NewClusterParams.OpenshiftVersion), - PullSecret: params.NewClusterParams.PullSecret, - ServiceNetworkCidr: params.NewClusterParams.ServiceNetworkCidr, + ServiceNetworkCidr: swag.StringValue(params.NewClusterParams.ServiceNetworkCidr), SSHPublicKey: params.NewClusterParams.SSHPublicKey, UpdatedAt: strfmt.DateTime{}, + UserID: auth.UserIDFromContext(ctx), + OrgID: auth.OrgIDFromContext(ctx), + }} + if params.NewClusterParams.PullSecret != "" { + err := validations.ValidatePullSecret(params.NewClusterParams.PullSecret) + if err != nil { + log.WithError(err).Errorf("Pull-secret for new cluster has invalid format") + return installer.NewRegisterClusterBadRequest(). 
+ WithPayload(common.GenerateError(http.StatusBadRequest, errors.New("Pull-secret has invalid format"))) + } + setPullSecret(&cluster, params.NewClusterParams.PullSecret) + } + if err := validations.ValidateClusterNameFormat(swag.StringValue(params.NewClusterParams.Name)); err != nil { + return common.NewApiError(http.StatusBadRequest, err) } err := b.clusterApi.RegisterCluster(ctx, &cluster) @@ -255,18 +380,24 @@ func (b *bareMetalInventory) RegisterCluster(ctx context.Context, params install WithPayload(common.GenerateError(http.StatusInternalServerError, err)) } - return installer.NewRegisterClusterCreated().WithPayload(&cluster) + b.metricApi.ClusterRegistered(swag.StringValue(params.NewClusterParams.OpenshiftVersion)) + return installer.NewRegisterClusterCreated().WithPayload(&cluster.Cluster) } func (b *bareMetalInventory) DeregisterCluster(ctx context.Context, params installer.DeregisterClusterParams) middleware.Responder { log := logutil.FromContext(ctx, b.log) - var cluster models.Cluster + var cluster common.Cluster + log.Infof("Deregister cluster id %s", params.ClusterID) if err := b.db.First(&cluster, "id = ?", params.ClusterID).Error; err != nil { return installer.NewDeregisterClusterNotFound(). 
WithPayload(common.GenerateError(http.StatusNotFound, err)) } + if err := b.deleteDNSRecordSets(ctx, cluster); err != nil { + log.Warnf("failed to delete DNS record sets for base domain: %s", cluster.BaseDNSDomain) + } + err := b.clusterApi.DeregisterCluster(ctx, &cluster) if err != nil { log.WithError(err).Errorf("failed to deregister cluster cluster %s", params.ClusterID) @@ -279,7 +410,7 @@ func (b *bareMetalInventory) DeregisterCluster(ctx context.Context, params insta func (b *bareMetalInventory) DownloadClusterISO(ctx context.Context, params installer.DownloadClusterISOParams) middleware.Responder { log := logutil.FromContext(ctx, b.log) - if err := b.db.First(&models.Cluster{}, "id = ?", params.ClusterID).Error; err != nil { + if err := b.db.First(&common.Cluster{}, "id = ?", params.ClusterID).Error; err != nil { log.WithError(err).Errorf("failed to get cluster %s", params.ClusterID) return installer.NewDownloadClusterISONotFound(). WithPayload(common.GenerateError(http.StatusNotFound, err)) @@ -291,33 +422,47 @@ func (b *bareMetalInventory) DownloadClusterISO(ctx context.Context, params inst resp, err := http.Get(imageURL) if err != nil { log.WithError(err).Errorf("Failed to get ISO: %s", imgName) + msg := "Failed to download image: error fetching from storage backend" + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityError, msg, time.Now()) return installer.NewDownloadClusterISOInternalServerError(). WithPayload(common.GenerateError(http.StatusInternalServerError, err)) } if resp.StatusCode != http.StatusOK { defer resp.Body.Close() - b, _ := ioutil.ReadAll(resp.Body) - log.WithError(fmt.Errorf("%d - %s", resp.StatusCode, string(b))). + body, _ := ioutil.ReadAll(resp.Body) + log.WithError(fmt.Errorf("%d - %s", resp.StatusCode, string(body))). 
Errorf("Failed to get ISO: %s", imgName) if resp.StatusCode == http.StatusNotFound { + msg := "Failed to download image: the image was not found (perhaps it expired) - please generate the image and try again" + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityError, msg, time.Now()) return installer.NewDownloadClusterISONotFound(). - WithPayload(common.GenerateError(http.StatusNotFound, errors.New(string(b)))) + WithPayload(common.GenerateError(http.StatusNotFound, errors.New("The image was not found "+ + "(perhaps it expired) - please generate the image and try again"))) } + msg := fmt.Sprintf("Failed to download image: error fetching from storage backend (%d)", resp.StatusCode) + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityError, msg, time.Now()) return installer.NewDownloadClusterISOInternalServerError(). - WithPayload(common.GenerateError(http.StatusInternalServerError, errors.New(string(b)))) + WithPayload(common.GenerateError(http.StatusInternalServerError, errors.New(string(body)))) } + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityInfo, "Started image download", time.Now()) return filemiddleware.NewResponder(installer.NewDownloadClusterISOOK().WithPayload(resp.Body), - fmt.Sprintf("cluster-%s-discovery.iso", params.ClusterID.String())) + fmt.Sprintf("cluster-%s-discovery.iso", params.ClusterID.String()), + resp.ContentLength) } func (b *bareMetalInventory) GenerateClusterISO(ctx context.Context, params installer.GenerateClusterISOParams) middleware.Responder { log := logutil.FromContext(ctx, b.log) log.Infof("prepare image for cluster %s", params.ClusterID) - var cluster models.Cluster + var cluster common.Cluster + txSuccess := false tx := b.db.Begin() defer func() { + if !txSuccess { + log.Error("generate cluster ISO failed") + tx.Rollback() + } if r := recover(); r != nil { log.Error("generate cluster ISO failed") tx.Rollback() @@ -325,6 +470,8 @@ func (b 
*bareMetalInventory) GenerateClusterISO(ctx context.Context, params inst }() if tx.Error != nil { + msg := "Failed to generate image: error starting DB transaction" + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityError, msg, time.Now()) log.WithError(tx.Error).Errorf("failed to start db transaction") return installer.NewInstallClusterInternalServerError(). WithPayload(common.GenerateError(http.StatusInternalServerError, errors.New("DB error, failed to start transaction"))) @@ -332,7 +479,6 @@ func (b *bareMetalInventory) GenerateClusterISO(ctx context.Context, params inst if err := tx.First(&cluster, "id = ?", params.ClusterID).Error; err != nil { log.WithError(err).Errorf("failed to get cluster: %s", params.ClusterID) - tx.Rollback() return installer.NewGenerateClusterISONotFound(). WithPayload(common.GenerateError(http.StatusNotFound, err)) } @@ -345,38 +491,88 @@ func (b *bareMetalInventory) GenerateClusterISO(ctx context.Context, params inst previousCreatedAt := time.Time(cluster.ImageInfo.CreatedAt) if previousCreatedAt.Add(10 * time.Second).After(now) { log.Error("request came too soon after previous request") - tx.Rollback() - return installer.NewGenerateClusterISOConflict() + msg := "Failed to generate image: another request to generate an image has been recently submitted - please wait a few seconds and try again" + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityError, msg, time.Now()) + return installer.NewGenerateClusterISOConflict().WithPayload(common.GenerateError(http.StatusConflict, + errors.New("Another request to generate an image has been recently submitted. Please wait a few seconds and try again."))) + } + + if !cluster.PullSecretSet { + errMsg := "Can't generate cluster ISO without pull secret" + log.Error(errMsg) + return installer.NewGenerateClusterISOBadRequest(). 
+ WithPayload(common.GenerateError(http.StatusBadRequest, errors.New(errMsg))) } - cluster.ImageInfo.ProxyURL = params.ImageCreateParams.ProxyURL - cluster.ImageInfo.SSHPublicKey = params.ImageCreateParams.SSHPublicKey - cluster.ImageInfo.CreatedAt = strfmt.DateTime(now) + /* If the request has the same parameters as the previous request and the image is still in S3, + just refresh the timestamp. + */ + var imageExists bool + if cluster.ImageInfo.ProxyURL == params.ImageCreateParams.ProxyURL && + cluster.ImageInfo.SSHPublicKey == params.ImageCreateParams.SSHPublicKey && + cluster.ImageInfo.GeneratorVersion == b.Config.ImageBuilder { + var err error + imgName := getImageName(params.ClusterID) + imageExists, err = b.s3Client.UpdateObjectTag(ctx, imgName, b.S3Bucket, "create_sec_since_epoch", strconv.FormatInt(now.Unix(), 10)) + if err != nil { + log.WithError(tx.Error).Errorf("failed to contact storage backend") + msg := "Failed to generate image: error contacting storage backend" + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityError, msg, time.Now()) + return installer.NewInstallClusterInternalServerError(). 
+ WithPayload(common.GenerateError(http.StatusInternalServerError, errors.New("failed to contact storage backend"))) + } + } - if err := tx.Model(&cluster).Update(cluster).Error; err != nil { - tx.Rollback() - log.WithError(err).Errorf("failed to update cluster: %s", params.ClusterID) + updates := map[string]interface{}{} + updates["image_proxy_url"] = params.ImageCreateParams.ProxyURL + updates["image_ssh_public_key"] = params.ImageCreateParams.SSHPublicKey + updates["image_created_at"] = strfmt.DateTime(now) + updates["image_generator_version"] = b.Config.ImageBuilder + dbReply := tx.Model(&common.Cluster{}).Where("id = ?", cluster.ID.String()).Updates(updates) + if dbReply.Error != nil { + log.WithError(dbReply.Error).Errorf("failed to update cluster: %s", params.ClusterID) + msg := "Failed to generate image: error updating metadata" + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityError, msg, time.Now()) return installer.NewGenerateClusterISOInternalServerError() } - if tx.Commit().Error != nil { - tx.Rollback() + if err := tx.Commit().Error; err != nil { + log.Error(err) + msg := "Failed to generate image: error committing the transaction" + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityError, msg, time.Now()) return installer.NewGenerateClusterISOInternalServerError() } + txSuccess = true + if err := b.db.Preload("Hosts").First(&cluster, "id = ?", params.ClusterID).Error; err != nil { + log.WithError(err).Errorf("failed to get cluster %s after update", params.ClusterID) + msg := "Failed to generate image: error fetching updated cluster metadata" + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityError, msg, time.Now()) + return installer.NewUpdateClusterInternalServerError(). 
+ WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + + if imageExists { + log.Infof("Re-used existing cluster <%s> image", params.ClusterID) + b.eventsHandler.AddEvent(ctx, cluster.ID.String(), models.EventSeverityInfo, "Re-used existing image rather than generating a new one", time.Now()) + return installer.NewGenerateClusterISOCreated().WithPayload(&cluster.Cluster) + } // Kill the previous job in case it's still running prevJobName := fmt.Sprintf("createimage-%s-%s", cluster.ID, previousCreatedAt.Format("20060102150405")) log.Info("Attempting to delete job %s", prevJobName) if err := b.job.Delete(ctx, prevJobName, b.Namespace); err != nil { log.WithError(err).Errorf("failed to kill previous job in cluster %s", cluster.ID) + msg := "Failed to generate image: error stopping previous image generation" + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityError, msg, time.Now()) return installer.NewGenerateClusterISOInternalServerError(). WithPayload(common.GenerateError(http.StatusInternalServerError, err)) } - log.Info("Finished attempting to delete job %s", prevJobName) ignitionConfig, formatErr := b.formatIgnitionFile(&cluster, params) if formatErr != nil { log.WithError(formatErr).Errorf("failed to format ignition config file for cluster %s", cluster.ID) + msg := "Failed to generate image: error formatting ignition file" + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityError, msg, time.Now()) return installer.NewGenerateClusterISOInternalServerError(). 
WithPayload(common.GenerateError(http.StatusInternalServerError, formatErr)) } @@ -384,103 +580,267 @@ func (b *bareMetalInventory) GenerateClusterISO(ctx context.Context, params inst // This job name is exactly 63 characters which is the maximum for a job - be careful if modifying jobName := fmt.Sprintf("createimage-%s-%s", cluster.ID, now.Format("20060102150405")) imgName := getImageName(params.ClusterID) - log.Info("Creating job %s", jobName) - if err := b.job.Create(ctx, b.createImageJob(&cluster, jobName, imgName, ignitionConfig)); err != nil { + log.Infof("Creating job %s", jobName) + if err := b.job.Create(ctx, b.createImageJob(jobName, imgName, ignitionConfig, true)); err != nil { log.WithError(err).Error("failed to create image job") + msg := "Failed to generate image: error creating image generation job" + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityError, msg, time.Now()) return installer.NewGenerateClusterISOInternalServerError(). WithPayload(common.GenerateError(http.StatusInternalServerError, err)) } if err := b.job.Monitor(ctx, jobName, b.Namespace); err != nil { log.WithError(err).Error("image creation failed") + msg := "Failed to generate image: error during image generation job" + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityError, msg, time.Now()) return installer.NewGenerateClusterISOInternalServerError(). 
WithPayload(common.GenerateError(http.StatusInternalServerError, err)) } log.Infof("Generated cluster <%s> image with ignition config %s", params.ClusterID, ignitionConfig) - return installer.NewGenerateClusterISOCreated().WithPayload(&cluster) + msg := fmt.Sprintf("Generated image (proxy URL is \"%s\", ", params.ImageCreateParams.ProxyURL) + if params.ImageCreateParams.SSHPublicKey != "" { + msg += "SSH public key is set)" + } else { + msg += "SSH public key is not set)" + } + b.eventsHandler.AddEvent(ctx, cluster.ID.String(), models.EventSeverityInfo, msg, time.Now()) + return installer.NewGenerateClusterISOCreated().WithPayload(&cluster.Cluster) } func getImageName(clusterID strfmt.UUID) string { return fmt.Sprintf("discovery-image-%s", clusterID.String()) } -func (b *bareMetalInventory) InstallCluster(ctx context.Context, params installer.InstallClusterParams) middleware.Responder { - log := logutil.FromContext(ctx, b.log) - var cluster models.Cluster - var err error +type clusterInstaller struct { + ctx context.Context + b *bareMetalInventory + log logrus.FieldLogger + params installer.InstallClusterParams +} - tx := b.db.Begin() - if tx.Error != nil { - log.WithError(tx.Error).Errorf("failed to start db transaction") - return installer.NewInstallClusterInternalServerError(). 
- WithPayload(common.GenerateInternalFromError(err)) +func (b *bareMetalInventory) verifyClusterNetworkConfig(ctx context.Context, cluster *common.Cluster) error { + cidr, err := network.CalculateMachineNetworkCIDR(cluster.APIVip, cluster.IngressVip, cluster.Hosts) + if err != nil { + return common.NewApiError(http.StatusBadRequest, err) } - defer func() { - if r := recover(); r != nil { - log.Error("update cluster failed") - tx.Rollback() + if cidr != cluster.MachineNetworkCidr { + return common.NewApiError(http.StatusBadRequest, + fmt.Errorf("Cluster machine CIDR %s is different than the calculated CIDR %s", cluster.MachineNetworkCidr, cidr)) + } + if err = network.VerifyVips(cluster.Hosts, cluster.MachineNetworkCidr, cluster.APIVip, cluster.IngressVip, + true, b.log); err != nil { + return common.NewApiError(http.StatusBadRequest, err) + } + machineCidrHosts, err := network.GetMachineCIDRHosts(b.log, cluster) + if err != nil { + return common.NewApiError(http.StatusBadRequest, err) + } + masterNodesIds, err := b.clusterApi.GetMasterNodesIds(ctx, cluster, b.db) + if err != nil { + return common.NewApiError(http.StatusInternalServerError, err) + } + hostIDInCidrHosts := func(id strfmt.UUID, hosts []*models.Host) bool { + for _, h := range hosts { + if *h.ID == id { + return true + } } - }() + return false + } - if err = tx.Preload("Hosts").First(&cluster, "id = ?", params.ClusterID).Error; err != nil { - return installer.NewInstallClusterNotFound(). 
- WithPayload(common.GenerateError(http.StatusNotFound, err)) + for _, id := range masterNodesIds { + if !hostIDInCidrHosts(*id, machineCidrHosts) { + return common.NewApiError(http.StatusBadRequest, + fmt.Errorf("Master id %s does not have an interface with IP belonging to machine CIDR %s", + *id, cluster.MachineNetworkCidr)) + } + } + return nil +} + +func (c *clusterInstaller) installHosts(cluster *common.Cluster, tx *gorm.DB) error { + success := true + err := errors.Errorf("Failed to install cluster <%s>", cluster.ID.String()) + for i := range cluster.Hosts { + if installErr := c.b.hostApi.Install(c.ctx, cluster.Hosts[i], tx); installErr != nil { + success = false + // collect multiple errors + err = errors.Wrap(installErr, err.Error()) + } + } + if !success { + return common.NewApiError(http.StatusConflict, err) + } + return nil +} + +func (b *bareMetalInventory) refreshAllHosts(ctx context.Context, cluster *common.Cluster) error { + for _, chost := range cluster.Hosts { + if swag.StringValue(chost.Status) != host.HostStatusKnown { + return common.NewApiError(http.StatusBadRequest, errors.Errorf("Host %s is in status %s and not ready for install", chost.ID.String(), + swag.StringValue(chost.Status))) + } + err := b.hostApi.RefreshStatus(ctx, chost, b.db) + if err != nil { + return err + } + } + return nil +} + +func (c clusterInstaller) install(tx *gorm.DB) error { + var cluster common.Cluster + var err error + + // in case host monitor already updated the state we need to use FOR UPDATE option + transaction.AddForUpdateQueryOption(tx) + + if err = tx.Preload("Hosts").First(&cluster, "id = ?", c.params.ClusterID).Error; err != nil { + return errors.Wrapf(err, "failed to find cluster %s", c.params.ClusterID) + } + + if err = c.b.createDNSRecordSets(c.ctx, cluster); err != nil { + return errors.Wrapf(err, "failed to create DNS record sets for base domain: %s", cluster.BaseDNSDomain) } - if err = b.clusterApi.Install(ctx, &cluster, tx); err != nil { - 
log.WithError(err).Errorf("failed to install cluster %s", cluster.ID.String()) - tx.Rollback() - return installer.NewInstallClusterConflict().WithPayload(common.GenerateError(http.StatusConflict, err)) + if err = c.b.clusterApi.Install(c.ctx, &cluster, tx); err != nil { + return errors.Wrapf(err, "failed to install cluster %s", cluster.ID.String()) } // set one of the master nodes as bootstrap - if err = b.setBootstrapHost(ctx, cluster, tx); err != nil { - tx.Rollback() - return installer.NewInstallClusterInternalServerError(). - WithPayload(common.GenerateInternalFromError(err)) + if err = c.b.setBootstrapHost(c.ctx, cluster, tx); err != nil { + return err } // move hosts states to installing - for i := range cluster.Hosts { - if _, err = b.hostApi.Install(ctx, cluster.Hosts[i], tx); err != nil { - log.WithError(err).Errorf("failed to install hosts <%s> in cluster: %s", - cluster.Hosts[i].ID.String(), cluster.ID.String()) - tx.Rollback() - return installer.NewInstallClusterConflict().WithPayload(common.GenerateError(http.StatusConflict, err)) + if err = c.installHosts(&cluster, tx); err != nil { + return err + } + + return nil +} + +func (b *bareMetalInventory) validateAllHostsCanBeInstalled(cluster *common.Cluster) error { + notInstallableHosts := make([]string, 0, len(cluster.Hosts)) + for _, h := range cluster.Hosts { + if !b.hostApi.IsInstallable(h) { + notInstallableHosts = append(notInstallableHosts, h.ID.String()) } } - if err = b.generateClusterInstallConfig(ctx, cluster); err != nil { - tx.Rollback() - return installer.NewInstallClusterInternalServerError(). 
- WithPayload(common.GenerateInternalFromError(err)) + + if len(notInstallableHosts) > 0 { + return common.NewApiError(http.StatusConflict, + errors.Errorf("Not all hosts are ready for installation: %s", notInstallableHosts)) } - if err = tx.Commit().Error; err != nil { - tx.Rollback() - log.WithError(err).Errorf("failed to commit cluster %s changes on installation", cluster.ID.String()) - return installer.NewInstallClusterInternalServerError(). - WithPayload(common.GenerateInternalFromError(err)) + return nil +} + +func (b *bareMetalInventory) InstallCluster(ctx context.Context, params installer.InstallClusterParams) middleware.Responder { + log := logutil.FromContext(ctx, b.log) + var cluster common.Cluster + var err error + + if err = b.db.Preload("Hosts", "status <> ?", host.HostStatusDisabled).First(&cluster, "id = ?", params.ClusterID).Error; err != nil { + return common.NewApiError(http.StatusNotFound, err) + } + if err = b.refreshAllHosts(ctx, &cluster); err != nil { + return common.GenerateErrorResponder(err) + } + + // Reload again after refresh + if err = b.db.Preload("Hosts", "status <> ?", host.HostStatusDisabled).First(&cluster, "id = ?", params.ClusterID).Error; err != nil { + return common.NewApiError(http.StatusNotFound, err) + } + if err = b.verifyClusterNetworkConfig(ctx, &cluster); err != nil { + return common.GenerateErrorResponder(err) + } + + if err = b.validateAllHostsCanBeInstalled(&cluster); err != nil { + return common.GenerateErrorResponder(err) + } + + // prepare cluster and hosts for installation + err = b.db.Transaction(func(tx *gorm.DB) error { + // in case host monitor already updated the state we need to use FOR UPDATE option + transaction.AddForUpdateQueryOption(tx) + + if err = b.clusterApi.PrepareForInstallation(ctx, &cluster, tx); err != nil { + return err + } + + for i := range cluster.Hosts { + if err = b.hostApi.PrepareForInstallation(ctx, cluster.Hosts[i], tx); err != nil { + return err + } + } + return nil + }) + + if err 
!= nil { + return common.GenerateErrorResponder(err) } + if err = b.db.Preload("Hosts").First(&cluster, "id = ?", params.ClusterID).Error; err != nil { - return installer.NewInstallClusterInternalServerError(). - WithPayload(common.GenerateInternalFromError(err)) + return common.GenerateErrorResponder(err) } - return installer.NewInstallClusterAccepted().WithPayload(&cluster) + + go func() { + var err error + asyncCtx := requestid.ToContext(context.Background(), requestid.FromContext(ctx)) + + defer func() { + if err != nil { + log.WithError(err).Warn("Cluster install") + b.clusterApi.HandlePreInstallError(asyncCtx, &cluster, err) + } + }() + + if err = b.generateClusterInstallConfig(asyncCtx, cluster); err != nil { + return + } + + cInstaller := clusterInstaller{ + ctx: asyncCtx, // Need a new context for async part + b: b, + log: log, + params: params, + } + err = b.db.Transaction(cInstaller.install) + if err == nil { + //send metric when the installation process has been started + b.metricApi.InstallationStarted(cluster.OpenshiftVersion) + } + }() + + log.Infof("Successfully prepared cluster <%s> for installation", params.ClusterID.String()) + return installer.NewInstallClusterAccepted().WithPayload(&cluster.Cluster) } -func (b *bareMetalInventory) setBootstrapHost(ctx context.Context, cluster models.Cluster, db *gorm.DB) error { +func (b *bareMetalInventory) setBootstrapHost(ctx context.Context, cluster common.Cluster, db *gorm.DB) error { log := logutil.FromContext(ctx, b.log) + // check if cluster already has bootstrap + for _, h := range cluster.Hosts { + if h.Bootstrap { + log.Infof("Bootstrap ID is %s", h.ID) + return nil + } + } + masterNodesIds, err := b.clusterApi.GetMasterNodesIds(ctx, &cluster, db) if err != nil { log.WithError(err).Errorf("failed to get cluster %s master node id's", cluster.ID) return errors.Wrapf(err, "Failed to get cluster %s master node id's", cluster.ID) } + if len(masterNodesIds) == 0 { + return errors.Errorf("Cluster have no 
master hosts that can operate as bootstrap") + } bootstrapId := masterNodesIds[len(masterNodesIds)-1] log.Infof("Bootstrap ID is %s", bootstrapId) for i := range cluster.Hosts { if cluster.Hosts[i].ID.String() == bootstrapId.String() { - err = b.hostApi.SetBootstrap(ctx, cluster.Hosts[i], true) + err = b.hostApi.SetBootstrap(ctx, cluster.Hosts[i], true, db) if err != nil { log.WithError(err).Errorf("failed to update bootstrap host for cluster %s", cluster.ID) return errors.Wrapf(err, "Failed to update bootstrap host for cluster %s", cluster.ID) @@ -490,15 +850,18 @@ func (b *bareMetalInventory) setBootstrapHost(ctx context.Context, cluster model return nil } -func (b *bareMetalInventory) generateClusterInstallConfig(ctx context.Context, cluster models.Cluster) error { +func (b *bareMetalInventory) generateClusterInstallConfig(ctx context.Context, cluster common.Cluster) error { log := logutil.FromContext(ctx, b.log) - cfg, err := installcfg.GetInstallConfig(&cluster) + cfg, err := installcfg.GetInstallConfig(log, &cluster) if err != nil { log.WithError(err).Errorf("failed to get install config for cluster %s", cluster.ID) return errors.Wrapf(err, "failed to get install config for cluster %s", cluster.ID) } - jobName := fmt.Sprintf("%s-%s-%s", kubeconfigPrefix, cluster.ID.String(), uuid.New().String())[:63] + + ctime := time.Time(cluster.CreatedAt) + cTimestamp := strconv.FormatInt(ctime.Unix(), 10) + jobName := fmt.Sprintf("%s-%s-%s", kubeconfigPrefix, cluster.ID.String(), cTimestamp)[:63] if err := b.job.Create(ctx, b.createKubeconfigJob(&cluster, jobName, cfg)); err != nil { log.WithError(err).Errorf("Failed to create kubeconfig generation job %s for cluster %s", jobName, cluster.ID) return errors.Wrapf(err, "Failed to create kubeconfig generation job %s for cluster %s", jobName, cluster.ID) @@ -508,16 +871,55 @@ func (b *bareMetalInventory) generateClusterInstallConfig(ctx context.Context, c log.WithError(err).Errorf("Generating kubeconfig files %s failed for 
cluster %s", jobName, cluster.ID) return errors.Wrapf(err, "Generating kubeconfig files %s failed for cluster %s", jobName, cluster.ID) } + + return b.clusterApi.SetGeneratorVersion(&cluster, b.Config.KubeconfigGenerator, b.db) +} + +func (b *bareMetalInventory) refreshClusterHosts(ctx context.Context, cluster *common.Cluster, tx *gorm.DB, log logrus.FieldLogger) error { + for _, h := range cluster.Hosts { + var host models.Host + var err error + if err = tx.Take(&host, "id = ? and cluster_id = ?", + h.ID.String(), cluster.ID.String()).Error; err != nil { + log.WithError(err).Errorf("failed to find host <%s> in cluster <%s>", + h.ID.String(), cluster.ID.String()) + return common.NewApiError(http.StatusNotFound, err) + } + if err = b.hostApi.RefreshStatus(ctx, &host, tx); err != nil { + log.WithError(err).Errorf("failed to refresh state of host %s cluster %s", *h.ID, cluster.ID.String()) + return common.NewApiError(http.StatusInternalServerError, err) + } + } return nil } func (b *bareMetalInventory) UpdateCluster(ctx context.Context, params installer.UpdateClusterParams) middleware.Responder { log := logutil.FromContext(ctx, b.log) - var cluster models.Cluster + var cluster common.Cluster + var err error log.Info("update cluster ", params.ClusterID) + if swag.StringValue(params.ClusterUpdateParams.PullSecret) != "" { + err = validations.ValidatePullSecret(*params.ClusterUpdateParams.PullSecret) + if err != nil { + log.WithError(err).Errorf("Pull-secret for cluster %s, has invalid format", params.ClusterID) + return installer.NewUpdateClusterBadRequest(). 
+ WithPayload(common.GenerateError(http.StatusBadRequest, errors.New("Pull-secret has invalid format"))) + } + } + if newClusterName := swag.StringValue(params.ClusterUpdateParams.Name); newClusterName != "" { + if err = validations.ValidateClusterNameFormat(newClusterName); err != nil { + return common.NewApiError(http.StatusBadRequest, err) + } + } + + txSuccess := false tx := b.db.Begin() defer func() { + if !txSuccess { + log.Error("update cluster failed") + tx.Rollback() + } if r := recover(); r != nil { log.Error("update cluster failed") tx.Rollback() @@ -530,124 +932,320 @@ func (b *bareMetalInventory) UpdateCluster(ctx context.Context, params installer WithPayload(common.GenerateError(http.StatusInternalServerError, errors.New("DB error, failed to start transaction"))) } - if err := tx.First(&cluster, "id = ?", params.ClusterID).Error; err != nil { + // in case host monitor already updated the state we need to use FOR UPDATE option + transaction.AddForUpdateQueryOption(tx) + + if err = tx.Preload("Hosts").First(&cluster, "id = ?", params.ClusterID).Error; err != nil { log.WithError(err).Errorf("failed to get cluster: %s", params.ClusterID) - tx.Rollback() return installer.NewUpdateClusterNotFound().WithPayload(common.GenerateError(http.StatusNotFound, err)) } - cluster.Name = params.ClusterUpdateParams.Name - cluster.APIVip = params.ClusterUpdateParams.APIVip - cluster.BaseDNSDomain = params.ClusterUpdateParams.BaseDNSDomain - cluster.ClusterNetworkCidr = params.ClusterUpdateParams.ClusterNetworkCidr - cluster.ClusterNetworkHostPrefix = params.ClusterUpdateParams.ClusterNetworkHostPrefix - cluster.DNSVip = params.ClusterUpdateParams.DNSVip - cluster.IngressVip = params.ClusterUpdateParams.IngressVip - cluster.PullSecret = params.ClusterUpdateParams.PullSecret - cluster.ServiceNetworkCidr = params.ClusterUpdateParams.ServiceNetworkCidr - cluster.SSHPublicKey = params.ClusterUpdateParams.SSHPublicKey - - if err := tx.Model(&cluster).Update(cluster).Error; err 
!= nil { - tx.Rollback() - log.WithError(err).Errorf("failed to update cluster: %s", params.ClusterID) - return installer.NewUpdateClusterInternalServerError(). - WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + if err = b.clusterApi.VerifyClusterUpdatability(&cluster); err != nil { + log.WithError(err).Errorf("cluster %s can't be updated in current state", params.ClusterID) + return installer.NewUpdateClusterConflict().WithPayload(common.GenerateError(http.StatusConflict, err)) + } + + if updateClusterConflict := b.validateDNSDomain(params, log); updateClusterConflict != nil { + return updateClusterConflict + } + + err = b.updateClusterData(ctx, &cluster, params, tx, log) + if err != nil { + return common.GenerateErrorResponder(err) + } + + err = b.updateHostsData(ctx, params, tx, log) + if err != nil { + return common.GenerateErrorResponder(err) + } + + err = b.updateHostsAndClusterStatus(ctx, &cluster, tx, log) + if err != nil { + return common.GenerateErrorResponder(err) + } + + if err := tx.Commit().Error; err != nil { + log.Error(err) + return common.GenerateErrorResponder(common.NewApiError(http.StatusInternalServerError, fmt.Errorf("DB error, failed to commit"))) + } + txSuccess = true + + if err := b.db.Preload("Hosts").First(&cluster, "id = ?", params.ClusterID).Error; err != nil { + log.WithError(err).Errorf("failed to get cluster %s after update", params.ClusterID) + return common.GenerateErrorResponder(common.NewApiError(http.StatusInternalServerError, err)) + } + + cluster.HostNetworks = calculateHostNetworks(log, &cluster) + for _, host := range cluster.Hosts { + if err := b.customizeHost(host); err != nil { + return common.GenerateErrorResponder(common.NewApiError(http.StatusInternalServerError, err)) + } + } + + return installer.NewUpdateClusterCreated().WithPayload(&cluster.Cluster) +} + +func (b *bareMetalInventory) updateClusterData(ctx context.Context, cluster *common.Cluster, params installer.UpdateClusterParams, db 
*gorm.DB, log logrus.FieldLogger) error { + updates := map[string]interface{}{} + apiVip := cluster.APIVip + ingressVip := cluster.IngressVip + if params.ClusterUpdateParams.Name != nil { + updates["name"] = *params.ClusterUpdateParams.Name + } + if params.ClusterUpdateParams.APIVip != nil { + updates["api_vip"] = *params.ClusterUpdateParams.APIVip + apiVip = *params.ClusterUpdateParams.APIVip + } + if params.ClusterUpdateParams.BaseDNSDomain != nil { + updates["base_dns_domain"] = *params.ClusterUpdateParams.BaseDNSDomain + } + if params.ClusterUpdateParams.ClusterNetworkCidr != nil { + updates["cluster_network_cidr"] = *params.ClusterUpdateParams.ClusterNetworkCidr + } + if params.ClusterUpdateParams.ClusterNetworkHostPrefix != nil { + updates["cluster_network_host_prefix"] = *params.ClusterUpdateParams.ClusterNetworkHostPrefix + } + if params.ClusterUpdateParams.ServiceNetworkCidr != nil { + updates["service_network_cidr"] = *params.ClusterUpdateParams.ServiceNetworkCidr + } + if params.ClusterUpdateParams.IngressVip != nil { + updates["ingress_vip"] = *params.ClusterUpdateParams.IngressVip + ingressVip = *params.ClusterUpdateParams.IngressVip + } + if params.ClusterUpdateParams.SSHPublicKey != nil { + updates["ssh_public_key"] = *params.ClusterUpdateParams.SSHPublicKey + } + + var machineCidr string + + machineCidr, err := network.CalculateMachineNetworkCIDR(apiVip, ingressVip, cluster.Hosts) + if err != nil { + log.WithError(err).Errorf("failed to calculate machine network cidr for cluster: %s", params.ClusterID) + return common.NewApiError(http.StatusBadRequest, err) + } + updates["machine_network_cidr"] = machineCidr + + err = network.VerifyVips(cluster.Hosts, machineCidr, apiVip, ingressVip, false, log) + if err != nil { + log.WithError(err).Errorf("VIP verification failed for cluster: %s", params.ClusterID) + return common.NewApiError(http.StatusBadRequest, err) + } + + if params.ClusterUpdateParams.PullSecret != nil { + cluster.PullSecret = 
*params.ClusterUpdateParams.PullSecret + updates["pull_secret"] = *params.ClusterUpdateParams.PullSecret + if cluster.PullSecret != "" { + updates["pull_secret_set"] = true + } else { + updates["pull_secret_set"] = false + } } + dbReply := db.Model(&common.Cluster{}).Where("id = ?", cluster.ID.String()).Updates(updates) + if dbReply.Error != nil { + log.WithError(dbReply.Error).Errorf("failed to update cluster: %s", params.ClusterID) + return common.NewApiError(http.StatusInternalServerError, err) + } + + return nil +} + +func (b *bareMetalInventory) updateHostsData(ctx context.Context, params installer.UpdateClusterParams, db *gorm.DB, log logrus.FieldLogger) error { for i := range params.ClusterUpdateParams.HostsRoles { log.Infof("Update host %s to role: %s", params.ClusterUpdateParams.HostsRoles[i].ID, params.ClusterUpdateParams.HostsRoles[i].Role) var host models.Host - if err := tx.First(&host, "id = ? and cluster_id = ?", - params.ClusterUpdateParams.HostsRoles[i].ID, params.ClusterID).Error; err != nil { - tx.Rollback() + err := db.First(&host, "id = ? 
and cluster_id = ?", + params.ClusterUpdateParams.HostsRoles[i].ID, params.ClusterID).Error + if err != nil { log.WithError(err).Errorf("failed to find host <%s> in cluster <%s>", params.ClusterUpdateParams.HostsRoles[i].ID, params.ClusterID) - return installer.NewUpdateClusterNotFound().WithPayload(common.GenerateError(http.StatusNotFound, err)) + return common.NewApiError(http.StatusNotFound, err) } - if _, err := b.hostApi.UpdateRole(ctx, &host, params.ClusterUpdateParams.HostsRoles[i].Role, tx); err != nil { - tx.Rollback() + err = b.hostApi.UpdateRole(ctx, &host, models.HostRole(params.ClusterUpdateParams.HostsRoles[i].Role), db) + if err != nil { log.WithError(err).Errorf("failed to set role <%s> host <%s> in cluster <%s>", params.ClusterUpdateParams.HostsRoles[i].Role, params.ClusterUpdateParams.HostsRoles[i].ID, params.ClusterID) - return installer.NewUpdateClusterConflict().WithPayload(common.GenerateError(http.StatusConflict, err)) + return common.NewApiError(http.StatusInternalServerError, err) } } - if _, err := b.clusterApi.RefreshStatus(ctx, &cluster, tx); err != nil { - tx.Rollback() - log.WithError(err).Errorf("failed to validate or update cluster %s state", params.ClusterID) - return installer.NewRegisterClusterInternalServerError(). - WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + for i := range params.ClusterUpdateParams.HostsNames { + log.Infof("Update host %s to request hostname %s", params.ClusterUpdateParams.HostsNames[i].ID, + params.ClusterUpdateParams.HostsNames[i].Hostname) + var host models.Host + err := db.First(&host, "id = ? 
and cluster_id = ?", + params.ClusterUpdateParams.HostsNames[i].ID, params.ClusterID).Error + if err != nil { + log.WithError(err).Errorf("failed to find host <%s> in cluster <%s>", + params.ClusterUpdateParams.HostsRoles[i].ID, params.ClusterID) + return common.NewApiError(http.StatusNotFound, err) + } + err = b.hostApi.UpdateHostname(ctx, &host, params.ClusterUpdateParams.HostsNames[i].Hostname, db) + if err != nil { + log.WithError(err).Errorf("failed to set hostname <%s> host <%s> in cluster <%s>", + params.ClusterUpdateParams.HostsNames[i].Hostname, params.ClusterUpdateParams.HostsNames[i].ID, + params.ClusterID) + return common.NewApiError(http.StatusConflict, err) + } } - if tx.Commit().Error != nil { - tx.Rollback() - return installer.NewUpdateClusterInternalServerError(). - WithPayload(common.GenerateError(http.StatusInternalServerError, errors.New("DB error, failed to commit"))) + return nil +} + +func (b *bareMetalInventory) updateHostsAndClusterStatus(ctx context.Context, cluster *common.Cluster, db *gorm.DB, log logrus.FieldLogger) error { + err := b.refreshClusterHosts(ctx, cluster, db, log) + if err != nil { + return err } - if err := b.db.Preload("Hosts").First(&cluster, "id = ?", params.ClusterID).Error; err != nil { - log.WithError(err).Errorf("failed to get cluster %s after update", params.ClusterID) - return installer.NewUpdateClusterInternalServerError(). 
- WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + if _, err = b.clusterApi.RefreshStatus(ctx, cluster, db); err != nil { + log.WithError(err).Errorf("failed to validate or update cluster %s state", cluster.ID) + return common.NewApiError(http.StatusInternalServerError, err) } - return installer.NewUpdateClusterCreated().WithPayload(&cluster) + return nil +} + +func calculateHostNetworks(log logrus.FieldLogger, cluster *common.Cluster) []*models.HostNetwork { + cidrHostsMap := make(map[string][]strfmt.UUID) + for _, h := range cluster.Hosts { + if h.Inventory == "" { + continue + } + var inventory models.Inventory + err := json.Unmarshal([]byte(h.Inventory), &inventory) + if err != nil { + log.WithError(err).Warnf("Could not parse inventory of host %s", *h.ID) + continue + } + for _, intf := range inventory.Interfaces { + for _, ipv4Address := range intf.IPV4Addresses { + _, ipnet, err := net.ParseCIDR(ipv4Address) + if err != nil { + log.WithError(err).Warnf("Could not parse CIDR %s", ipv4Address) + continue + } + cidr := ipnet.String() + cidrHostsMap[cidr] = append(cidrHostsMap[cidr], *h.ID) + } + } + } + ret := make([]*models.HostNetwork, 0) + for k, v := range cidrHostsMap { + ret = append(ret, &models.HostNetwork{ + Cidr: k, + HostIds: v, + }) + } + return ret } func (b *bareMetalInventory) ListClusters(ctx context.Context, params installer.ListClustersParams) middleware.Responder { log := logutil.FromContext(ctx, b.log) - var clusters []*models.Cluster - if err := b.db.Preload("Hosts").Find(&clusters).Error; err != nil { + var clusters []*common.Cluster + query := identity.GetUserIDFilter(ctx) + if err := b.db.Preload("Hosts").Find(&clusters).Where(query).Error; err != nil { log.WithError(err).Error("failed to list clusters") return installer.NewListClustersInternalServerError(). 
WithPayload(common.GenerateError(http.StatusInternalServerError, err)) } + var mClusters []*models.Cluster = make([]*models.Cluster, len(clusters)) + for i, c := range clusters { + mClusters[i] = &c.Cluster + } - return installer.NewListClustersOK().WithPayload(clusters) + return installer.NewListClustersOK().WithPayload(mClusters) } func (b *bareMetalInventory) GetCluster(ctx context.Context, params installer.GetClusterParams) middleware.Responder { - var cluster models.Cluster + log := logutil.FromContext(ctx, b.log) + var cluster common.Cluster if err := b.db.Preload("Hosts").First(&cluster, "id = ?", params.ClusterID).Error; err != nil { // TODO: check for the right error return installer.NewGetClusterNotFound(). WithPayload(common.GenerateError(http.StatusNotFound, err)) } - return installer.NewGetClusterOK().WithPayload(&cluster) + + cluster.HostNetworks = calculateHostNetworks(log, &cluster) + for _, host := range cluster.Hosts { + if err := b.customizeHost(host); err != nil { + return common.GenerateErrorResponder(common.NewApiError(http.StatusInternalServerError, err)) + } + } + + return installer.NewGetClusterOK().WithPayload(&cluster.Cluster) } func (b *bareMetalInventory) RegisterHost(ctx context.Context, params installer.RegisterHostParams) middleware.Responder { log := logutil.FromContext(ctx, b.log) var host models.Host + var cluster common.Cluster log.Infof("Register host: %+v", params) - if err := b.db.First(&models.Cluster{}, "id = ?", params.ClusterID.String()).Error; err != nil { + if err := b.db.First(&cluster, "id = ?", params.ClusterID.String()).Error; err != nil { log.WithError(err).Errorf("failed to get cluster: %s", params.ClusterID.String()) - return installer.NewRegisterHostBadRequest(). 
- WithPayload(common.GenerateError(http.StatusBadRequest, err)) + if gorm.IsRecordNotFoundError(err) { + return common.NewApiError(http.StatusNotFound, err) + } + return common.NewApiError(http.StatusInternalServerError, err) + } + err := b.db.First(&host, "id = ? and cluster_id = ?", *params.NewHostParams.HostID, params.ClusterID).Error + if err != nil && !gorm.IsRecordNotFoundError(err) { + log.WithError(err).Errorf("failed to get host %s in cluster: %s", + *params.NewHostParams.HostID, params.ClusterID.String()) + return installer.NewRegisterHostInternalServerError(). + WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + + // In case host doesn't exists check if the cluster accept new hosts registration + if err != nil && gorm.IsRecordNotFoundError(err) { + if err := b.clusterApi.AcceptRegistration(&cluster); err != nil { + log.WithError(err).Errorf("failed to register host <%s> to cluster %s due to: %s", + params.NewHostParams.HostID, params.ClusterID.String(), err.Error()) + b.eventsHandler.AddEvent(ctx, params.NewHostParams.HostID.String(), models.EventSeverityError, + "Failed to register host: cluster cannot accept new hosts in its current state", time.Now(), params.ClusterID.String()) + return installer.NewRegisterHostForbidden(). 
+ WithPayload(common.GenerateError(http.StatusForbidden, err)) + } } url := installer.GetHostURL{ClusterID: params.ClusterID, HostID: *params.NewHostParams.HostID} host = models.Host{ - ID: params.NewHostParams.HostID, - Href: swag.String(url.String()), - Kind: swag.String(ResourceKindHost), - ClusterID: params.ClusterID, + ID: params.NewHostParams.HostID, + Href: swag.String(url.String()), + Kind: swag.String(ResourceKindHost), + ClusterID: params.ClusterID, + CheckedInAt: strfmt.DateTime(time.Now()), + DiscoveryAgentVersion: params.NewHostParams.DiscoveryAgentVersion, } if err := b.hostApi.RegisterHost(ctx, &host); err != nil { log.WithError(err).Errorf("failed to register host <%s> cluster <%s>", params.NewHostParams.HostID.String(), params.ClusterID.String()) + b.eventsHandler.AddEvent(ctx, params.NewHostParams.HostID.String(), models.EventSeverityError, + "Failed to register host: error creating host metadata", time.Now(), params.ClusterID.String()) return installer.NewRegisterHostBadRequest(). WithPayload(common.GenerateError(http.StatusBadRequest, err)) } + if err := b.customizeHost(&host); err != nil { + b.eventsHandler.AddEvent(ctx, params.NewHostParams.HostID.String(), models.EventSeverityError, + "Failed to register host: error setting host properties", time.Now(), params.ClusterID.String()) + return common.GenerateErrorResponder(common.NewApiError(http.StatusInternalServerError, err)) + } + + b.eventsHandler.AddEvent(ctx, params.NewHostParams.HostID.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: registered to cluster", common.GetHostnameForMsg(&host)), + time.Now(), params.ClusterID.String()) return installer.NewRegisterHostCreated().WithPayload(&host) } func (b *bareMetalInventory) DeregisterHost(ctx context.Context, params installer.DeregisterHostParams) middleware.Responder { + log := logutil.FromContext(ctx, b.log) + log.Infof("Deregister host: %s cluster %s", params.HostID, params.ClusterID) + if err := b.db.Where("id = ? 
and cluster_id = ?", params.HostID, params.ClusterID). Delete(&models.Host{}).Error; err != nil { // TODO: check error type @@ -656,6 +1254,8 @@ func (b *bareMetalInventory) DeregisterHost(ctx context.Context, params installe } // TODO: need to check that host can be deleted from the cluster + b.eventsHandler.AddEvent(ctx, params.HostID.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: deregistered from cluster", params.HostID.String()), time.Now(), params.ClusterID.String()) return installer.NewDeregisterHostNoContent() } @@ -667,6 +1267,10 @@ func (b *bareMetalInventory) GetHost(ctx context.Context, params installer.GetHo return installer.NewGetHostNotFound().WithPayload(common.GenerateError(http.StatusNotFound, err)) } + if err := b.customizeHost(&host); err != nil { + return common.GenerateErrorResponder(common.NewApiError(http.StatusInternalServerError, err)) + } + return installer.NewGetHostOK().WithPayload(&host) } @@ -678,6 +1282,13 @@ func (b *bareMetalInventory) ListHosts(ctx context.Context, params installer.Lis return installer.NewListHostsInternalServerError(). WithPayload(common.GenerateError(http.StatusInternalServerError, err)) } + + for _, host := range hosts { + if err := b.customizeHost(host); err != nil { + return common.GenerateErrorResponder(common.NewApiError(http.StatusInternalServerError, err)) + } + } + return installer.NewListHostsOK().WithPayload(hosts) } @@ -690,13 +1301,44 @@ func (b *bareMetalInventory) GetNextSteps(ctx context.Context, params installer. var steps models.Steps var host models.Host + txSuccess := false + tx := b.db.Begin() + defer func() { + if !txSuccess { + log.Error("get next steps failed") + tx.Rollback() + } + if r := recover(); r != nil { + log.Error("get next steps failed") + tx.Rollback() + } + }() + + if tx.Error != nil { + log.WithError(tx.Error).Errorf("failed to start db transaction") + return installer.NewUpdateClusterInternalServerError(). 
+ WithPayload(common.GenerateError(http.StatusInternalServerError, errors.New("DB error, failed to start transaction"))) + } + //TODO check the error type - if err := b.db.First(&host, "id = ? and cluster_id = ?", params.HostID, params.ClusterID).Error; err != nil { - log.WithError(err).Errorf("failed to find host %s", params.HostID) + if err := tx.First(&host, "id = ? and cluster_id = ?", params.HostID, params.ClusterID).Error; err != nil { + log.WithError(err).Errorf("failed to find host: %s", params.HostID) return installer.NewGetNextStepsNotFound(). WithPayload(common.GenerateError(http.StatusNotFound, err)) } + host.CheckedInAt = strfmt.DateTime(time.Now()) + if err := tx.Model(&host).Update("checked_in_at", host.CheckedInAt).Error; err != nil { + log.WithError(err).Errorf("failed to update host: %s", params.ClusterID) + return installer.NewGetNextStepsInternalServerError() + } + + if err := tx.Commit().Error; err != nil { + log.Error(err) + return installer.NewGetNextStepsInternalServerError() + } + txSuccess = true + var err error steps, err = b.hostApi.GetNextSteps(ctx, &host) if err != nil { @@ -710,37 +1352,41 @@ func (b *bareMetalInventory) GetNextSteps(ctx context.Context, params installer. 
step.StepID = cmd.stepID step.Command = "bash" step.Args = []string{"-c", cmd.cmd} - steps = append(steps, step) + steps.Instructions = append(steps.Instructions, step) delete(b.debugCmdMap, params.HostID) } b.debugCmdMux.Unlock() - return installer.NewGetNextStepsOK().WithPayload(steps) + return installer.NewGetNextStepsOK().WithPayload(&steps) } func (b *bareMetalInventory) PostStepReply(ctx context.Context, params installer.PostStepReplyParams) middleware.Responder { var err error log := logutil.FromContext(ctx, b.log) - log.Infof("Received step reply <%s> from cluster <%s> host <%s> exit-code <%d> stdout <%s> stderr <%s>", params.Reply.StepID, params.ClusterID, + msg := fmt.Sprintf("Received step reply <%s> from cluster <%s> host <%s> exit-code <%d> stdout <%s> stderr <%s>", params.Reply.StepID, params.ClusterID, params.HostID, params.Reply.ExitCode, params.Reply.Output, params.Reply.Error) + var host models.Host + if err = b.db.First(&host, "id = ? and cluster_id = ?", params.HostID, params.ClusterID).Error; err != nil { + log.WithError(err).Errorf("Failed to find host <%s> cluster <%s> step <%s> exit code %d stdout <%s> stderr <%s>", + params.HostID, params.ClusterID, params.Reply.StepID, params.Reply.ExitCode, params.Reply.Output, params.Reply.Error) + return installer.NewPostStepReplyNotFound(). 
+ WithPayload(common.GenerateError(http.StatusNotFound, err)) + } + //check the output exit code if params.Reply.ExitCode != 0 { - err = fmt.Errorf("Exit code is %d reply error is %s for %s reply for host %s cluster %s", - params.Reply.ExitCode, params.Reply.Error, params.Reply.StepID, params.HostID, params.ClusterID) - log.WithError(err).Errorf("Exit code is <%d> , reply error is <%s> for <%s> reply for host <%s> cluster <%s>", - params.Reply.ExitCode, params.Reply.Error, params.Reply.StepID, params.HostID, params.ClusterID) + err = fmt.Errorf(msg) + log.WithError(err).Errorf("Exit code is <%d> ", params.Reply.ExitCode) + handlingError := handleReplyError(params, b, ctx, &host) + if handlingError != nil { + log.WithError(handlingError).Errorf("Failed handling reply error for host <%s> cluster <%s>", params.HostID, params.ClusterID) + } return installer.NewPostStepReplyBadRequest(). WithPayload(common.GenerateError(http.StatusBadRequest, err)) } - var host models.Host - if err = b.db.First(&host, "id = ? and cluster_id = ?", params.HostID, params.ClusterID).Error; err != nil { - log.WithError(err).Errorf("Failed to find host <%s> cluster <%s> step <%s>", - params.HostID, params.ClusterID, params.Reply.StepID) - return installer.NewPostStepReplyNotFound(). 
- WithPayload(common.GenerateError(http.StatusNotFound, err)) - } + log.Infof(msg) var stepReply string stepReply, err = filterReplyByType(params) @@ -762,16 +1408,49 @@ func (b *bareMetalInventory) PostStepReply(ctx context.Context, params installer return installer.NewPostStepReplyNoContent() } +func handleReplyError(params installer.PostStepReplyParams, b *bareMetalInventory, ctx context.Context, h *models.Host) error { + + if params.Reply.StepType == models.StepTypeInstall { + //if it's install step - need to move host to error + return b.hostApi.HandleInstallationFailure(ctx, h) + } + return nil +} + +func (b *bareMetalInventory) updateFreeAddressesReport(ctx context.Context, host *models.Host, freeAddressesReport string) error { + var ( + err error + freeAddresses models.FreeNetworksAddresses + ) + log := logutil.FromContext(ctx, b.log) + if err = json.Unmarshal([]byte(freeAddressesReport), &freeAddresses); err != nil { + log.WithError(err).Warnf("Json unmarshal free addresses of host %s", host.ID.String()) + return err + } + if len(freeAddresses) == 0 { + err = fmt.Errorf("Free addresses for host %s is empty", host.ID.String()) + log.WithError(err).Warn("Update free addresses") + return err + } + if err = b.db.Model(&models.Host{}).Where("id = ? and cluster_id = ?", host.ID.String(), + host.ClusterID.String()).Updates(map[string]interface{}{"free_addresses": freeAddressesReport}).Error; err != nil { + log.WithError(err).Warnf("Update free addresses of host %s", host.ID.String()) + return err + } + // Gorm sets the number of changed rows in AffectedRows and not the number of matched rows. Therefore, if the report hasn't changed + // from the previous report, the AffectedRows will be 0 but it will still be correct. 
So no error reporting needed for AffectedRows == 0 + return nil +} + func handleReplyByType(params installer.PostStepReplyParams, b *bareMetalInventory, ctx context.Context, host models.Host, stepReply string) error { var err error - if strings.HasPrefix(params.Reply.StepID, string(models.StepTypeHardwareInfo)) { - _, err = b.hostApi.UpdateHwInfo(ctx, &host, stepReply) - } - if strings.HasPrefix(params.Reply.StepID, string(models.StepTypeConnectivityCheck)) { + switch params.Reply.StepType { + case models.StepTypeInventory: + err = b.hostApi.UpdateInventory(ctx, &host, stepReply) + case models.StepTypeConnectivityCheck: err = b.hostApi.UpdateConnectivityReport(ctx, &host, stepReply) - } - if strings.HasPrefix(params.Reply.StepID, string(models.StepTypeInventory)) { - _, err = b.hostApi.UpdateInventory(ctx, &host, stepReply) + case models.StepTypeFreeNetworkAddresses: + err = b.updateFreeAddressesReport(ctx, &host, stepReply) } return err } @@ -779,17 +1458,15 @@ func handleReplyByType(params installer.PostStepReplyParams, b *bareMetalInvento func filterReplyByType(params installer.PostStepReplyParams) (string, error) { var stepReply string var err error - // To make sure we store only information defined in swagger we unmarshal and marshal the stepReplyParams. - if strings.HasPrefix(params.Reply.StepID, string(models.StepTypeHardwareInfo)) { - stepReply, err = filterReply(&models.Introspection{}, params.Reply.Output) - } - if strings.HasPrefix(params.Reply.StepID, string(models.StepTypeConnectivityCheck)) { - stepReply, err = filterReply(&models.ConnectivityReport{}, params.Reply.Output) - } - - if strings.HasPrefix(params.Reply.StepID, string(models.StepTypeInventory)) { + // To make sure we store only information defined in swagger we unmarshal and marshal the stepReplyParams. 
+ switch params.Reply.StepType { + case models.StepTypeInventory: stepReply, err = filterReply(&models.Inventory{}, params.Reply.Output) + case models.StepTypeConnectivityCheck: + stepReply, err = filterReply(&models.ConnectivityReport{}, params.Reply.Output) + case models.StepTypeFreeNetworkAddresses: + stepReply, err = filterReply(&models.FreeNetworksAddresses{}, params.Reply.Output) } return stepReply, err } @@ -817,6 +1494,7 @@ func (b *bareMetalInventory) SetDebugStep(ctx context.Context, params installer. b.debugCmdMux.Unlock() log.Infof("Added new debug command <%s> for cluster <%s> host <%s>: <%s>", stepID, params.ClusterID, params.HostID, swag.StringValue(params.Step.Command)) + b.eventsHandler.AddEvent(ctx, params.ClusterID.String(), models.EventSeverityInfo, "Added debug command", time.Now(), params.HostID.String()) return installer.NewSetDebugStepNoContent() } @@ -826,16 +1504,32 @@ func (b *bareMetalInventory) DisableHost(ctx context.Context, params installer.D log.Info("disabling host: ", params.HostID) if err := b.db.First(&host, "id = ? and cluster_id = ?", params.HostID, params.ClusterID).Error; err != nil { - return installer.NewDisableHostNotFound(). 
- WithPayload(common.GenerateError(http.StatusNotFound, err)) + if gorm.IsRecordNotFoundError(err) { + log.WithError(err).Errorf("host %s not found", params.HostID) + return common.NewApiError(http.StatusNotFound, err) + } + log.WithError(err).Errorf("failed to get host %s", params.HostID) + msg := "Failed to disable host: error fetching host from DB" + b.eventsHandler.AddEvent(ctx, params.HostID.String(), models.EventSeverityError, msg, time.Now(), params.ClusterID.String()) + return common.NewApiError(http.StatusInternalServerError, err) } - if _, err := b.hostApi.DisableHost(ctx, &host); err != nil { + if err := b.hostApi.DisableHost(ctx, &host); err != nil { log.WithError(err).Errorf("failed to disable host <%s> from cluster <%s>", params.HostID, params.ClusterID) - return installer.NewDisableHostConflict(). - WithPayload(common.GenerateError(http.StatusConflict, err)) + msg := "Failed to disable host: error disabling host in current status" + b.eventsHandler.AddEvent(ctx, params.HostID.String(), models.EventSeverityError, msg, time.Now(), params.ClusterID.String()) + return common.GenerateErrorResponderWithDefault(err, http.StatusConflict) + } + + if err := b.customizeHost(&host); err != nil { + msg := "Failed to disable host: error setting host properties" + b.eventsHandler.AddEvent(ctx, params.HostID.String(), models.EventSeverityError, msg, time.Now(), params.ClusterID.String()) + return common.GenerateErrorResponder(common.NewApiError(http.StatusInternalServerError, err)) } - return installer.NewDisableHostNoContent() + + msg := "Host disabled by user" + b.eventsHandler.AddEvent(ctx, params.HostID.String(), models.EventSeverityInfo, msg, time.Now(), params.ClusterID.String()) + return installer.NewDisableHostOK().WithPayload(&host) } func (b *bareMetalInventory) EnableHost(ctx context.Context, params installer.EnableHostParams) middleware.Responder { @@ -844,28 +1538,38 @@ func (b *bareMetalInventory) EnableHost(ctx context.Context, params installer.En 
log.Info("enable host: ", params.HostID) if err := b.db.First(&host, "id = ? and cluster_id = ?", params.HostID, params.ClusterID).Error; err != nil { - return installer.NewEnableHostNotFound(). - WithPayload(common.GenerateError(http.StatusNotFound, err)) + if gorm.IsRecordNotFoundError(err) { + log.WithError(err).Errorf("host %s not found", params.HostID) + return common.NewApiError(http.StatusNotFound, err) + } + log.WithError(err).Errorf("failed to get host %s", params.HostID) + msg := "Failed to enable host: error fetching host from DB" + b.eventsHandler.AddEvent(ctx, params.HostID.String(), models.EventSeverityError, msg, time.Now(), params.ClusterID.String()) + return common.NewApiError(http.StatusInternalServerError, err) } - if _, err := b.hostApi.EnableHost(ctx, &host); err != nil { + if err := b.hostApi.EnableHost(ctx, &host); err != nil { log.WithError(err).Errorf("failed to enable host <%s> from cluster <%s>", params.HostID, params.ClusterID) - return installer.NewEnableHostConflict(). 
- WithPayload(common.GenerateError(http.StatusConflict, err)) + msg := "Failed to enable host: error disabling host in current status" + b.eventsHandler.AddEvent(ctx, params.HostID.String(), models.EventSeverityError, msg, time.Now(), params.ClusterID.String()) + return common.GenerateErrorResponderWithDefault(err, http.StatusConflict) + } + + if err := b.customizeHost(&host); err != nil { + msg := "Failed to enable host: error setting host properties" + b.eventsHandler.AddEvent(ctx, params.HostID.String(), models.EventSeverityError, msg, time.Now(), params.ClusterID.String()) + return common.GenerateErrorResponder(common.NewApiError(http.StatusInternalServerError, err)) } - return installer.NewEnableHostNoContent() + + msg := "Host enabled by user" + b.eventsHandler.AddEvent(ctx, params.HostID.String(), models.EventSeverityInfo, msg, time.Now(), params.ClusterID.String()) + return installer.NewEnableHostOK().WithPayload(&host) } -func (b *bareMetalInventory) createKubeconfigJob(cluster *models.Cluster, jobName string, cfg []byte) *batch.Job { +func (b *bareMetalInventory) createKubeconfigJob(cluster *common.Cluster, jobName string, cfg []byte) *batch.Job { id := cluster.ID - overrideImageName := "registry.svc.ci.openshift.org/ocp/release:4.5.0-0.nightly-2020-05-21-015458" - if cluster.OpenshiftVersion == models.ClusterOpenshiftVersionNr44 { - overrideImageName = "quay.io/openshift-release-dev/ocp-release:4.4.0-rc.7-x86_64" - } + // [TODO] make sure that we use openshift-installer from the release image, otherwise the KubeconfigGenerator image must be updated here per opnshift version kubeConfigGeneratorImage := b.Config.KubeconfigGenerator - if cluster.OpenshiftVersion == models.ClusterOpenshiftVersionNr44 { - kubeConfigGeneratorImage = b.Config.KubeconfigGenerator4_4 - } return &batch.Job{ TypeMeta: meta.TypeMeta{ Kind: "Job", @@ -887,7 +1591,6 @@ func (b *bareMetalInventory) createKubeconfigJob(cluster *models.Cluster, jobNam { Name: kubeconfigPrefix, Image: 
kubeConfigGeneratorImage, - Command: b.imageBuildCmd, ImagePullPolicy: "IfNotPresent", Env: []core.EnvVar{ { @@ -898,6 +1601,10 @@ func (b *bareMetalInventory) createKubeconfigJob(cluster *models.Cluster, jobNam Name: "INSTALLER_CONFIG", Value: string(cfg), }, + { + Name: "INVENTORY_ENDPOINT", + Value: "http://" + strings.TrimSpace(b.InventoryURL) + ":" + strings.TrimSpace(b.InventoryPort) + "/api/assisted-install/v1", + }, { Name: "IMAGE_NAME", Value: jobName, @@ -912,7 +1619,7 @@ func (b *bareMetalInventory) createKubeconfigJob(cluster *models.Cluster, jobNam }, { Name: "OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE", - Value: overrideImageName, //TODO: change this to match the cluster openshift version + Value: b.ReleaseImage, //TODO: change this to match the cluster openshift version }, { Name: "aws_access_key_id", @@ -923,6 +1630,16 @@ func (b *bareMetalInventory) createKubeconfigJob(cluster *models.Cluster, jobNam Value: b.AwsSecretAccessKey, }, }, + Resources: core.ResourceRequirements{ + Limits: core.ResourceList{ + "cpu": getQuantity(b.JobCPULimit), + "memory": getQuantity(b.JobMemoryLimit), + }, + Requests: core.ResourceList{ + "cpu": getQuantity(b.JobCPURequests), + "memory": getQuantity(b.JobMemoryRequests), + }, + }, }, }, RestartPolicy: "Never", @@ -934,7 +1651,14 @@ func (b *bareMetalInventory) createKubeconfigJob(cluster *models.Cluster, jobNam func (b *bareMetalInventory) DownloadClusterFiles(ctx context.Context, params installer.DownloadClusterFilesParams) middleware.Responder { log := logutil.FromContext(ctx, b.log) - var cluster models.Cluster + var cluster common.Cluster + log.Infof("Download cluster files: %s for cluster %s", params.FileName, params.ClusterID) + + if !funk.Contains(clusterFileNames, params.FileName) { + err := fmt.Errorf("invalid cluster file %s", params.FileName) + log.WithError(err).Errorf("failed download file: %s from cluster: %s", params.FileName, params.ClusterID) + return common.NewApiError(http.StatusBadRequest, err) + } if 
err := b.db.First(&cluster, "id = ?", params.ClusterID).Error; err != nil { log.WithError(err).Errorf("failed to find cluster %s", params.ClusterID) @@ -946,37 +1670,52 @@ func (b *bareMetalInventory) DownloadClusterFiles(ctx context.Context, params in WithPayload(common.GenerateError(http.StatusInternalServerError, err)) } } - clusterStatus := swag.StringValue(cluster.Status) - if clusterStatus != ClusterStatusInstalling && clusterStatus != ClusterStatusInstalled { - msg := fmt.Sprintf("Cluster %s is in %s state, files can be downloaded only in installing or installed state", params.ClusterID, clusterStatus) - log.Warn(msg) + if err := b.clusterApi.DownloadFiles(&cluster); err != nil { + log.WithError(err).Errorf("failed to download cluster files %s", params.ClusterID) return installer.NewDownloadClusterFilesConflict(). - WithPayload(common.GenerateError(http.StatusConflict, errors.New(msg))) + WithPayload(common.GenerateError(http.StatusConflict, err)) } - filesUrl := fmt.Sprintf("%s/%s/%s", b.S3EndpointURL, b.S3Bucket, - fmt.Sprintf("%s/%s", params.ClusterID, params.FileName)) - log.Info("File URL: ", filesUrl) - resp, err := http.Get(filesUrl) + respBody, contentLength, err := b.s3Client.DownloadFileFromS3(ctx, fmt.Sprintf("%s/%s", params.ClusterID, params.FileName), b.S3Bucket) if err != nil { - log.WithError(err).Errorf("Failed to get clusters %s %s file", params.ClusterID, params.FileName) return installer.NewDownloadClusterFilesInternalServerError(). WithPayload(common.GenerateError(http.StatusInternalServerError, err)) } - if resp.StatusCode != http.StatusOK { - defer resp.Body.Close() - b, _ := ioutil.ReadAll(resp.Body) - log.WithError(fmt.Errorf("%s", string(b))). - Errorf("Failed to get clusters %s %s", params.ClusterID, params.FileName) - return installer.NewDownloadClusterFilesConflict(). 
- WithPayload(common.GenerateError(http.StatusConflict, errors.New(string(b)))) + return filemiddleware.NewResponder(installer.NewDownloadClusterFilesOK().WithPayload(respBody), params.FileName, contentLength) +} + +func (b *bareMetalInventory) DownloadClusterKubeconfig(ctx context.Context, params installer.DownloadClusterKubeconfigParams) middleware.Responder { + log := logutil.FromContext(ctx, b.log) + var cluster common.Cluster + log.Infof("Download cluster kubeconfig for cluster %s", params.ClusterID) + + if err := b.db.First(&cluster, "id = ?", params.ClusterID).Error; err != nil { + log.WithError(err).Errorf("failed to find cluster %s", params.ClusterID) + if gorm.IsRecordNotFoundError(err) { + return installer.NewDownloadClusterKubeconfigNotFound(). + WithPayload(common.GenerateError(http.StatusNotFound, err)) + } else { + return installer.NewDownloadClusterKubeconfigInternalServerError(). + WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + } + if err := b.clusterApi.DownloadKubeconfig(&cluster); err != nil { + return installer.NewDownloadClusterKubeconfigConflict(). + WithPayload(common.GenerateError(http.StatusConflict, err)) } - return filemiddleware.NewResponder(installer.NewDownloadClusterFilesOK().WithPayload(resp.Body), params.FileName) + + respBody, contentLength, err := b.s3Client.DownloadFileFromS3(ctx, fmt.Sprintf("%s/%s", params.ClusterID, kubeconfig), b.S3Bucket) + + if err != nil { + return installer.NewDownloadClusterKubeconfigConflict(). 
+ WithPayload(common.GenerateError(http.StatusConflict, errors.Wrap(err, "failed to download kubeconfig"))) + } + return filemiddleware.NewResponder(installer.NewDownloadClusterKubeconfigOK().WithPayload(respBody), kubeconfig, contentLength) } func (b *bareMetalInventory) GetCredentials(ctx context.Context, params installer.GetCredentialsParams) middleware.Responder { log := logutil.FromContext(ctx, b.log) - var cluster models.Cluster + var cluster common.Cluster if err := b.db.First(&cluster, "id = ?", params.ClusterID).Error; err != nil { log.WithError(err).Errorf("failed to find cluster %s", params.ClusterID) @@ -988,18 +1727,15 @@ func (b *bareMetalInventory) GetCredentials(ctx context.Context, params installe WithPayload(common.GenerateError(http.StatusInternalServerError, err)) } } - clusterStatus := swag.StringValue(cluster.Status) - if clusterStatus != ClusterStatusInstalling && clusterStatus != ClusterStatusInstalled { - msg := fmt.Sprintf("Cluster %s is in %s state, credentials are available only in installing or installed state", params.ClusterID, clusterStatus) - log.Warn(msg) + if err := b.clusterApi.GetCredentials(&cluster); err != nil { return installer.NewGetCredentialsConflict(). - WithPayload(common.GenerateError(http.StatusConflict, errors.New(msg))) + WithPayload(common.GenerateError(http.StatusConflict, err)) } fileName := "kubeadmin-password" - filesUrl := fmt.Sprintf("%s/%s/%s", b.S3EndpointURL, b.S3Bucket, + filesURL := fmt.Sprintf("%s/%s/%s", b.S3EndpointURL, b.S3Bucket, fmt.Sprintf("%s/%s", params.ClusterID, fileName)) - log.Info("File URL: ", filesUrl) - resp, err := http.Get(filesUrl) + log.Info("File URL: ", filesURL) + resp, err := http.Get(filesURL) if err != nil { log.WithError(err).Errorf("Failed to get clusters %s %s file", params.ClusterID, fileName) return installer.NewGetCredentialsInternalServerError(). 
@@ -1014,7 +1750,9 @@ func (b *bareMetalInventory) GetCredentials(ctx context.Context, params installe WithPayload(common.GenerateError(http.StatusConflict, errors.New(string(password)))) } return installer.NewGetCredentialsOK().WithPayload( - &models.Credentials{Username: defaultUser, Password: string(password)}) + &models.Credentials{Username: DefaultUser, + Password: string(password), + ConsoleURL: fmt.Sprintf("%s.%s.%s", ConsoleUrlPrefix, cluster.Name, cluster.BaseDNSDomain)}) } func (b *bareMetalInventory) UpdateHostInstallProgress(ctx context.Context, params installer.UpdateHostInstallProgressParams) middleware.Responder { @@ -1022,15 +1760,499 @@ func (b *bareMetalInventory) UpdateHostInstallProgress(ctx context.Context, para var host models.Host if err := b.db.First(&host, "id = ? and cluster_id = ?", params.HostID, params.ClusterID).Error; err != nil { log.WithError(err).Errorf("failed to find host %s", params.HostID) - // host have nothing to do with the error so we just log it - return installer.NewUpdateHostInstallProgressOK() + return installer.NewUpdateHostInstallProgressNotFound(). + WithPayload(common.GenerateError(http.StatusNotFound, err)) } - if err := b.hostApi.UpdateInstallProgress(ctx, &host, string(params.HostInstallProgressParams)); err != nil { + if err := b.hostApi.UpdateInstallProgress(ctx, &host, params.HostProgress); err != nil { log.WithError(err).Errorf("failed to update host %s progress", params.HostID) - // host have nothing to do with the error so we just log it - return installer.NewUpdateHostInstallProgressOK() + return installer.NewUpdateHostInstallProgressInternalServerError(). 
+ WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + + event := fmt.Sprintf("reached installation stage %s", params.HostProgress.CurrentStage) + + if params.HostProgress.ProgressInfo != "" { + event += fmt.Sprintf(": %s", params.HostProgress.ProgressInfo) } - msg := fmt.Sprintf("Host %s in cluster %s reached installation step %s", host.ID, host.ClusterID, params.HostInstallProgressParams) - b.eventsHandler.AddEvent(ctx, host.ID.String(), msg, time.Now(), host.ClusterID.String()) + + log.Info(fmt.Sprintf("Host %s in cluster %s: %s", host.ID, host.ClusterID, event)) + msg := fmt.Sprintf("Host %s: %s", common.GetHostnameForMsg(&host), event) + + b.eventsHandler.AddEvent(ctx, host.ID.String(), models.EventSeverityInfo, msg, time.Now(), host.ClusterID.String()) return installer.NewUpdateHostInstallProgressOK() } + +func (b *bareMetalInventory) UploadClusterIngressCert(ctx context.Context, params installer.UploadClusterIngressCertParams) middleware.Responder { + log := logutil.FromContext(ctx, b.log) + log.Infof("UploadClusterIngressCert for cluster %s with params %s", params.ClusterID, params.IngressCertParams) + var cluster common.Cluster + + if err := b.db.First(&cluster, "id = ?", params.ClusterID).Error; err != nil { + log.WithError(err).Errorf("failed to find cluster %s", params.ClusterID) + if gorm.IsRecordNotFoundError(err) { + return installer.NewUploadClusterIngressCertNotFound().WithPayload(common.GenerateError(http.StatusNotFound, err)) + } else { + return installer.NewUploadClusterIngressCertInternalServerError(). + WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + } + + if err := b.clusterApi.UploadIngressCert(&cluster); err != nil { + return installer.NewUploadClusterIngressCertBadRequest(). 
+ WithPayload(common.GenerateError(http.StatusBadRequest, err)) + } + + fileName := fmt.Sprintf("%s/%s", cluster.ID, kubeconfig) + exists, err := b.s3Client.DoesObjectExist(ctx, fileName, b.S3Bucket) + if err != nil { + log.WithError(err).Errorf("Failed to upload ingress ca") + return installer.NewUploadClusterIngressCertInternalServerError(). + WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + + if exists { + log.Infof("Ingress ca for cluster %s already exists", cluster.ID) + return installer.NewUploadClusterIngressCertCreated() + } + + noigress := fmt.Sprintf("%s/%s-noingress", cluster.ID, kubeconfig) + resp, _, err := b.s3Client.DownloadFileFromS3(ctx, noigress, b.S3Bucket) + if err != nil { + return installer.NewUploadClusterIngressCertInternalServerError(). + WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + + kubeconfigData, err := ioutil.ReadAll(resp) + if err != nil { + log.WithError(err).Infof("Failed to convert kubeconfig s3 response to io reader") + return installer.NewUploadClusterIngressCertInternalServerError(). + WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + + mergedKubeConfig, err := mergeIngressCaIntoKubeconfig(kubeconfigData, []byte(params.IngressCertParams), log) + if err != nil { + return installer.NewUploadClusterIngressCertInternalServerError(). + WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + + if err := b.s3Client.PushDataToS3(ctx, mergedKubeConfig, fileName, b.S3Bucket); err != nil { + return installer.NewUploadClusterIngressCertInternalServerError(). 
+ WithPayload(common.GenerateError(http.StatusInternalServerError, fmt.Errorf("failed to upload %s to s3", fileName))) + } + return installer.NewUploadClusterIngressCertCreated() +} + +// Merging given ingress ca certificate into kubeconfig +// Code was taken from openshift installer +func mergeIngressCaIntoKubeconfig(kubeconfigData []byte, ingressCa []byte, log logrus.FieldLogger) ([]byte, error) { + + kconfig, err := clientcmd.Load(kubeconfigData) + if err != nil { + log.WithError(err).Errorf("Failed to convert kubeconfig data") + return nil, err + } + if kconfig == nil || len(kconfig.Clusters) == 0 { + err = errors.Errorf("kubeconfig is missing expected data") + log.Error(err) + return nil, err + } + + for _, c := range kconfig.Clusters { + clusterCABytes := c.CertificateAuthorityData + if len(clusterCABytes) == 0 { + err = errors.Errorf("kubeconfig CertificateAuthorityData not found") + log.Errorf("%e, data %s", err, c.CertificateAuthorityData) + return nil, err + } + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM(clusterCABytes) { + err = errors.Errorf("cluster CA found in kubeconfig not valid PEM format") + log.Errorf("%e, ca :%s", err, clusterCABytes) + return nil, err + } + if !certPool.AppendCertsFromPEM(ingressCa) { + err = errors.Errorf("given ingress-ca is not valid PEM format") + log.Errorf("%e %s", err, ingressCa) + return nil, err + } + + newCA := append(ingressCa, clusterCABytes...) 
+ c.CertificateAuthorityData = newCA + } + + kconfigAsByteArray, err := clientcmd.Write(*kconfig) + if err != nil { + return nil, errors.Wrap(err, "failed to convert kubeconfig") + } + return kconfigAsByteArray, nil +} + +func setPullSecret(cluster *common.Cluster, pullSecret string) { + cluster.PullSecret = pullSecret + if pullSecret != "" { + cluster.PullSecretSet = true + } else { + cluster.PullSecretSet = false + } +} + +func (b *bareMetalInventory) CancelInstallation(ctx context.Context, params installer.CancelInstallationParams) middleware.Responder { + log := logutil.FromContext(ctx, b.log) + log.Infof("canceling installation for cluster %s", params.ClusterID) + + var c common.Cluster + + txSuccess := false + tx := b.db.Begin() + defer func() { + if !txSuccess { + log.Error("cancel installation failed") + tx.Rollback() + } + if r := recover(); r != nil { + log.Error("cancel installation failed") + tx.Rollback() + } + }() + + if tx.Error != nil { + msg := "Failed to cancel installation: error starting DB transaction" + log.WithError(tx.Error).Errorf(msg) + b.eventsHandler.AddEvent(ctx, c.ID.String(), models.EventSeverityError, msg, time.Now()) + return installer.NewCancelInstallationInternalServerError().WithPayload( + common.GenerateError(http.StatusInternalServerError, errors.New(msg))) + } + + if err := tx.Preload("Hosts").First(&c, "id = ?", params.ClusterID).Error; err != nil { + log.WithError(err).Errorf("Failed to cancel installation: could not find cluster %s", params.ClusterID) + if gorm.IsRecordNotFoundError(err) { + return installer.NewCancelInstallationNotFound().WithPayload(common.GenerateError(http.StatusNotFound, err)) + } + return installer.NewCancelInstallationInternalServerError().WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + + // cancellation is made by setting the cluster and hosts states to error. 
+ if err := b.clusterApi.CancelInstallation(ctx, &c, "Installation was canceled by user", tx); err != nil { + return common.GenerateErrorResponder(err) + } + for _, h := range c.Hosts { + if err := b.hostApi.CancelInstallation(ctx, h, "Installation was canceled by user", tx); err != nil { + return common.GenerateErrorResponder(err) + } + if err := b.customizeHost(h); err != nil { + return installer.NewCancelInstallationInternalServerError().WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + } + + if err := tx.Commit().Error; err != nil { + log.Errorf("Failed to cancel installation: error committing DB transaction (%s)", err) + msg := "Failed to cancel installation: error committing DB transaction" + b.eventsHandler.AddEvent(ctx, c.ID.String(), models.EventSeverityError, msg, time.Now()) + return installer.NewCancelInstallationInternalServerError().WithPayload( + common.GenerateError(http.StatusInternalServerError, errors.New("DB error, failed to commit transaction"))) + } + txSuccess = true + + return installer.NewCancelInstallationAccepted().WithPayload(&c.Cluster) +} + +func (b *bareMetalInventory) ResetCluster(ctx context.Context, params installer.ResetClusterParams) middleware.Responder { + log := logutil.FromContext(ctx, b.log) + log.Infof("resetting cluster %s", params.ClusterID) + + var c common.Cluster + + txSuccess := false + tx := b.db.Begin() + defer func() { + if !txSuccess { + log.Error("reset cluster failed") + tx.Rollback() + } + if r := recover(); r != nil { + log.Error("reset cluster failed") + tx.Rollback() + } + }() + + if tx.Error != nil { + log.WithError(tx.Error).Errorf("failed to start db transaction") + return installer.NewResetClusterInternalServerError().WithPayload( + common.GenerateError(http.StatusInternalServerError, errors.New("DB error, failed to start transaction"))) + } + + if err := tx.Preload("Hosts").First(&c, "id = ?", params.ClusterID).Error; err != nil { + log.WithError(err).Errorf("failed to find 
cluster %s", params.ClusterID) + if gorm.IsRecordNotFoundError(err) { + return installer.NewResetClusterNotFound().WithPayload(common.GenerateError(http.StatusNotFound, err)) + } + return installer.NewResetClusterInternalServerError().WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + + if err := b.clusterApi.ResetCluster(ctx, &c, "cluster was reset by user", tx); err != nil { + return common.GenerateErrorResponder(err) + } + + // abort installation files generation job if running. + ctime := time.Time(c.CreatedAt) + cTimestamp := strconv.FormatInt(ctime.Unix(), 10) + jobName := fmt.Sprintf("%s-%s-%s", kubeconfigPrefix, c.ID.String(), cTimestamp)[:63] + if err := b.job.Delete(ctx, jobName, b.Namespace); err != nil { + return installer.NewResetClusterInternalServerError().WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + + for _, h := range c.Hosts { + if err := b.hostApi.ResetHost(ctx, h, "cluster was reset by user", tx); err != nil { + return common.GenerateErrorResponder(err) + } + if err := b.customizeHost(h); err != nil { + return installer.NewResetClusterInternalServerError().WithPayload(common.GenerateError(http.StatusInternalServerError, err)) + } + } + + if err := b.deleteS3ClusterFiles(ctx, &c); err != nil { + return common.NewApiError(http.StatusInternalServerError, err) + } + if err := b.deleteDNSRecordSets(ctx, c); err != nil { + log.Warnf("failed to delete DNS record sets for base domain: %s", c.BaseDNSDomain) + } + + if err := tx.Commit().Error; err != nil { + log.Error(err) + return installer.NewResetClusterInternalServerError().WithPayload( + common.GenerateError(http.StatusInternalServerError, errors.New("DB error, failed to commit transaction"))) + } + txSuccess = true + + return installer.NewResetClusterAccepted().WithPayload(&c.Cluster) +} + +func (b *bareMetalInventory) CompleteInstallation(ctx context.Context, params installer.CompleteInstallationParams) middleware.Responder { + log := 
logutil.FromContext(ctx, b.log) + + log.Infof("complete cluster %s installation", params.ClusterID) + + var c common.Cluster + if err := b.db.Preload("Hosts").First(&c, "id = ?", params.ClusterID).Error; err != nil { + return common.GenerateErrorResponder(err) + } + + if err := b.clusterApi.CompleteInstallation(ctx, &c, *params.CompletionParams.IsSuccess, params.CompletionParams.ErrorInfo); err != nil { + log.WithError(err).Errorf("Failed to set complete cluster state on %s ", params.ClusterID.String()) + return common.GenerateErrorResponder(err) + } + + return installer.NewCompleteInstallationAccepted().WithPayload(&c.Cluster) +} + +func (b *bareMetalInventory) deleteS3ClusterFiles(ctx context.Context, c *common.Cluster) error { + for _, name := range clusterFileNames { + if err := b.s3Client.DeleteFileFromS3(ctx, fmt.Sprintf("%s/%s", c.ID, name), b.S3Bucket); err != nil { + return err + } + } + return nil +} + +func (b *bareMetalInventory) createDNSRecordSets(ctx context.Context, cluster common.Cluster) error { + return b.changeDNSRecordSets(ctx, cluster, false) +} + +func (b *bareMetalInventory) deleteDNSRecordSets(ctx context.Context, cluster common.Cluster) error { + return b.changeDNSRecordSets(ctx, cluster, true) +} + +func (b *bareMetalInventory) changeDNSRecordSets(ctx context.Context, cluster common.Cluster, delete bool) error { + log := logutil.FromContext(ctx, b.log) + + domain, err := b.getDNSDomain(cluster.Name, cluster.BaseDNSDomain) + if err != nil { + return err + } + if domain == nil { + // No supported base DNS domain specified + return nil + } + + switch domain.Provider { + case "route53": + var dnsProvider dnsproviders.Provider = dnsproviders.Route53{ + RecordSet: dnsproviders.RecordSet{ + RecordSetType: "A", + TTL: 60, + }, + HostedZoneID: domain.ID, + SharedCreds: true, + } + + dnsRecordSetFunc := dnsProvider.CreateRecordSet + if delete { + dnsRecordSetFunc = dnsProvider.DeleteRecordSet + } + + // Create/Delete A record for API Virtual IP + 
_, err := dnsRecordSetFunc(domain.APIDomainName, cluster.APIVip) + if err != nil { + log.WithError(err).Errorf("failed to update DNS record: (%s, %s)", + domain.APIDomainName, cluster.APIVip) + return err + } + // Create/Delete A record for Ingress Virtual IP + _, err = dnsRecordSetFunc(domain.IngressDomainName, cluster.IngressVip) + if err != nil { + log.WithError(err).Errorf("failed to update DNS record: (%s, %s)", + domain.IngressDomainName, cluster.IngressVip) + return err + } + log.Infof("Successfully created/deleted DNS records for base domain: %s", cluster.BaseDNSDomain) + } + return nil +} + +type dnsDomain struct { + Name string + ID string + Provider string + APIDomainName string + IngressDomainName string +} + +func (b *bareMetalInventory) getDNSDomain(clusterName, baseDNSDomainName string) (*dnsDomain, error) { + var dnsDomainID string + var dnsProvider string + + // Parse base domains from config + if val, ok := b.Config.BaseDNSDomains[baseDNSDomainName]; ok { + re := regexp.MustCompile("/") + if !re.MatchString(val) { + return nil, errors.New(fmt.Sprintf("Invalid DNS domain: %s", val)) + } + s := re.Split(val, 2) + dnsDomainID = s[0] + dnsProvider = s[1] + } else { + // No base domains defined in config + return nil, nil + } + + if dnsDomainID == "" || dnsProvider == "" { + // Specified domain is not defined in config + return nil, nil + } + + return &dnsDomain{ + Name: baseDNSDomainName, + ID: dnsDomainID, + Provider: dnsProvider, + APIDomainName: fmt.Sprintf("%s.%s.%s", "api", clusterName, baseDNSDomainName), + IngressDomainName: fmt.Sprintf("*.%s.%s.%s", "apps", clusterName, baseDNSDomainName), + }, nil +} + +func (b *bareMetalInventory) validateDNSDomain(params installer.UpdateClusterParams, log logrus.FieldLogger) *installer.UpdateClusterConflict { + clusterName := swag.StringValue(params.ClusterUpdateParams.Name) + clusterBaseDomain := swag.StringValue(params.ClusterUpdateParams.BaseDNSDomain) + dnsDomain, err := b.getDNSDomain(clusterName, 
clusterBaseDomain) + if err == nil && dnsDomain != nil { + // Cluster's baseDNSDomain is defined in config (BaseDNSDomains map) + if err = b.validateBaseDNS(dnsDomain); err != nil { + log.WithError(err).Errorf("Invalid base DNS domain: %s", clusterBaseDomain) + return installer.NewUpdateClusterConflict(). + WithPayload(common.GenerateError(http.StatusConflict, + errors.New("Base DNS domain isn't configured properly"))) + } + if err = b.validateDNSRecords(dnsDomain); err != nil { + log.WithError(err).Errorf("DNS records already exist for cluster: %s", params.ClusterID) + return installer.NewUpdateClusterConflict(). + WithPayload(common.GenerateError(http.StatusConflict, + errors.New("DNS records already exist for cluster - please change 'Cluster Name'"))) + } + } + return nil +} + +func (b *bareMetalInventory) validateBaseDNS(domain *dnsDomain) error { + return validations.ValidateBaseDNS(domain.Name, domain.ID, domain.Provider) +} + +func (b *bareMetalInventory) validateDNSRecords(domain *dnsDomain) error { + vipAddresses := []string{domain.APIDomainName, domain.IngressDomainName} + return validations.CheckDNSRecordsExistence(vipAddresses, domain.ID, domain.Provider) +} + +func ipAsUint(ipStr string, log logrus.FieldLogger) uint64 { + parts := strings.Split(ipStr, ".") + if len(parts) != 4 { + log.Warnf("Invalid ip %s", ipStr) + return 0 + } + var result uint64 = 0 + for _, p := range parts { + result = result << 8 + converted, err := strconv.ParseUint(p, 10, 64) + if err != nil { + log.WithError(err).Warnf("Conversion of %s to uint", p) + return 0 + } + result += converted + } + return result +} + +func applyLimit(ret models.FreeAddressesList, limitParam *int64) models.FreeAddressesList { + if limitParam != nil && *limitParam >= 0 && *limitParam < int64(len(ret)) { + return ret[:*limitParam] + } + return ret +} + +func (b *bareMetalInventory) getFreeAddresses(params installer.GetFreeAddressesParams, log logrus.FieldLogger) (models.FreeAddressesList, error) { + var 
hosts []*models.Host + err := b.db.Select("free_addresses").Find(&hosts, "cluster_id = ? and status in (?)", params.ClusterID.String(), []string{host.HostStatusInsufficient, host.HostStatusKnown}).Error + if err != nil { + return nil, common.NewApiError(http.StatusInternalServerError, errors.Wrapf(err, "Error retrieving hosts for cluster %s", params.ClusterID.String())) + } + if len(hosts) == 0 { + return nil, common.NewApiError(http.StatusNotFound, errors.Errorf("No hosts were found for cluster %s", params.ClusterID)) + } + resultingSet := network.MakeFreeAddressesSet(hosts, params.Network, params.Prefix, log) + + ret := models.FreeAddressesList{} + for a := range resultingSet { + ret = append(ret, a) + } + + // Sort addresses + sort.Slice(ret, func(i, j int) bool { + return ipAsUint(ret[i].String(), log) < ipAsUint(ret[j].String(), log) + }) + + ret = applyLimit(ret, params.Limit) + + return ret, nil +} + +func (b *bareMetalInventory) GetFreeAddresses(ctx context.Context, params installer.GetFreeAddressesParams) middleware.Responder { + log := logutil.FromContext(ctx, b.log) + + results, err := b.getFreeAddresses(params, log) + if err != nil { + log.WithError(err).Warn("GetFreeAddresses") + return common.GenerateErrorResponder(err) + } + return installer.NewGetFreeAddressesOK().WithPayload(results) +} + +func (b *bareMetalInventory) customizeHost(host *models.Host) error { + b.customizeHostStages(host) + b.customizeHostname(host) + return nil +} + +func (b *bareMetalInventory) customizeHostStages(host *models.Host) { + host.ProgressStages = b.hostApi.GetStagesByRole(host.Role, host.Bootstrap) +} + +func (b *bareMetalInventory) customizeHostname(host *models.Host) { + host.RequestedHostname = common.GetHostnameForMsg(host) +} diff --git a/internal/bminventory/inventory_test.go b/internal/bminventory/inventory_test.go index 313d07538..4ec6a65c9 100644 --- a/internal/bminventory/inventory_test.go +++ b/internal/bminventory/inventory_test.go @@ -1,14 +1,26 @@ 
package bminventory import ( + "bytes" "context" "encoding/json" "fmt" "io/ioutil" - "reflect" + "net/http" + "os" + "sort" "testing" + "time" + + "github.com/filanov/bm-inventory/internal/metrics" + + "github.com/filanov/bm-inventory/internal/common" + "github.com/go-openapi/runtime/middleware" "github.com/filanov/bm-inventory/internal/events" + "github.com/filanov/bm-inventory/pkg/filemiddleware" + + awsS3Client "github.com/filanov/bm-inventory/pkg/s3Client" "github.com/filanov/bm-inventory/internal/cluster" "github.com/filanov/bm-inventory/internal/host" @@ -20,7 +32,8 @@ import ( "github.com/golang/mock/gomock" "github.com/google/uuid" "github.com/jinzhu/gorm" - _ "github.com/jinzhu/gorm/dialects/sqlite" + _ "github.com/jinzhu/gorm/dialects/postgres" + "github.com/kelseyhightower/envconfig" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -28,19 +41,15 @@ import ( "github.com/sirupsen/logrus" ) +const ClusterStatusInstalled = "installed" + func TestValidator(t *testing.T) { RegisterFailHandler(Fail) + common.InitializeDBTest() + defer common.TerminateDBTest() RunSpecs(t, "inventory_test") } -func prepareDB() *gorm.DB { - db, err := gorm.Open("sqlite3", ":memory:") - Expect(err).ShouldNot(HaveOccurred()) - //db = db.Debug() - db.AutoMigrate(&models.Cluster{}, &models.Host{}) - return db -} - func getTestLog() logrus.FieldLogger { l := logrus.New() l.SetOutput(ioutil.Discard) @@ -61,43 +70,56 @@ var _ = Describe("GenerateClusterISO", func() { ctrl *gomock.Controller mockJob *job.MockAPI mockEvents *events.MockHandler + dbName = "generate_cluster_iso" ) BeforeEach(func() { Expect(envconfig.Process("test", &cfg)).ShouldNot(HaveOccurred()) ctrl = gomock.NewController(GinkgoT()) - db = prepareDB() - mockJob = job.NewMockAPI(ctrl) + db = common.PrepareTestDB(dbName) mockEvents = events.NewMockHandler(ctrl) - bm = NewBareMetalInventory(db, getTestLog(), nil, nil, cfg, mockJob, mockEvents) + mockJob = job.NewMockAPI(ctrl) + mockJob.EXPECT().Create(gomock.Any(), 
gomock.Any()).Return(nil).Times(1) + bm = NewBareMetalInventory(db, getTestLog(), nil, nil, cfg, mockJob, mockEvents, nil, nil) + }) + + AfterEach(func() { + ctrl.Finish() + common.DeleteTestDB(db, dbName) }) - registerCluster := func() *models.Cluster { + registerCluster := func(pullSecretSet bool) *common.Cluster { clusterId := strfmt.UUID(uuid.New().String()) - cluster := models.Cluster{ - ID: &clusterId, - } + cluster := common.Cluster{Cluster: models.Cluster{ + ID: &clusterId, + PullSecretSet: pullSecretSet, + }, PullSecret: "{\"auths\":{\"cloud.openshift.com\":{\"auth\":\"dG9rZW46dGVzdAo=\",\"email\":\"coyote@acme.com\"}}}"} Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) return &cluster } It("success", func() { - clusterId := registerCluster().ID + clusterId := registerCluster(true).ID mockJob.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) mockJob.EXPECT().Create(gomock.Any(), gomock.Any()).Return(nil).Times(1) mockJob.EXPECT().Monitor(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) + mockEvents.EXPECT().AddEvent(gomock.Any(), clusterId.String(), models.EventSeverityInfo, "Generated image (proxy URL is \"\", SSH public key is not set)", gomock.Any()) generateReply := bm.GenerateClusterISO(ctx, installer.GenerateClusterISOParams{ ClusterID: *clusterId, ImageCreateParams: &models.ImageCreateParams{}, }) Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewGenerateClusterISOCreated())) + getReply := bm.GetCluster(ctx, installer.GetClusterParams{ClusterID: *clusterId}).(*installer.GetClusterOK) + Expect(getReply.Payload.ImageInfo.GeneratorVersion).To(Equal("quay.io/ocpmetal/installer-image-build:latest")) }) It("success with proxy", func() { - clusterId := registerCluster().ID + clusterId := registerCluster(true).ID mockJob.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) mockJob.EXPECT().Create(gomock.Any(), gomock.Any()).Return(nil).Times(1) 
mockJob.EXPECT().Monitor(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) + mockEvents.EXPECT().AddEvent(gomock.Any(), clusterId.String(), models.EventSeverityInfo, "Generated image (proxy URL is \"http://1.1.1.1:1234\", SSH public key "+ + "is not set)", gomock.Any()) generateReply := bm.GenerateClusterISO(ctx, installer.GenerateClusterISOParams{ ClusterID: *clusterId, ImageCreateParams: &models.ImageCreateParams{ProxyURL: "http://1.1.1.1:1234"}, @@ -113,9 +135,10 @@ var _ = Describe("GenerateClusterISO", func() { }) It("failed_to_create_job", func() { - clusterId := registerCluster().ID + clusterId := registerCluster(true).ID mockJob.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) mockJob.EXPECT().Create(gomock.Any(), gomock.Any()).Return(fmt.Errorf("error")).Times(1) + mockEvents.EXPECT().AddEvent(gomock.Any(), clusterId.String(), models.EventSeverityError, gomock.Any(), gomock.Any()) generateReply := bm.GenerateClusterISO(ctx, installer.GenerateClusterISOParams{ ClusterID: *clusterId, ImageCreateParams: &models.ImageCreateParams{}, @@ -124,42 +147,105 @@ var _ = Describe("GenerateClusterISO", func() { }) It("job_failed", func() { - clusterId := registerCluster().ID + clusterId := registerCluster(true).ID + mockJob.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) + mockJob.EXPECT().Create(gomock.Any(), gomock.Any()).Return(nil).Times(1) + mockJob.EXPECT().Monitor(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("error")).Times(1) + mockEvents.EXPECT().AddEvent(gomock.Any(), clusterId.String(), models.EventSeverityError, gomock.Any(), gomock.Any()) + generateReply := bm.GenerateClusterISO(ctx, installer.GenerateClusterISOParams{ + ClusterID: *clusterId, + ImageCreateParams: &models.ImageCreateParams{}, + }) + Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewGenerateClusterISOInternalServerError())) + }) + + It("failed_missing_pull_secret", func() { + clusterId := 
registerCluster(false).ID + generateReply := bm.GenerateClusterISO(ctx, installer.GenerateClusterISOParams{ + ClusterID: *clusterId, + ImageCreateParams: &models.ImageCreateParams{}, + }) + Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewGenerateClusterISOBadRequest())) + }) + + It("failed_missing_openshift_token", func() { + cluster := registerCluster(true) + cluster.PullSecret = "{\"auths\":{\"another.cloud.com\":{\"auth\":\"dG9rZW46dGVzdAo=\",\"email\":\"coyote@acme.com\"}}}" + clusterId := cluster.ID mockJob.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) mockJob.EXPECT().Create(gomock.Any(), gomock.Any()).Return(nil).Times(1) mockJob.EXPECT().Monitor(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("error")).Times(1) + mockEvents.EXPECT().AddEvent(gomock.Any(), clusterId.String(), models.EventSeverityError, gomock.Any(), gomock.Any()) generateReply := bm.GenerateClusterISO(ctx, installer.GenerateClusterISOParams{ ClusterID: *clusterId, ImageCreateParams: &models.ImageCreateParams{}, }) Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewGenerateClusterISOInternalServerError())) }) +}) + +var _ = Describe("RegisterHost", func() { + var ( + bm *bareMetalInventory + cfg Config + db *gorm.DB + ctx = context.Background() + dbName = "register_host_api" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName) + bm = NewBareMetalInventory(db, getTestLog(), nil, nil, cfg, nil, nil, nil, nil) + }) AfterEach(func() { - ctrl.Finish() - db.Close() + common.DeleteTestDB(db, dbName) }) + It("register host to none existing cluster", func() { + hostID := strfmt.UUID(uuid.New().String()) + reply := bm.RegisterHost(ctx, installer.RegisterHostParams{ + ClusterID: strfmt.UUID(uuid.New().String()), + NewHostParams: &models.HostCreateParams{ + DiscoveryAgentVersion: "v1", + HostID: &hostID, + }, + }) + apiErr, ok := reply.(*common.ApiErrorResponse) + Expect(ok).Should(BeTrue()) + 
Expect(apiErr.StatusCode()).Should(Equal(int32(http.StatusNotFound))) + }) }) var _ = Describe("GetNextSteps", func() { var ( - bm *bareMetalInventory - cfg Config - db *gorm.DB - ctx = context.Background() - ctrl *gomock.Controller - mockHostApi *host.MockAPI - mockEvents *events.MockHandler + bm *bareMetalInventory + cfg Config + db *gorm.DB + ctx = context.Background() + ctrl *gomock.Controller + mockHostApi *host.MockAPI + mockJob *job.MockAPI + mockEvents *events.MockHandler + defaultNextStepIn int64 + dbName = "get_next_steps" ) BeforeEach(func() { Expect(envconfig.Process("test", &cfg)).ShouldNot(HaveOccurred()) ctrl = gomock.NewController(GinkgoT()) - db = prepareDB() + defaultNextStepIn = 60 + db = common.PrepareTestDB(dbName) mockHostApi = host.NewMockAPI(ctrl) mockEvents = events.NewMockHandler(ctrl) - bm = NewBareMetalInventory(db, getTestLog(), mockHostApi, nil, cfg, nil, mockEvents) + mockJob = job.NewMockAPI(ctrl) + mockJob.EXPECT().Create(gomock.Any(), gomock.Any()).Return(nil).Times(1) + bm = NewBareMetalInventory(db, getTestLog(), mockHostApi, nil, cfg, mockJob, mockEvents, nil, nil) + }) + + AfterEach(func() { + ctrl.Finish() + common.DeleteTestDB(db, dbName) }) It("get_next_steps_unknown_host", func() { @@ -184,8 +270,8 @@ var _ = Describe("GetNextSteps", func() { Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) var err error - expectedStepsReply := models.Steps{&models.Step{StepType: models.StepTypeHardwareInfo}, - &models.Step{StepType: models.StepTypeConnectivityCheck}} + expectedStepsReply := models.Steps{NextInstructionSeconds: defaultNextStepIn, Instructions: []*models.Step{{StepType: models.StepTypeInventory}, + {StepType: models.StepTypeConnectivityCheck}}} mockHostApi.EXPECT().GetNextSteps(gomock.Any(), gomock.Any()).Return(expectedStepsReply, err) reply := bm.GetNextSteps(ctx, installer.GetNextStepsParams{ ClusterID: *clusterId, @@ -193,20 +279,114 @@ var _ = Describe("GetNextSteps", func() { }) 
Expect(reply).Should(BeAssignableToTypeOf(installer.NewGetNextStepsOK())) stepsReply := reply.(*installer.GetNextStepsOK).Payload - expectedStepsType := []models.StepType{models.StepTypeHardwareInfo, models.StepTypeConnectivityCheck} - Expect(stepsReply).To(HaveLen(len(expectedStepsType))) - for i, step := range stepsReply { + expectedStepsType := []models.StepType{models.StepTypeInventory, models.StepTypeConnectivityCheck} + Expect(stepsReply.Instructions).To(HaveLen(len(expectedStepsType))) + for i, step := range stepsReply.Instructions { Expect(step.StepType).Should(Equal(expectedStepsType[i])) } }) +}) + +func makeFreeAddresses(network string, ips ...strfmt.IPv4) *models.FreeNetworkAddresses { + return &models.FreeNetworkAddresses{ + FreeAddresses: ips, + Network: network, + } +} + +func makeFreeNetworksAddresses(elems ...*models.FreeNetworkAddresses) models.FreeNetworksAddresses { + return models.FreeNetworksAddresses(elems) +} + +func makeFreeNetworksAddressesStr(elems ...*models.FreeNetworkAddresses) string { + toMarshal := models.FreeNetworksAddresses(elems) + b, err := json.Marshal(&toMarshal) + Expect(err).ToNot(HaveOccurred()) + return string(b) +} + +var _ = Describe("PostStepReply", func() { + var ( + bm *bareMetalInventory + cfg Config + db *gorm.DB + ctx = context.Background() + ctrl *gomock.Controller + mockHostApi *host.MockAPI + mockJob *job.MockAPI + mockEvents *events.MockHandler + dbName = "post_step_reply" + ) + + BeforeEach(func() { + Expect(envconfig.Process("test", &cfg)).ShouldNot(HaveOccurred()) + ctrl = gomock.NewController(GinkgoT()) + db = common.PrepareTestDB(dbName) + mockHostApi = host.NewMockAPI(ctrl) + mockEvents = events.NewMockHandler(ctrl) + mockJob = job.NewMockAPI(ctrl) + mockJob.EXPECT().Create(gomock.Any(), gomock.Any()).Return(nil).Times(1) + bm = NewBareMetalInventory(db, getTestLog(), mockHostApi, nil, cfg, mockJob, mockEvents, nil, nil) + }) AfterEach(func() { ctrl.Finish() - db.Close() + common.DeleteTestDB(db, dbName) 
+ }) + + var makeStepReply = func(clusterID, hostID strfmt.UUID, freeAddresses models.FreeNetworksAddresses) installer.PostStepReplyParams { + b, _ := json.Marshal(&freeAddresses) + return installer.PostStepReplyParams{ + ClusterID: clusterID, + HostID: hostID, + Reply: &models.StepReply{ + Output: string(b), + StepType: models.StepTypeFreeNetworkAddresses, + }, + } + } + + It("free addresses success", func() { + clusterId := strToUUID(uuid.New().String()) + hostId := strToUUID(uuid.New().String()) + host := models.Host{ + ID: hostId, + ClusterID: *clusterId, + Status: swag.String("discovering"), + } + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + toMarshal := makeFreeNetworksAddresses(makeFreeAddresses("10.0.0.0/24", "10.0.0.0", "10.0.0.1")) + params := makeStepReply(*clusterId, *hostId, toMarshal) + reply := bm.PostStepReply(ctx, params) + Expect(reply).Should(BeAssignableToTypeOf(installer.NewPostStepReplyNoContent())) + var h models.Host + Expect(db.Take(&h, "cluster_id = ? and id = ?", clusterId.String(), hostId.String()).Error).ToNot(HaveOccurred()) + var f models.FreeNetworksAddresses + Expect(json.Unmarshal([]byte(h.FreeAddresses), &f)).ToNot(HaveOccurred()) + Expect(&f).To(Equal(&toMarshal)) }) + + It("free addresses empty", func() { + clusterId := strToUUID(uuid.New().String()) + hostId := strToUUID(uuid.New().String()) + host := models.Host{ + ID: hostId, + ClusterID: *clusterId, + Status: swag.String("discovering"), + } + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + toMarshal := makeFreeNetworksAddresses() + params := makeStepReply(*clusterId, *hostId, toMarshal) + reply := bm.PostStepReply(ctx, params) + Expect(reply).Should(BeAssignableToTypeOf(installer.NewPostStepReplyInternalServerError())) + var h models.Host + Expect(db.Take(&h, "cluster_id = ? 
and id = ?", clusterId.String(), hostId.String()).Error).ToNot(HaveOccurred()) + Expect(h.FreeAddresses).To(BeEmpty()) + }) + }) -var _ = Describe("UpdateHostInstallProgress", func() { +var _ = Describe("GetFreeAddresses", func() { var ( bm *bareMetalInventory cfg Config @@ -214,23 +394,192 @@ var _ = Describe("UpdateHostInstallProgress", func() { ctx = context.Background() ctrl *gomock.Controller mockHostApi *host.MockAPI + mockJob *job.MockAPI mockEvents *events.MockHandler + dbName = "get_free_addresses" ) BeforeEach(func() { Expect(envconfig.Process("test", &cfg)).ShouldNot(HaveOccurred()) ctrl = gomock.NewController(GinkgoT()) - db = prepareDB() + db = common.PrepareTestDB(dbName) mockHostApi = host.NewMockAPI(ctrl) mockEvents = events.NewMockHandler(ctrl) - bm = NewBareMetalInventory(db, getTestLog(), mockHostApi, nil, cfg, nil, mockEvents) + mockJob = job.NewMockAPI(ctrl) + mockJob.EXPECT().Create(gomock.Any(), gomock.Any()).Return(nil).Times(1) + bm = NewBareMetalInventory(db, getTestLog(), mockHostApi, nil, cfg, mockJob, mockEvents, nil, nil) + }) + + AfterEach(func() { + ctrl.Finish() + common.DeleteTestDB(db, dbName) + }) + + var makeHost = func(clusterId *strfmt.UUID, freeAddresses, status string) *models.Host { + hostId := strToUUID(uuid.New().String()) + ret := models.Host{ + ID: hostId, + ClusterID: *clusterId, + FreeAddresses: freeAddresses, + Status: &status, + } + Expect(db.Create(&ret).Error).ToNot(HaveOccurred()) + return &ret + } + + var makeGetFreeAddressesParams = func(clusterID strfmt.UUID, network string) installer.GetFreeAddressesParams { + return installer.GetFreeAddressesParams{ + ClusterID: clusterID, + Network: network, + } + } + + It("success", func() { + clusterId := strToUUID(uuid.New().String()) + + _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("10.0.0.0/16", "10.0.10.1", "10.0.20.0", "10.0.9.250")), host.HostStatusInsufficient) + params := makeGetFreeAddressesParams(*clusterId, "10.0.0.0/16") + reply := 
bm.GetFreeAddresses(ctx, params) + Expect(reply).Should(BeAssignableToTypeOf(installer.NewGetFreeAddressesOK())) + actualReply := reply.(*installer.GetFreeAddressesOK) + Expect(len(actualReply.Payload)).To(Equal(3)) + Expect(actualReply.Payload[0]).To(Equal(strfmt.IPv4("10.0.9.250"))) + Expect(actualReply.Payload[1]).To(Equal(strfmt.IPv4("10.0.10.1"))) + Expect(actualReply.Payload[2]).To(Equal(strfmt.IPv4("10.0.20.0"))) + }) + + It("success with limit", func() { + clusterId := strToUUID(uuid.New().String()) + + _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("10.0.0.0/16", "10.0.10.1", "10.0.20.0", "10.0.9.250")), host.HostStatusInsufficient) + params := makeGetFreeAddressesParams(*clusterId, "10.0.0.0/16") + params.Limit = swag.Int64(2) + reply := bm.GetFreeAddresses(ctx, params) + Expect(reply).Should(BeAssignableToTypeOf(installer.NewGetFreeAddressesOK())) + actualReply := reply.(*installer.GetFreeAddressesOK) + Expect(len(actualReply.Payload)).To(Equal(2)) + Expect(actualReply.Payload[0]).To(Equal(strfmt.IPv4("10.0.9.250"))) + Expect(actualReply.Payload[1]).To(Equal(strfmt.IPv4("10.0.10.1"))) + }) + + It("success with limit and prefix", func() { + clusterId := strToUUID(uuid.New().String()) + + _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("10.0.0.0/16", "10.0.10.1", "10.0.20.0", "10.0.9.250", "10.0.1.0")), host.HostStatusInsufficient) + params := makeGetFreeAddressesParams(*clusterId, "10.0.0.0/16") + params.Limit = swag.Int64(2) + params.Prefix = swag.String("10.0.1") + reply := bm.GetFreeAddresses(ctx, params) + Expect(reply).Should(BeAssignableToTypeOf(installer.NewGetFreeAddressesOK())) + actualReply := reply.(*installer.GetFreeAddressesOK) + Expect(len(actualReply.Payload)).To(Equal(2)) + Expect(actualReply.Payload[0]).To(Equal(strfmt.IPv4("10.0.1.0"))) + Expect(actualReply.Payload[1]).To(Equal(strfmt.IPv4("10.0.10.1"))) + }) + + It("one disconnected", func() { + clusterId := strToUUID(uuid.New().String()) + 
+ _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("10.0.0.0/24", "10.0.0.0", "10.0.0.1")), host.HostStatusInsufficient) + _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("10.0.0.0/24", "10.0.0.0", "10.0.0.2")), host.HostStatusKnown) + _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("10.0.0.0/24")), host.HostStatusDisconnected) + params := makeGetFreeAddressesParams(*clusterId, "10.0.0.0/24") + reply := bm.GetFreeAddresses(ctx, params) + Expect(reply).Should(BeAssignableToTypeOf(installer.NewGetFreeAddressesOK())) + actualReply := reply.(*installer.GetFreeAddressesOK) + Expect(len(actualReply.Payload)).To(Equal(1)) + Expect(actualReply.Payload).To(ContainElement(strfmt.IPv4("10.0.0.0"))) + }) + + It("empty result", func() { + clusterId := strToUUID(uuid.New().String()) + + _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("192.168.0.0/24"), + makeFreeAddresses("10.0.0.0/24", "10.0.0.0", "10.0.0.1")), host.HostStatusInsufficient) + _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("10.0.0.0/24", "10.0.0.0", "10.0.0.2"), + makeFreeAddresses("192.168.0.0/24")), host.HostStatusKnown) + _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("10.0.0.0/24", "10.0.0.1", "10.0.0.2")), host.HostStatusInsufficient) + params := makeGetFreeAddressesParams(*clusterId, "10.0.0.0/24") + reply := bm.GetFreeAddresses(ctx, params) + Expect(reply).Should(BeAssignableToTypeOf(installer.NewGetFreeAddressesOK())) + actualReply := reply.(*installer.GetFreeAddressesOK) + Expect(actualReply.Payload).To(BeEmpty()) + }) + + It("malformed", func() { + clusterId := strToUUID(uuid.New().String()) + + _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("192.168.0.0/24"), + makeFreeAddresses("10.0.0.0/24", "10.0.0.0", "10.0.0.1")), host.HostStatusInsufficient) + _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("10.0.0.0/24", 
"10.0.0.0", "10.0.0.2"), + makeFreeAddresses("192.168.0.0/24")), host.HostStatusKnown) + _ = makeHost(clusterId, "blah ", host.HostStatusInsufficient) + params := makeGetFreeAddressesParams(*clusterId, "10.0.0.0/24") + reply := bm.GetFreeAddresses(ctx, params) + Expect(reply).Should(BeAssignableToTypeOf(installer.NewGetFreeAddressesOK())) + actualReply := reply.(*installer.GetFreeAddressesOK) + Expect(len(actualReply.Payload)).To(Equal(1)) + Expect(actualReply.Payload).To(ContainElement(strfmt.IPv4("10.0.0.0"))) + }) + + It("no matching hosts", func() { + clusterId := strToUUID(uuid.New().String()) + + _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("192.168.0.0/24"), + makeFreeAddresses("10.0.0.0/24", "10.0.0.0", "10.0.0.1")), host.HostStatusDisconnected) + _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("10.0.0.0/24", "10.0.0.0", "10.0.0.2"), + makeFreeAddresses("192.168.0.0/24")), host.HostStatusDiscovering) + _ = makeHost(clusterId, makeFreeNetworksAddressesStr(makeFreeAddresses("10.0.0.1/24", "10.0.0.0", "10.0.0.2")), host.HostStatusInstalling) + params := makeGetFreeAddressesParams(*clusterId, "10.0.0.0/24") + verifyApiError(bm.GetFreeAddresses(ctx, params), http.StatusNotFound) + }) +}) + +var _ = Describe("UpdateHostInstallProgress", func() { + var ( + bm *bareMetalInventory + cfg Config + db *gorm.DB + ctx = context.Background() + ctrl *gomock.Controller + mockJob *job.MockAPI + mockHostApi *host.MockAPI + mockEvents *events.MockHandler + defaultProgressStage models.HostStage + dbName = "update_host_install_progress" + ) + + BeforeEach(func() { + Expect(envconfig.Process("test", &cfg)).ShouldNot(HaveOccurred()) + ctrl = gomock.NewController(GinkgoT()) + db = common.PrepareTestDB(dbName) + mockHostApi = host.NewMockAPI(ctrl) + mockEvents = events.NewMockHandler(ctrl) + mockJob = job.NewMockAPI(ctrl) + mockJob.EXPECT().Create(gomock.Any(), gomock.Any()).Return(nil).Times(1) + bm = NewBareMetalInventory(db, 
getTestLog(), mockHostApi, nil, cfg, mockJob, mockEvents, nil, nil) + defaultProgressStage = "some progress" + }) + + AfterEach(func() { + ctrl.Finish() + common.DeleteTestDB(db, dbName) }) Context("host exists", func() { - var hostID, clusterID strfmt.UUID + var ( + hostID strfmt.UUID + clusterID strfmt.UUID + progressParams *models.HostProgress + ) + BeforeEach(func() { hostID = strfmt.UUID(uuid.New().String()) clusterID = strfmt.UUID(uuid.New().String()) + progressParams = &models.HostProgress{ + CurrentStage: defaultProgressStage, + } + err := db.Create(&models.Host{ ID: &hostID, ClusterID: clusterID, @@ -240,12 +589,12 @@ var _ = Describe("UpdateHostInstallProgress", func() { }) It("success", func() { - mockEvents.EXPECT().AddEvent(gomock.Any(), hostID.String(), gomock.Any(), gomock.Any(), clusterID.String()) + mockEvents.EXPECT().AddEvent(gomock.Any(), hostID.String(), models.EventSeverityInfo, gomock.Any(), gomock.Any(), clusterID.String()) mockHostApi.EXPECT().UpdateInstallProgress(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) reply := bm.UpdateHostInstallProgress(ctx, installer.UpdateHostInstallProgressParams{ - ClusterID: clusterID, - HostInstallProgressParams: "some progress", - HostID: hostID, + ClusterID: clusterID, + HostProgress: progressParams, + HostID: hostID, }) Expect(reply).Should(BeAssignableToTypeOf(installer.NewUpdateHostInstallProgressOK())) }) @@ -253,26 +602,23 @@ var _ = Describe("UpdateHostInstallProgress", func() { It("update_failed", func() { mockHostApi.EXPECT().UpdateInstallProgress(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("some error")) reply := bm.UpdateHostInstallProgress(ctx, installer.UpdateHostInstallProgressParams{ - ClusterID: clusterID, - HostInstallProgressParams: "some progress", - HostID: hostID, + ClusterID: clusterID, + HostProgress: progressParams, + HostID: hostID, }) - Expect(reply).Should(BeAssignableToTypeOf(installer.NewUpdateHostInstallProgressOK())) + 
Expect(reply).Should(BeAssignableToTypeOf(installer.NewUpdateHostInstallProgressInternalServerError())) }) }) It("host_dont_exist", func() { reply := bm.UpdateHostInstallProgress(ctx, installer.UpdateHostInstallProgressParams{ - ClusterID: strfmt.UUID(uuid.New().String()), - HostInstallProgressParams: "some progress", - HostID: strfmt.UUID(uuid.New().String()), + ClusterID: strfmt.UUID(uuid.New().String()), + HostProgress: &models.HostProgress{ + CurrentStage: defaultProgressStage, + }, + HostID: strfmt.UUID(uuid.New().String()), }) - Expect(reply).Should(BeAssignableToTypeOf(installer.NewUpdateHostInstallProgressOK())) - }) - - AfterEach(func() { - ctrl.Finish() - db.Close() + Expect(reply).Should(BeAssignableToTypeOf(installer.NewUpdateHostInstallProgressNotFound())) }) }) @@ -280,6 +626,7 @@ var _ = Describe("cluster", func() { masterHostId1 := strfmt.UUID(uuid.New().String()) masterHostId2 := strfmt.UUID(uuid.New().String()) masterHostId3 := strfmt.UUID(uuid.New().String()) + masterHostId4 := strfmt.UUID(uuid.New().String()) var ( bm *bareMetalInventory @@ -289,12 +636,34 @@ var _ = Describe("cluster", func() { ctrl *gomock.Controller mockHostApi *host.MockAPI mockClusterApi *cluster.MockAPI + mockS3Client *awsS3Client.MockS3Client mockJob *job.MockAPI clusterID strfmt.UUID mockEvents *events.MockHandler + mockMetric *metrics.MockAPI + dbName = "inventory_cluster" ) - addHost := func(hostId strfmt.UUID, role string, state string, clusterId strfmt.UUID, inventory string, db *gorm.DB) models.Host { + BeforeEach(func() { + Expect(envconfig.Process("test", &cfg)).ShouldNot(HaveOccurred()) + ctrl = gomock.NewController(GinkgoT()) + db = common.PrepareTestDB(dbName) + mockClusterApi = cluster.NewMockAPI(ctrl) + mockHostApi = host.NewMockAPI(ctrl) + mockS3Client = awsS3Client.NewMockS3Client(ctrl) + mockEvents = events.NewMockHandler(ctrl) + mockJob = job.NewMockAPI(ctrl) + mockJob.EXPECT().Create(gomock.Any(), gomock.Any()).Return(nil).Times(1) + mockMetric = 
metrics.NewMockAPI(ctrl) + bm = NewBareMetalInventory(db, getTestLog(), mockHostApi, mockClusterApi, cfg, mockJob, mockEvents, mockS3Client, mockMetric) + }) + + AfterEach(func() { + ctrl.Finish() + common.DeleteTestDB(db, dbName) + }) + + addHost := func(hostId strfmt.UUID, role models.HostRole, state string, clusterId strfmt.UUID, inventory string, db *gorm.DB) models.Host { host := models.Host{ ID: &hostId, ClusterID: clusterId, @@ -305,15 +674,36 @@ var _ = Describe("cluster", func() { Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) return host } - getDisk := func() *models.Disk { - disk := models.Disk{DriveType: "SSD", Name: "loop0", SizeBytes: 0} - return &disk + + updateMachineCidr := func(clusterID strfmt.UUID, machineCidr string, db *gorm.DB) { + Expect(db.Model(&common.Cluster{Cluster: models.Cluster{ID: &clusterID}}).UpdateColumn("machine_network_cidr", machineCidr).Error).To(Not(HaveOccurred())) + } + + mockClusterPrepareForInstallationSuccess := func(mockClusterApi *cluster.MockAPI) { + mockClusterApi.EXPECT().PrepareForInstallation(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) + } + mockClusterPrepareForInstallationFailure := func(mockClusterApi *cluster.MockAPI) { + mockClusterApi.EXPECT().PrepareForInstallation(gomock.Any(), gomock.Any(), gomock.Any()). + Return(errors.Errorf("error")).Times(1) + } + mockHostPrepareForInstallationSuccess := func(mockHostApi *host.MockAPI, times int) { + mockHostApi.EXPECT().PrepareForInstallation(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(times) + } + mockHostPrepareForRefresh := func(mockHostApi *host.MockAPI) { + mockHostApi.EXPECT().RefreshStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + } + mockHostPrepareForInstallationFailure := func(mockHostApi *host.MockAPI, times int) { + mockHostApi.EXPECT().PrepareForInstallation(gomock.Any(), gomock.Any(), gomock.Any()). 
+ Return(errors.Errorf("error")).Times(times) } setDefaultInstall := func(mockClusterApi *cluster.MockAPI) { mockClusterApi.EXPECT().Install(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) } - setDefaultGetMasterNodesIds := func(mockClusterApi *cluster.MockAPI) { - mockClusterApi.EXPECT().GetMasterNodesIds(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*strfmt.UUID{&masterHostId1, &masterHostId2, &masterHostId3}, nil) + setDefaultGetMasterNodesIds := func(mockClusterApi *cluster.MockAPI, times int) { + mockClusterApi.EXPECT().GetMasterNodesIds(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*strfmt.UUID{&masterHostId1, &masterHostId2, &masterHostId3}, nil).Times(times) + } + set4GetMasterNodesIds := func(mockClusterApi *cluster.MockAPI) { + mockClusterApi.EXPECT().GetMasterNodesIds(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*strfmt.UUID{&masterHostId1, &masterHostId2, &masterHostId3, &masterHostId4}, nil) } setDefaultJobCreate := func(mockJobApi *job.MockAPI) { mockJob.EXPECT().Create(gomock.Any(), gomock.Any()).Return(nil).Times(1) @@ -321,16 +711,60 @@ var _ = Describe("cluster", func() { setDefaultJobMonitor := func(mockJobApi *job.MockAPI) { mockJob.EXPECT().Monitor(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) } - setDefaultHostInstall := func(mockClusterApi *cluster.MockAPI) { - mockHostApi.EXPECT().Install(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() - } - setDefaultHostGetHostValidDisks := func(mockClusterApi *cluster.MockAPI) { - mockHostApi.EXPECT().GetHostValidDisks(gomock.Any()).Return([]*models.Disk{getDisk()}, nil).AnyTimes() + setDefaultHostInstall := func(mockClusterApi *cluster.MockAPI, done chan int) { + count := 0 + mockHostApi.EXPECT().Install(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(3). 
+ Do(func(ctx context.Context, h *models.Host, db *gorm.DB) { + count += 1 + if count == 3 { + done <- 1 + } + }) + } setDefaultHostSetBootstrap := func(mockClusterApi *cluster.MockAPI) { - mockHostApi.EXPECT().SetBootstrap(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + mockHostApi.EXPECT().SetBootstrap(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + } + setIgnitionGeneratorVersionSuccess := func(mockClusterApi *cluster.MockAPI) { + mockClusterApi.EXPECT().SetGeneratorVersion(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(1) + } + setDefaultMetricInstallatioStarted := func(mockMetricApi *metrics.MockAPI) { + mockMetricApi.EXPECT().InstallationStarted(gomock.Any()).AnyTimes() + } + mockHandlePreInstallationError := func(mockClusterApi *cluster.MockAPI, done chan int) { + mockClusterApi.EXPECT().HandlePreInstallError(gomock.Any(), gomock.Any(), gomock.Any()).Times(1). + Do(func(ctx, c, err interface{}) { done <- 1 }) + } + setCancelInstallationSuccess := func() { + mockClusterApi.EXPECT().CancelInstallation(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + mockHostApi.EXPECT().CancelInstallation(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + } + setCancelInstallationHostConflict := func() { + mockClusterApi.EXPECT().CancelInstallation(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + mockHostApi.EXPECT().CancelInstallation(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(common.NewApiError(http.StatusConflict, nil)).Times(1) + } + setCancelInstallationInternalServerError := func() { + mockClusterApi.EXPECT().CancelInstallation(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(common.NewApiError(http.StatusInternalServerError, nil)).Times(1) + } + setResetClusterSuccess := func() { + mockJob.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + 
mockS3Client.EXPECT().DeleteFileFromS3(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + mockClusterApi.EXPECT().ResetCluster(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + mockHostApi.EXPECT().ResetHost(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + } + setResetClusterConflict := func() { + mockJob.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + mockS3Client.EXPECT().DeleteFileFromS3(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + mockClusterApi.EXPECT().ResetCluster(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(common.NewApiError(http.StatusConflict, nil)).Times(1) + } + setResetClusterInternalServerError := func() { + mockJob.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + mockS3Client.EXPECT().DeleteFileFromS3(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + mockClusterApi.EXPECT().ResetCluster(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(common.NewApiError(http.StatusInternalServerError, nil)).Times(1) + } + mockIsInstallable := func() { + mockHostApi.EXPECT().IsInstallable(gomock.Any()).Return(true).Times(3) } - getInventoryStr := func(ipv4Addresses ...string) string { inventory := models.Inventory{Interfaces: []*models.Interface{ { @@ -341,105 +775,820 @@ var _ = Describe("cluster", func() { return string(ret) } - BeforeEach(func() { - Expect(envconfig.Process("test", &cfg)).ShouldNot(HaveOccurred()) - ctrl = gomock.NewController(GinkgoT()) - db = prepareDB() - mockJob = job.NewMockAPI(ctrl) - mockClusterApi = cluster.NewMockAPI(ctrl) - mockHostApi = host.NewMockAPI(ctrl) - mockEvents = events.NewMockHandler(ctrl) - bm = NewBareMetalInventory(db, getTestLog(), mockHostApi, mockClusterApi, cfg, mockJob, mockEvents) + sortedHosts := func(arr []strfmt.UUID) []strfmt.UUID { + sort.Slice(arr, func(i, j int) bool { return arr[i] < arr[j] }) 
+ return arr + } + + sortedNetworks := func(arr []*models.HostNetwork) []*models.HostNetwork { + sort.Slice(arr, func(i, j int) bool { return arr[i].Cidr < arr[j].Cidr }) + return arr + } + + Context("Get", func() { + { + BeforeEach(func() { + clusterID = strfmt.UUID(uuid.New().String()) + err := db.Create(&common.Cluster{Cluster: models.Cluster{ + ID: &clusterID, + APIVip: "10.11.12.13", + IngressVip: "10.11.12.14", + MachineNetworkCidr: "10.11.0.0/16", + }}).Error + Expect(err).ShouldNot(HaveOccurred()) + + addHost(masterHostId1, models.HostRoleMaster, "known", clusterID, getInventoryStr("1.2.3.4/24", "10.11.50.90/16"), db) + addHost(masterHostId2, models.HostRoleMaster, "known", clusterID, getInventoryStr("1.2.3.5/24", "10.11.50.80/16"), db) + addHost(masterHostId3, models.HostRoleMaster, "known", clusterID, getInventoryStr("1.2.3.6/24", "7.8.9.10/24"), db) + }) + + It("GetCluster", func() { + mockHostApi.EXPECT().GetStagesByRole(gomock.Any(), gomock.Any()).Return(nil).Times(3) // Number of hosts + reply := bm.GetCluster(ctx, installer.GetClusterParams{ + ClusterID: clusterID, + }) + actual, ok := reply.(*installer.GetClusterOK) + Expect(ok).To(BeTrue()) + Expect(actual.Payload.APIVip).To(BeEquivalentTo("10.11.12.13")) + Expect(actual.Payload.IngressVip).To(BeEquivalentTo("10.11.12.14")) + Expect(actual.Payload.MachineNetworkCidr).To(Equal("10.11.0.0/16")) + expectedNetworks := sortedNetworks([]*models.HostNetwork{ + { + Cidr: "1.2.3.0/24", + HostIds: sortedHosts([]strfmt.UUID{ + masterHostId1, + masterHostId2, + masterHostId3, + }), + }, + { + Cidr: "10.11.0.0/16", + HostIds: sortedHosts([]strfmt.UUID{ + masterHostId1, + masterHostId2, + }), + }, + { + Cidr: "7.8.9.0/24", + HostIds: []strfmt.UUID{ + masterHostId3, + }, + }, + }) + actualNetworks := sortedNetworks(actual.Payload.HostNetworks) + Expect(len(actualNetworks)).To(Equal(3)) + actualNetworks[0].HostIds = sortedHosts(actualNetworks[0].HostIds) + actualNetworks[1].HostIds = 
sortedHosts(actualNetworks[1].HostIds) + actualNetworks[2].HostIds = sortedHosts(actualNetworks[2].HostIds) + Expect(actualNetworks).To(Equal(expectedNetworks)) + }) + } + }) + Context("Update", func() { + It("update_cluster_while_installing", func() { + clusterID = strfmt.UUID(uuid.New().String()) + err := db.Create(&common.Cluster{Cluster: models.Cluster{ + ID: &clusterID, + }}).Error + Expect(err).ShouldNot(HaveOccurred()) + + mockClusterApi.EXPECT().VerifyClusterUpdatability(gomock.Any()).Return(errors.Errorf("wrong state")).Times(1) + + apiVip := "8.8.8.8" + reply := bm.UpdateCluster(ctx, installer.UpdateClusterParams{ + ClusterID: clusterID, + ClusterUpdateParams: &models.ClusterUpdateParams{ + APIVip: &apiVip, + }, + }) + Expect(reply).To(BeAssignableToTypeOf(installer.NewUpdateClusterConflict())) + }) + + It("Invalid pull-secret", func() { + pullSecret := "asdfasfda" + reply := bm.UpdateCluster(ctx, installer.UpdateClusterParams{ + ClusterID: clusterID, + ClusterUpdateParams: &models.ClusterUpdateParams{ + PullSecret: &pullSecret, + }, + }) + Expect(reply).To(BeAssignableToTypeOf(installer.NewUpdateClusterBadRequest())) + }) + + It("empty pull-secret", func() { + pullSecret := "" + reply := bm.UpdateCluster(ctx, installer.UpdateClusterParams{ + ClusterID: clusterID, + ClusterUpdateParams: &models.ClusterUpdateParams{ + PullSecret: &pullSecret, + }, + }) + Expect(reply).To(BeAssignableToTypeOf(installer.NewUpdateClusterNotFound())) + }) + Context("Update Network", func() { + BeforeEach(func() { + clusterID = strfmt.UUID(uuid.New().String()) + err := db.Create(&common.Cluster{Cluster: models.Cluster{ + ID: &clusterID, + }}).Error + Expect(err).ShouldNot(HaveOccurred()) + addHost(masterHostId1, models.HostRoleMaster, "known", clusterID, getInventoryStr("1.2.3.4/24", "10.11.50.90/16"), db) + addHost(masterHostId2, models.HostRoleMaster, "known", clusterID, getInventoryStr("1.2.3.5/24", "10.11.50.80/16"), db) + addHost(masterHostId3, models.HostRoleMaster, 
"known", clusterID, getInventoryStr("1.2.3.6/24", "7.8.9.10/24"), db) + err = db.Model(&models.Host{ID: &masterHostId3, ClusterID: clusterID}).UpdateColumn("free_addresses", + makeFreeNetworksAddressesStr(makeFreeAddresses("10.11.0.0/16", "10.11.12.15", "10.11.12.16"))).Error + Expect(err).ToNot(HaveOccurred()) + mockClusterApi.EXPECT().VerifyClusterUpdatability(gomock.Any()).Return(nil).Times(1) + }) + + It("No machine network", func() { + apiVip := "8.8.8.8" + reply := bm.UpdateCluster(ctx, installer.UpdateClusterParams{ + ClusterID: clusterID, + ClusterUpdateParams: &models.ClusterUpdateParams{ + APIVip: &apiVip, + }, + }) + Expect(reply).To(BeAssignableToTypeOf(&common.ApiErrorResponse{})) + Expect(reply.(*common.ApiErrorResponse).StatusCode()).To(Equal(int32(http.StatusBadRequest))) + }) + It("Api and ingress mismatch", func() { + apiVip := "10.11.12.15" + ingressVip := "1.2.3.20" + reply := bm.UpdateCluster(ctx, installer.UpdateClusterParams{ + ClusterID: clusterID, + ClusterUpdateParams: &models.ClusterUpdateParams{ + APIVip: &apiVip, + IngressVip: &ingressVip, + }, + }) + Expect(reply).To(BeAssignableToTypeOf(&common.ApiErrorResponse{})) + Expect(reply.(*common.ApiErrorResponse).StatusCode()).To(Equal(int32(http.StatusBadRequest))) + }) + It("Same api and ingress", func() { + apiVip := "10.11.12.15" + ingressVip := apiVip + reply := bm.UpdateCluster(ctx, installer.UpdateClusterParams{ + ClusterID: clusterID, + ClusterUpdateParams: &models.ClusterUpdateParams{ + APIVip: &apiVip, + IngressVip: &ingressVip, + }, + }) + Expect(reply).To(BeAssignableToTypeOf(&common.ApiErrorResponse{})) + Expect(reply.(*common.ApiErrorResponse).StatusCode()).To(Equal(int32(http.StatusBadRequest))) + }) + It("Update success", func() { + apiVip := "10.11.12.15" + ingressVip := "10.11.12.16" + mockHostApi.EXPECT().GetStagesByRole(gomock.Any(), gomock.Any()).Return(nil).Times(3) // Number of hosts + mockHostApi.EXPECT().RefreshStatus(gomock.Any(), gomock.Any(), 
gomock.Any()).Return(nil).Times(3) + mockClusterApi.EXPECT().RefreshStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).Times(1) + reply := bm.UpdateCluster(ctx, installer.UpdateClusterParams{ + ClusterID: clusterID, + ClusterUpdateParams: &models.ClusterUpdateParams{ + APIVip: &apiVip, + IngressVip: &ingressVip, + }, + }) + Expect(reply).To(BeAssignableToTypeOf(installer.NewUpdateClusterCreated())) + actual := reply.(*installer.UpdateClusterCreated) + Expect(actual.Payload.APIVip).To(Equal(apiVip)) + Expect(actual.Payload.IngressVip).To(Equal(ingressVip)) + Expect(actual.Payload.MachineNetworkCidr).To(Equal("10.11.0.0/16")) + expectedNetworks := sortedNetworks([]*models.HostNetwork{ + { + Cidr: "1.2.3.0/24", + HostIds: sortedHosts([]strfmt.UUID{ + masterHostId1, + masterHostId2, + masterHostId3, + }), + }, + { + Cidr: "10.11.0.0/16", + HostIds: sortedHosts([]strfmt.UUID{ + masterHostId1, + masterHostId2, + }), + }, + { + Cidr: "7.8.9.0/24", + HostIds: []strfmt.UUID{ + masterHostId3, + }, + }, + }) + actualNetworks := sortedNetworks(actual.Payload.HostNetworks) + Expect(len(actualNetworks)).To(Equal(3)) + actualNetworks[0].HostIds = sortedHosts(actualNetworks[0].HostIds) + actualNetworks[1].HostIds = sortedHosts(actualNetworks[1].HostIds) + actualNetworks[2].HostIds = sortedHosts(actualNetworks[2].HostIds) + Expect(actualNetworks).To(Equal(expectedNetworks)) + }) + }) }) Context("Install", func() { + var DoneChannel chan int + + waitForDoneChannel := func() { + select { + case <-DoneChannel: + break + case <-time.After(1 * time.Second): + panic("not all api calls where made") + } + } + BeforeEach(func() { + DoneChannel = make(chan int) clusterID = strfmt.UUID(uuid.New().String()) - err := db.Create(&models.Cluster{ - ID: &clusterID, - APIVip: "10.11.12.13", - }).Error + err := db.Create(&common.Cluster{Cluster: models.Cluster{ + ID: &clusterID, + APIVip: "10.11.12.13", + IngressVip: "10.11.20.50", + MachineNetworkCidr: "10.11.0.0/16", + Status: 
swag.String(models.ClusterStatusReady), + }}).Error Expect(err).ShouldNot(HaveOccurred()) - addHost(masterHostId1, "master", "known", clusterID, getInventoryStr("1.2.3.4/24"), db) - addHost(masterHostId2, "master", "known", clusterID, getInventoryStr("1.2.3.5/24", "10.11.50.80/16"), db) - addHost(masterHostId3, "master", "known", clusterID, getInventoryStr(), db) + addHost(masterHostId1, models.HostRoleMaster, "known", clusterID, getInventoryStr("1.2.3.4/24", "10.11.50.90/16"), db) + addHost(masterHostId2, models.HostRoleMaster, "known", clusterID, getInventoryStr("1.2.3.5/24", "10.11.50.80/16"), db) + addHost(masterHostId3, models.HostRoleMaster, "known", clusterID, getInventoryStr("10.11.200.180/16"), db) + err = db.Model(&models.Host{ID: &masterHostId3, ClusterID: clusterID}).UpdateColumn("free_addresses", + makeFreeNetworksAddressesStr(makeFreeAddresses("10.11.0.0/16", "10.11.12.15", "10.11.12.16", "10.11.12.13", "10.11.20.50"))).Error + Expect(err).ToNot(HaveOccurred()) }) It("success", func() { - - setDefaultInstall(mockClusterApi) - setDefaultGetMasterNodesIds(mockClusterApi) - + mockClusterPrepareForInstallationSuccess(mockClusterApi) + mockHostPrepareForRefresh(mockHostApi) + mockHostPrepareForInstallationSuccess(mockHostApi, 3) + mockIsInstallable() setDefaultJobCreate(mockJob) setDefaultJobMonitor(mockJob) - - setDefaultHostInstall(mockClusterApi) - setDefaultHostGetHostValidDisks(mockClusterApi) + setIgnitionGeneratorVersionSuccess(mockClusterApi) + setDefaultInstall(mockClusterApi) + setDefaultGetMasterNodesIds(mockClusterApi, 2) setDefaultHostSetBootstrap(mockClusterApi) + setDefaultHostInstall(mockClusterApi, DoneChannel) + setDefaultMetricInstallatioStarted(mockMetric) reply := bm.InstallCluster(ctx, installer.InstallClusterParams{ ClusterID: clusterID, }) Expect(reply).Should(BeAssignableToTypeOf(installer.NewInstallClusterAccepted())) + waitForDoneChannel() }) + + It("failed to prepare cluster", func() { + // validations + mockIsInstallable() + 
mockHostPrepareForRefresh(mockHostApi) + setDefaultGetMasterNodesIds(mockClusterApi, 1) + // sync prepare for installation + mockClusterPrepareForInstallationFailure(mockClusterApi) + + reply := bm.InstallCluster(ctx, installer.InstallClusterParams{ + ClusterID: clusterID, + }) + verifyApiError(reply, http.StatusInternalServerError) + }) + + It("failed to prepare host", func() { + // validations + mockHostPrepareForRefresh(mockHostApi) + mockIsInstallable() + setDefaultGetMasterNodesIds(mockClusterApi, 1) + // sync prepare for installation + mockClusterPrepareForInstallationSuccess(mockClusterApi) + mockHostPrepareForInstallationSuccess(mockHostApi, 2) + mockHostPrepareForInstallationFailure(mockHostApi, 1) + + reply := bm.InstallCluster(ctx, installer.InstallClusterParams{ + ClusterID: clusterID, + }) + verifyApiError(reply, http.StatusInternalServerError) + }) + + It("cidr calculate error", func() { + mockHostPrepareForRefresh(mockHostApi) + updateMachineCidr(clusterID, "", db) + reply := bm.InstallCluster(ctx, installer.InstallClusterParams{ + ClusterID: clusterID, + }) + verifyApiError(reply, http.StatusBadRequest) + }) + + It("cidr mismatch", func() { + mockHostPrepareForRefresh(mockHostApi) + updateMachineCidr(clusterID, "1.1.0.0/16", db) + reply := bm.InstallCluster(ctx, installer.InstallClusterParams{ + ClusterID: clusterID, + }) + verifyApiError(reply, http.StatusBadRequest) + }) + + It("Additional non matching master", func() { + mockHostPrepareForRefresh(mockHostApi) + addHost(masterHostId4, models.HostRoleMaster, "known", clusterID, getInventoryStr("10.12.200.180/16"), db) + set4GetMasterNodesIds(mockClusterApi) + + reply := bm.InstallCluster(ctx, installer.InstallClusterParams{ + ClusterID: clusterID, + }) + verifyApiError(reply, http.StatusBadRequest) + }) + It("cluster failed to update", func() { + mockHostPrepareForRefresh(mockHostApi) + mockIsInstallable() + mockClusterPrepareForInstallationSuccess(mockClusterApi) + 
mockHostPrepareForInstallationSuccess(mockHostApi, 3) + setDefaultJobCreate(mockJob) + setDefaultJobMonitor(mockJob) + setIgnitionGeneratorVersionSuccess(mockClusterApi) + mockHandlePreInstallationError(mockClusterApi, DoneChannel) + mockClusterApi.EXPECT().GetMasterNodesIds(gomock.Any(), gomock.Any(), gomock.Any()).Return([]*strfmt.UUID{&masterHostId1, &masterHostId2, &masterHostId3}, nil) mockClusterApi.EXPECT().Install(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.Errorf("cluster has a error")) reply := bm.InstallCluster(ctx, installer.InstallClusterParams{ ClusterID: clusterID, }) - Expect(reflect.TypeOf(reply)).Should(Equal(reflect.TypeOf(installer.NewInstallClusterConflict()))) + Expect(reply).Should(BeAssignableToTypeOf(installer.NewInstallClusterAccepted())) + waitForDoneChannel() }) - It("host failed to install", func() { + It("not all hosts are ready", func() { + mockHostPrepareForRefresh(mockHostApi) + setDefaultGetMasterNodesIds(mockClusterApi, 1) + // Two out of three nodes are not ready + mockHostApi.EXPECT().IsInstallable(gomock.Any()).Return(false).Times(2) + mockHostApi.EXPECT().IsInstallable(gomock.Any()).Return(true).Times(1) + reply := bm.InstallCluster(ctx, installer.InstallClusterParams{ + ClusterID: clusterID, + }) + verifyApiError(reply, http.StatusConflict) + }) + + It("host failed to install", func() { + mockHostPrepareForRefresh(mockHostApi) + mockClusterPrepareForInstallationSuccess(mockClusterApi) + mockHostPrepareForInstallationSuccess(mockHostApi, 3) + mockIsInstallable() setDefaultInstall(mockClusterApi) - setDefaultGetMasterNodesIds(mockClusterApi) + setDefaultGetMasterNodesIds(mockClusterApi, 2) + setDefaultJobCreate(mockJob) + setDefaultJobMonitor(mockJob) + setIgnitionGeneratorVersionSuccess(mockClusterApi) - mockHostApi.EXPECT().Install(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.Errorf("host has a error")).AnyTimes() - setDefaultHostGetHostValidDisks(mockClusterApi) + 
mockHostApi.EXPECT().Install(gomock.Any(), gomock.Any(), gomock.Any()). + Return(errors.Errorf("host has a error")).AnyTimes() setDefaultHostSetBootstrap(mockClusterApi) + mockHandlePreInstallationError(mockClusterApi, DoneChannel) reply := bm.InstallCluster(ctx, installer.InstallClusterParams{ ClusterID: clusterID, }) - Expect(reflect.TypeOf(reply)).Should(Equal(reflect.TypeOf(installer.NewInstallClusterConflict()))) - + Expect(reply).Should(BeAssignableToTypeOf(installer.NewInstallClusterAccepted())) + waitForDoneChannel() }) - It("GetMasterNodesIds fails", func() { + It("list of masters for setting bootstrap return empty list", func() { + mockHostPrepareForRefresh(mockHostApi) + mockClusterPrepareForInstallationSuccess(mockClusterApi) + mockHostPrepareForInstallationSuccess(mockHostApi, 3) + mockIsInstallable() setDefaultInstall(mockClusterApi) + setDefaultJobCreate(mockJob) + setDefaultJobMonitor(mockJob) + setIgnitionGeneratorVersionSuccess(mockClusterApi) + // first call is for verifyClusterNetworkConfig mockClusterApi.EXPECT().GetMasterNodesIds(gomock.Any(), gomock.Any(), gomock.Any()). - Return([]*strfmt.UUID{&masterHostId1, &masterHostId2, &masterHostId3}, errors.Errorf("nop")) + Return([]*strfmt.UUID{&masterHostId1, &masterHostId2, &masterHostId3}, nil).Times(1) + // second call is for setBootstrapHost + mockClusterApi.EXPECT().GetMasterNodesIds(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]*strfmt.UUID{}, nil).Times(1) + mockHandlePreInstallationError(mockClusterApi, DoneChannel) - setDefaultHostInstall(mockClusterApi) - setDefaultHostGetHostValidDisks(mockClusterApi) + reply := bm.InstallCluster(ctx, installer.InstallClusterParams{ + ClusterID: clusterID, + }) + Expect(reply).Should(BeAssignableToTypeOf(installer.NewInstallClusterAccepted())) + waitForDoneChannel() + }) + + It("GetMasterNodesIds fails", func() { + mockHostPrepareForRefresh(mockHostApi) + mockClusterApi.EXPECT().GetMasterNodesIds(gomock.Any(), gomock.Any(), gomock.Any()). 
+ Return([]*strfmt.UUID{&masterHostId1, &masterHostId2, &masterHostId3}, errors.Errorf("nop")) reply := bm.InstallCluster(ctx, installer.InstallClusterParams{ ClusterID: clusterID, }) - Expect(reflect.TypeOf(reply)).Should(Equal(reflect.TypeOf(installer.NewInstallClusterInternalServerError()))) + verifyApiError(reply, http.StatusInternalServerError) }) - It("GetMasterNodesIds returns empty list", func() { - setDefaultInstall(mockClusterApi) + It("GetMasterNodesIds returns empty list", func() { + mockHostPrepareForRefresh(mockHostApi) mockClusterApi.EXPECT().GetMasterNodesIds(gomock.Any(), gomock.Any(), gomock.Any()). Return([]*strfmt.UUID{&masterHostId1, &masterHostId2, &masterHostId3}, errors.Errorf("nop")) - setDefaultHostInstall(mockClusterApi) - setDefaultHostGetHostValidDisks(mockClusterApi) - reply := bm.InstallCluster(ctx, installer.InstallClusterParams{ ClusterID: clusterID, }) - Expect(reflect.TypeOf(reply)).Should(Equal(reflect.TypeOf(installer.NewInstallClusterInternalServerError()))) + verifyApiError(reply, http.StatusInternalServerError) + }) + + It("get DNS domain success", func() { + bm.Config.BaseDNSDomains = map[string]string{ + "dns.example.com": "abc/route53", + } + dnsDomain, err := bm.getDNSDomain("test-cluster", "dns.example.com") + Expect(err).NotTo(HaveOccurred()) + Expect(dnsDomain.ID).Should(Equal("abc")) + Expect(dnsDomain.Provider).Should(Equal("route53")) + Expect(dnsDomain.APIDomainName).Should(Equal("api.test-cluster.dns.example.com")) + Expect(dnsDomain.IngressDomainName).Should(Equal("*.apps.test-cluster.dns.example.com")) + }) + It("get DNS domain invalid", func() { + bm.Config.BaseDNSDomains = map[string]string{ + "dns.example.com": "abc", + } + _, err := bm.getDNSDomain("test-cluster", "dns.example.com") + Expect(err).To(HaveOccurred()) + }) + It("get DNS domain undefined", func() { + dnsDomain, err := bm.getDNSDomain("test-cluster", "dns.example.com") + Expect(err).NotTo(HaveOccurred()) + Expect(dnsDomain).Should(BeNil()) + }) + 
+ Context("CancelInstallation", func() { + BeforeEach(func() { + mockHostApi.EXPECT().GetStagesByRole(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + }) + It("cancel installation success", func() { + setCancelInstallationSuccess() + + cancelReply := bm.CancelInstallation(ctx, installer.CancelInstallationParams{ + ClusterID: clusterID, + }) + Expect(cancelReply).Should(BeAssignableToTypeOf(installer.NewCancelInstallationAccepted())) + }) + It("cancel installation conflict", func() { + setCancelInstallationHostConflict() + + cancelReply := bm.CancelInstallation(ctx, installer.CancelInstallationParams{ + ClusterID: clusterID, + }) + + verifyApiError(cancelReply, http.StatusConflict) + }) + It("cancel installation internal error", func() { + setCancelInstallationInternalServerError() + + cancelReply := bm.CancelInstallation(ctx, installer.CancelInstallationParams{ + ClusterID: clusterID, + }) + + verifyApiError(cancelReply, http.StatusInternalServerError) + }) + }) + + Context("reset cluster", func() { + BeforeEach(func() { + mockHostApi.EXPECT().GetStagesByRole(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + }) + It("reset installation success", func() { + setResetClusterSuccess() + + resetReply := bm.ResetCluster(ctx, installer.ResetClusterParams{ + ClusterID: clusterID, + }) + Expect(resetReply).Should(BeAssignableToTypeOf(installer.NewResetClusterAccepted())) + }) + It("reset cluster conflict", func() { + setResetClusterConflict() + + cancelReply := bm.ResetCluster(ctx, installer.ResetClusterParams{ + ClusterID: clusterID, + }) + + verifyApiError(cancelReply, http.StatusConflict) + }) + It("reset cluster internal error", func() { + setResetClusterInternalServerError() + + cancelReply := bm.ResetCluster(ctx, installer.ResetClusterParams{ + ClusterID: clusterID, + }) + + verifyApiError(cancelReply, http.StatusInternalServerError) + }) + }) + + Context("complete installation", func() { + success := true + errorInfo := "dummy" + It("complete success", func() 
{ + mockClusterApi.EXPECT().CompleteInstallation(ctx, gomock.Any(), success, errorInfo).Return(nil).Times(1) + reply := bm.CompleteInstallation(ctx, installer.CompleteInstallationParams{ + ClusterID: clusterID, + CompletionParams: &models.CompletionParams{ErrorInfo: errorInfo, IsSuccess: &success}, + }) + Expect(reply).Should(BeAssignableToTypeOf(installer.NewCompleteInstallationAccepted())) + }) + It("complete bad request", func() { + mockClusterApi.EXPECT().CompleteInstallation(ctx, gomock.Any(), success, errorInfo).Return(common.NewApiError(http.StatusBadRequest, nil)).Times(1) + + reply := bm.CompleteInstallation(ctx, installer.CompleteInstallationParams{ + ClusterID: clusterID, + CompletionParams: &models.CompletionParams{ErrorInfo: errorInfo, IsSuccess: &success}, + }) + + verifyApiError(reply, http.StatusBadRequest) + }) + }) + + AfterEach(func() { + close(DoneChannel) }) }) +}) + +var _ = Describe("KubeConfig download", func() { + + var ( + bm *bareMetalInventory + cfg Config + db *gorm.DB + ctx = context.Background() + ctrl *gomock.Controller + mockS3Client *awsS3Client.MockS3Client + clusterID strfmt.UUID + c common.Cluster + mockJob *job.MockAPI + clusterApi cluster.API + dbName = "kubeconfig_download" + ) + + BeforeEach(func() { + Expect(envconfig.Process("test", &cfg)).ShouldNot(HaveOccurred()) + ctrl = gomock.NewController(GinkgoT()) + db = common.PrepareTestDB(dbName) + clusterID = strfmt.UUID(uuid.New().String()) + mockS3Client = awsS3Client.NewMockS3Client(ctrl) + mockJob = job.NewMockAPI(ctrl) + clusterApi = cluster.NewManager(cluster.Config{}, getTestLog().WithField("pkg", "cluster-monitor"), + db, nil, nil, nil) + + mockJob.EXPECT().Create(gomock.Any(), gomock.Any()).Return(nil).Times(1) + bm = NewBareMetalInventory(db, getTestLog(), nil, clusterApi, cfg, mockJob, nil, mockS3Client, nil) + c = common.Cluster{Cluster: models.Cluster{ + ID: &clusterID, + APIVip: "10.11.12.13", + }} + err := db.Create(&c).Error + 
Expect(err).ShouldNot(HaveOccurred()) + }) + AfterEach(func() { ctrl.Finish() - db.Close() + common.DeleteTestDB(db, dbName) + }) + + It("kubeconfig download no cluster id", func() { + clusterId := strToUUID(uuid.New().String()) + generateReply := bm.DownloadClusterKubeconfig(ctx, installer.DownloadClusterKubeconfigParams{ + ClusterID: *clusterId, + }) + Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewDownloadClusterKubeconfigNotFound())) + }) + It("kubeconfig download cluster is not in installed state", func() { + generateReply := bm.DownloadClusterKubeconfig(ctx, installer.DownloadClusterKubeconfigParams{ + ClusterID: clusterID, + }) + Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewDownloadClusterKubeconfigConflict())) + + }) + It("kubeconfig download s3download failure", func() { + status := ClusterStatusInstalled + c.Status = &status + db.Save(&c) + fileName := fmt.Sprintf("%s/%s", clusterID, kubeconfig) + mockS3Client.EXPECT().DownloadFileFromS3(ctx, fileName, "test").Return(nil, int64(0), errors.Errorf("dummy")) + generateReply := bm.DownloadClusterKubeconfig(ctx, installer.DownloadClusterKubeconfigParams{ + ClusterID: clusterID, + }) + Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewDownloadClusterKubeconfigConflict())) + }) + It("kubeconfig download happy flow", func() { + status := ClusterStatusInstalled + c.Status = &status + db.Save(&c) + fileName := fmt.Sprintf("%s/%s", clusterID, kubeconfig) + r := ioutil.NopCloser(bytes.NewReader([]byte("test"))) + mockS3Client.EXPECT().DownloadFileFromS3(ctx, fileName, "test").Return(r, int64(4), nil) + generateReply := bm.DownloadClusterKubeconfig(ctx, installer.DownloadClusterKubeconfigParams{ + ClusterID: clusterID, + }) + Expect(generateReply).Should(Equal(filemiddleware.NewResponder(installer.NewDownloadClusterKubeconfigOK().WithPayload(r), kubeconfig, 4))) }) }) + +var _ = Describe("UploadClusterIngressCert test", func() { + + var ( + bm *bareMetalInventory + cfg 
Config + db *gorm.DB + ctx = context.Background() + ctrl *gomock.Controller + mockS3Client *awsS3Client.MockS3Client + clusterID strfmt.UUID + c common.Cluster + ingressCa models.IngressCertParams + kubeconfigFile *os.File + kubeconfigNoingress string + kubeconfigObject string + mockJob *job.MockAPI + clusterApi cluster.API + dbName = "upload_cluster_ingress_cert" + ) + + BeforeEach(func() { + Expect(envconfig.Process("test", &cfg)).ShouldNot(HaveOccurred()) + ctrl = gomock.NewController(GinkgoT()) + db = common.PrepareTestDB(dbName) + ingressCa = "-----BEGIN CERTIFICATE-----\nMIIDozCCAougAwIBAgIULCOqWTF" + + "aEA8gNEmV+rb7h1v0r3EwDQYJKoZIhvcNAQELBQAwYTELMAkGA1UEBhMCaXMxCzAJBgNVBAgMAmRk" + + "MQswCQYDVQQHDAJkZDELMAkGA1UECgwCZGQxCzAJBgNVBAsMAmRkMQswCQYDVQQDDAJkZDERMA8GCSqGSIb3DQEJARYCZGQwHhcNMjAwNTI1MTYwNTAwWhcNMzA" + + "wNTIzMTYwNTAwWjBhMQswCQYDVQQGEwJpczELMAkGA1UECAwCZGQxCzAJBgNVBAcMAmRkMQswCQYDVQQKDAJkZDELMAkGA1UECwwCZGQxCzAJBgNVBAMMAmRkMREwDwYJKoZIh" + + "vcNAQkBFgJkZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAML63CXkBb+lvrJKfdfYBHLDYfuaC6exCSqASUAosJWWrfyDiDMUbmfs06PLKyv7N8efDhza74ov0EQJ" + + "NRhMNaCE+A0ceq6ZXmmMswUYFdLAy8K2VMz5mroBFX8sj5PWVr6rDJ2ckBaFKWBB8NFmiK7MTWSIF9n8M107/9a0QURCvThUYu+sguzbsLODFtXUxG5rtTVKBVcPZvEfRky2Tkt4AySFS" + + "mkO6Kf4sBd7MC4mKWZm7K8k7HrZYz2usSpbrEtYGtr6MmN9hci+/ITDPE291DFkzIcDCF493v/3T+7XsnmQajh6kuI+bjIaACfo8N+twEoJf/N1PmphAQdEiC0CAwEAAaNTMFEwHQYDVR0O" + + "BBYEFNvmSprQQ2HUUtPxs6UOuxq9lKKpMB8GA1UdIwQYMBaAFNvmSprQQ2HUUtPxs6UOuxq9lKKpMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJEWxnxtQV5IqPVRr2SM" + + "WNNxcJ7A/wyet39l5VhHjbrQGynk5WS80psn/riLUfIvtzYMWC0IR0pIMQuMDF5sNcKp4D8Xnrd+Bl/4/Iy/iTOoHlw+sPkKv+NL2XR3iO8bSDwjtjvd6L5NkUuzsRoSkQCG2fHASqqgFoyV9Ld" + + "RsQa1w9ZGebtEWLuGsrJtR7gaFECqJnDbb0aPUMixmpMHID8kt154TrLhVFmMEqGGC1GvZVlQ9Of3GP9y7X4vDpHshdlWotOnYKHaeu2d5cRVFHhEbrslkISgh/TRuyl7VIpnjOYUwMBpCiVH6M" + + "2lyDI6UR3Fbz4pVVAxGXnVhBExjBE=\n-----END CERTIFICATE-----" + clusterID = strfmt.UUID(uuid.New().String()) + mockS3Client = 
awsS3Client.NewMockS3Client(ctrl) + mockJob = job.NewMockAPI(ctrl) + clusterApi = cluster.NewManager(cluster.Config{}, getTestLog().WithField("pkg", "cluster-monitor"), + db, nil, nil, nil) + mockJob.EXPECT().Create(gomock.Any(), gomock.Any()).Return(nil).Times(1) + bm = NewBareMetalInventory(db, getTestLog(), nil, clusterApi, cfg, mockJob, nil, mockS3Client, nil) + c = common.Cluster{Cluster: models.Cluster{ + ID: &clusterID, + APIVip: "10.11.12.13", + }} + kubeconfigNoingress = fmt.Sprintf("%s/%s", clusterID, "kubeconfig-noingress") + kubeconfigObject = fmt.Sprintf("%s/%s", clusterID, kubeconfig) + err := db.Create(&c).Error + Expect(err).ShouldNot(HaveOccurred()) + kubeconfigFile, err = os.Open("../../subsystem/test_kubeconfig") + Expect(err).ShouldNot(HaveOccurred()) + + }) + + AfterEach(func() { + ctrl.Finish() + common.DeleteTestDB(db, dbName) + kubeconfigFile.Close() + }) + + objectExists := func() { + mockS3Client.EXPECT().DoesObjectExist(ctx, kubeconfigObject, "test").Return(false, nil).Times(1) + } + + It("UploadClusterIngressCert no cluster id", func() { + clusterId := strToUUID(uuid.New().String()) + generateReply := bm.UploadClusterIngressCert(ctx, installer.UploadClusterIngressCertParams{ + ClusterID: *clusterId, + IngressCertParams: ingressCa, + }) + Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewUploadClusterIngressCertNotFound())) + }) + It("UploadClusterIngressCert cluster is not in installed state", func() { + generateReply := bm.UploadClusterIngressCert(ctx, installer.UploadClusterIngressCertParams{ + ClusterID: clusterID, + IngressCertParams: ingressCa, + }) + Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewUploadClusterIngressCertBadRequest())) + + }) + It("UploadClusterIngressCert kubeconfig already exists, return ok", func() { + status := models.ClusterStatusFinalizing + c.Status = &status + db.Save(&c) + mockS3Client.EXPECT().DoesObjectExist(ctx, kubeconfigObject, "test").Return(true, nil).Times(1) + 
generateReply := bm.UploadClusterIngressCert(ctx, installer.UploadClusterIngressCertParams{ + ClusterID: clusterID, + IngressCertParams: ingressCa, + }) + Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewUploadClusterIngressCertCreated())) + }) + It("UploadClusterIngressCert DoesObjectExist fails ", func() { + status := models.ClusterStatusFinalizing + c.Status = &status + db.Save(&c) + mockS3Client.EXPECT().DoesObjectExist(ctx, kubeconfigObject, "test").Return(true, errors.Errorf("dummy")).Times(1) + generateReply := bm.UploadClusterIngressCert(ctx, installer.UploadClusterIngressCertParams{ + ClusterID: clusterID, + IngressCertParams: ingressCa, + }) + Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewUploadClusterIngressCertInternalServerError())) + }) + It("UploadClusterIngressCert s3download failure", func() { + status := models.ClusterStatusFinalizing + c.Status = &status + db.Save(&c) + objectExists() + mockS3Client.EXPECT().DownloadFileFromS3(ctx, kubeconfigNoingress, "test").Return(nil, int64(0), errors.Errorf("dummy")).Times(1) + generateReply := bm.UploadClusterIngressCert(ctx, installer.UploadClusterIngressCertParams{ + ClusterID: clusterID, + IngressCertParams: ingressCa, + }) + Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewUploadClusterIngressCertInternalServerError())) + }) + It("UploadClusterIngressCert bad kubeconfig, mergeIngressCaIntoKubeconfig failure", func() { + status := models.ClusterStatusFinalizing + c.Status = &status + db.Save(&c) + r := ioutil.NopCloser(bytes.NewReader([]byte("test"))) + objectExists() + mockS3Client.EXPECT().DownloadFileFromS3(ctx, kubeconfigNoingress, "test").Return(r, int64(0), nil).Times(1) + generateReply := bm.UploadClusterIngressCert(ctx, installer.UploadClusterIngressCertParams{ + ClusterID: clusterID, + IngressCertParams: ingressCa, + }) + Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewUploadClusterIngressCertInternalServerError())) + }) + 
It("UploadClusterIngressCert bad ingressCa, mergeIngressCaIntoKubeconfig failure", func() { + status := models.ClusterStatusFinalizing + c.Status = &status + db.Save(&c) + objectExists() + mockS3Client.EXPECT().DownloadFileFromS3(ctx, kubeconfigNoingress, "test").Return(kubeconfigFile, int64(0), nil) + generateReply := bm.UploadClusterIngressCert(ctx, installer.UploadClusterIngressCertParams{ + ClusterID: clusterID, + IngressCertParams: "bad format", + }) + Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewUploadClusterIngressCertInternalServerError())) + }) + + It("UploadClusterIngressCert push fails", func() { + status := models.ClusterStatusFinalizing + c.Status = &status + db.Save(&c) + data, err := os.Open("../../subsystem/test_kubeconfig") + Expect(err).ShouldNot(HaveOccurred()) + kubeConfigAsBytes, err := ioutil.ReadAll(data) + Expect(err).ShouldNot(HaveOccurred()) + log := logrus.New() + merged, err := mergeIngressCaIntoKubeconfig(kubeConfigAsBytes, []byte(ingressCa), log) + Expect(err).ShouldNot(HaveOccurred()) + Expect(merged).ShouldNot(Equal(kubeConfigAsBytes)) + Expect(merged).ShouldNot(Equal([]byte(ingressCa))) + objectExists() + mockS3Client.EXPECT().DownloadFileFromS3(ctx, kubeconfigNoingress, "test").Return(kubeconfigFile, int64(0), nil).Times(1) + mockS3Client.EXPECT().PushDataToS3(ctx, merged, kubeconfigObject, "test").Return(errors.Errorf("Dummy")) + generateReply := bm.UploadClusterIngressCert(ctx, installer.UploadClusterIngressCertParams{ + ClusterID: clusterID, + IngressCertParams: ingressCa, + }) + Expect(generateReply).Should(BeAssignableToTypeOf(installer.NewUploadClusterIngressCertInternalServerError())) + }) + + It("UploadClusterIngressCert download happy flow", func() { + status := models.ClusterStatusInstalled + c.Status = &status + db.Save(&c) + data, err := os.Open("../../subsystem/test_kubeconfig") + Expect(err).ShouldNot(HaveOccurred()) + kubeConfigAsBytes, err := ioutil.ReadAll(data) + 
Expect(err).ShouldNot(HaveOccurred()) + log := logrus.New() + merged, err := mergeIngressCaIntoKubeconfig(kubeConfigAsBytes, []byte(ingressCa), log) + Expect(err).ShouldNot(HaveOccurred()) + objectExists() + mockS3Client.EXPECT().DownloadFileFromS3(ctx, kubeconfigNoingress, "test").Return(kubeconfigFile, int64(0), nil).Times(1) + mockS3Client.EXPECT().PushDataToS3(ctx, merged, kubeconfigObject, "test").Return(nil) + generateReply := bm.UploadClusterIngressCert(ctx, installer.UploadClusterIngressCertParams{ + ClusterID: clusterID, + IngressCertParams: ingressCa, + }) + Expect(generateReply).Should(Equal(installer.NewUploadClusterIngressCertCreated())) + }) +}) + +func verifyApiError(responder middleware.Responder, expectedHttpStatus int32) { + ExpectWithOffset(1, responder).To(BeAssignableToTypeOf(common.NewApiError(expectedHttpStatus, nil))) + concreteError := responder.(*common.ApiErrorResponse) + ExpectWithOffset(1, concreteError.StatusCode()).To(Equal(expectedHttpStatus)) +} diff --git a/internal/cluster/cluster.go b/internal/cluster/cluster.go index 85c221df0..609fee681 100644 --- a/internal/cluster/cluster.go +++ b/internal/cluster/cluster.go @@ -3,16 +3,24 @@ package cluster import ( "context" "fmt" + "net/http" "time" - "github.com/filanov/bm-inventory/internal/events" + "github.com/filanov/bm-inventory/internal/metrics" + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/internal/events" + "github.com/filanov/bm-inventory/internal/host" + "github.com/filanov/bm-inventory/models" + logutil "github.com/filanov/bm-inventory/pkg/log" + "github.com/filanov/bm-inventory/pkg/requestid" + "github.com/filanov/stateswitch" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" "github.com/jinzhu/gorm" + "github.com/pkg/errors" "github.com/sirupsen/logrus" - - "github.com/filanov/bm-inventory/models" + "github.com/thoas/go-funk" ) const minHostsNeededForInstallation = 3 @@ -20,22 +28,22 @@ const minHostsNeededForInstallation = 
3 //go:generate mockgen -source=cluster.go -package=cluster -destination=mock_cluster_api.go type StateAPI interface { - // Refresh state in case of hosts update7 - RefreshStatus(ctx context.Context, c *models.Cluster, db *gorm.DB) (*UpdateReply, error) + // Refresh state in case of hosts update + RefreshStatus(ctx context.Context, c *common.Cluster, db *gorm.DB) (*common.Cluster, error) } type RegistrationAPI interface { // Register a new cluster - RegisterCluster(ctx context.Context, c *models.Cluster) error + RegisterCluster(ctx context.Context, c *common.Cluster) error //deregister cluster - DeregisterCluster(ctx context.Context, c *models.Cluster) error + DeregisterCluster(ctx context.Context, c *common.Cluster) error } type InstallationAPI interface { // Install cluster - Install(ctx context.Context, c *models.Cluster, db *gorm.DB) error + Install(ctx context.Context, c *common.Cluster, db *gorm.DB) error // Get the cluster master nodes ID's - GetMasterNodesIds(ctx context.Context, c *models.Cluster, db *gorm.DB) ([]*strfmt.UUID, error) + GetMasterNodesIds(ctx context.Context, c *common.Cluster, db *gorm.DB) ([]*strfmt.UUID, error) } type API interface { @@ -43,114 +51,314 @@ type API interface { RegistrationAPI InstallationAPI ClusterMonitoring() + DownloadFiles(c *common.Cluster) (err error) + DownloadKubeconfig(c *common.Cluster) (err error) + GetCredentials(c *common.Cluster) (err error) + UploadIngressCert(c *common.Cluster) (err error) + VerifyClusterUpdatability(c *common.Cluster) (err error) + AcceptRegistration(c *common.Cluster) (err error) + SetGeneratorVersion(c *common.Cluster, version string, db *gorm.DB) error + CancelInstallation(ctx context.Context, c *common.Cluster, reason string, db *gorm.DB) *common.ApiErrorResponse + ResetCluster(ctx context.Context, c *common.Cluster, reason string, db *gorm.DB) *common.ApiErrorResponse + PrepareForInstallation(ctx context.Context, c *common.Cluster, db *gorm.DB) error + HandlePreInstallError(ctx 
context.Context, c *common.Cluster, err error) + CompleteInstallation(ctx context.Context, c *common.Cluster, successfullyFinished bool, reason string) *common.ApiErrorResponse +} + +type Config struct { + PrepareConfig PrepareConfig } type Manager struct { + Config log logrus.FieldLogger db *gorm.DB insufficient StateAPI ready StateAPI installing StateAPI + finalizing StateAPI installed StateAPI error StateAPI + prepare StateAPI registrationAPI RegistrationAPI installationAPI InstallationAPI eventsHandler events.Handler + sm stateswitch.StateMachine + metricAPI metrics.API } -func NewManager(log logrus.FieldLogger, db *gorm.DB, eventsHandler events.Handler) *Manager { +func NewManager(cfg Config, log logrus.FieldLogger, db *gorm.DB, eventsHandler events.Handler, hostAPI host.API, metricApi metrics.API) *Manager { + th := &transitionHandler{ + log: log, + db: db, + } return &Manager{ log: log, db: db, - insufficient: NewInsufficientState(log, db), + insufficient: NewInsufficientState(log, db, hostAPI), ready: NewReadyState(log, db), installing: NewInstallingState(log, db), + finalizing: NewFinalizingState(log, db), installed: NewInstalledState(log, db), error: NewErrorState(log, db), + prepare: NewPrepareForInstallation(cfg.PrepareConfig, log, db), registrationAPI: NewRegistrar(log, db), installationAPI: NewInstaller(log, db), eventsHandler: eventsHandler, + sm: NewClusterStateMachine(th), + metricAPI: metricApi, } } func (m *Manager) getCurrentState(status string) (StateAPI, error) { switch status { case "": - case clusterStatusInsufficient: + case models.ClusterStatusInsufficient: return m.insufficient, nil - case clusterStatusReady: + case models.ClusterStatusReady: return m.ready, nil - case clusterStatusInstalling: + case models.ClusterStatusInstalling: return m.installing, nil - case clusterStatusInstalled: + case models.ClusterStatusFinalizing: + return m.finalizing, nil + case models.ClusterStatusInstalled: return m.installed, nil - case clusterStatusError: 
+ case models.ClusterStatusError: return m.error, nil + case models.ClusterStatusPreparingForInstallation: + return m.prepare, nil } - return nil, fmt.Errorf("not supported cluster status: %s", status) + return nil, errors.Errorf("not supported cluster status: %s", status) } -func (m *Manager) RegisterCluster(ctx context.Context, c *models.Cluster) error { +func (m *Manager) RegisterCluster(ctx context.Context, c *common.Cluster) error { err := m.registrationAPI.RegisterCluster(ctx, c) - var msg string if err != nil { - msg = fmt.Sprintf("Registration of cluster %s failed. Error: %s", c.ID, err.Error()) + m.eventsHandler.AddEvent(ctx, c.ID.String(), models.EventSeverityError, + fmt.Sprintf("Failed to register cluster with name \"%s\". Error: %s", c.Name, err.Error()), time.Now()) } else { - msg = fmt.Sprintf("Registered cluster %s", c.ID) + m.eventsHandler.AddEvent(ctx, c.ID.String(), models.EventSeverityInfo, + fmt.Sprintf("Registered cluster \"%s\"", c.Name), time.Now()) } - m.eventsHandler.AddEvent(ctx, c.ID.String(), msg, time.Now()) return err } -func (m *Manager) DeregisterCluster(ctx context.Context, c *models.Cluster) error { +func (m *Manager) DeregisterCluster(ctx context.Context, c *common.Cluster) error { err := m.registrationAPI.DeregisterCluster(ctx, c) - var msg string if err != nil { - msg = fmt.Sprintf("Deregistration of cluster %s failed. Error: %s", c.ID, err.Error()) + m.eventsHandler.AddEvent(ctx, c.ID.String(), models.EventSeverityError, + fmt.Sprintf("Failed to deregister cluster. 
Error: %s", err.Error()), time.Now()) } else { - msg = fmt.Sprintf("Deregistered cluster %s", c.ID) + m.eventsHandler.AddEvent(ctx, c.ID.String(), models.EventSeverityInfo, "Deregistered cluster", time.Now()) } - m.eventsHandler.AddEvent(ctx, c.ID.String(), msg, time.Now()) return err } -func (m *Manager) RefreshStatus(ctx context.Context, c *models.Cluster, db *gorm.DB) (*UpdateReply, error) { - state, err := m.getCurrentState(swag.StringValue(c.Status)) +func (m *Manager) RefreshStatus(ctx context.Context, c *common.Cluster, db *gorm.DB) (*common.Cluster, error) { + log := logutil.FromContext(ctx, m.log) + + stateBeforeRefresh := swag.StringValue(c.Status) + // get updated cluster info with hosts + var cluster common.Cluster + + if err := db.Preload("Hosts").Take(&cluster, "id = ?", c.ID.String()).Error; err != nil { + return nil, errors.Wrapf(err, "failed to get cluster %s", c.ID.String()) + } + state, err := m.getCurrentState(swag.StringValue(cluster.Status)) if err != nil { return nil, err } - return state.RefreshStatus(ctx, c, db) + + clusterAfterRefresh, err := state.RefreshStatus(ctx, &cluster, db) + //report installation finished metric if needed + reportInstallationCompleteStatuses := []string{models.ClusterStatusInstalled, models.ClusterStatusError} + if err == nil && stateBeforeRefresh != "" && stateBeforeRefresh == models.ClusterStatusInstalling && + funk.ContainsString(reportInstallationCompleteStatuses, swag.StringValue(clusterAfterRefresh.Status)) { + m.metricAPI.ClusterInstallationFinished(log, swag.StringValue(cluster.Status), c.OpenshiftVersion, c.InstallStartedAt) + } + return clusterAfterRefresh, err } -func (m *Manager) Install(ctx context.Context, c *models.Cluster, db *gorm.DB) error { +func (m *Manager) Install(ctx context.Context, c *common.Cluster, db *gorm.DB) error { return m.installationAPI.Install(ctx, c, db) } -func (m *Manager) GetMasterNodesIds(ctx context.Context, c *models.Cluster, db *gorm.DB) ([]*strfmt.UUID, error) { +func (m 
*Manager) GetMasterNodesIds(ctx context.Context, c *common.Cluster, db *gorm.DB) ([]*strfmt.UUID, error) { return m.installationAPI.GetMasterNodesIds(ctx, c, db) } func (m *Manager) ClusterMonitoring() { - var clusters []*models.Cluster + var ( + clusters []*common.Cluster + clusterAfterRefresh *common.Cluster + requestID = requestid.NewID() + ctx = requestid.ToContext(context.Background(), requestID) + log = requestid.RequestIDLogger(m.log, requestID) + err error + ) - if err := m.db.Find(&clusters).Error; err != nil { - m.log.WithError(err).Errorf("failed to get clusters") + if err = m.db.Find(&clusters).Error; err != nil { + log.WithError(err).Errorf("failed to get clusters") return } for _, cluster := range clusters { - state, err := m.getCurrentState(swag.StringValue(cluster.Status)) - - if err != nil { - m.log.WithError(err).Errorf("failed to get cluster %s currentState", cluster.ID) + if clusterAfterRefresh, err = m.RefreshStatus(ctx, cluster, m.db); err != nil { + log.WithError(err).Errorf("failed to refresh cluster %s state", cluster.ID) continue } - stateReply, err := state.RefreshStatus(context.Background(), cluster, m.db) - if err != nil { - m.log.WithError(err).Errorf("failed to refresh cluster %s state", cluster.ID) - continue - } - if stateReply.IsChanged { - m.log.Infof("cluster %s updated to state %s via monitor", cluster.ID, stateReply.State) + + if swag.StringValue(clusterAfterRefresh.Status) != swag.StringValue(cluster.Status) { + log.Infof("cluster %s updated status from %s to %s via monitor", cluster.ID, + swag.StringValue(cluster.Status), swag.StringValue(clusterAfterRefresh.Status)) } } } + +func (m *Manager) DownloadFiles(c *common.Cluster) (err error) { + clusterStatus := swag.StringValue(c.Status) + allowedStatuses := []string{clusterStatusInstalling, + models.ClusterStatusFinalizing, + clusterStatusInstalled, + clusterStatusError} + if !funk.ContainsString(allowedStatuses, clusterStatus) { + err = errors.Errorf("cluster %s is in %s 
state, files can be downloaded only when status is one of: %s", + c.ID, clusterStatus, allowedStatuses) + } + return err +} + +func (m *Manager) DownloadKubeconfig(c *common.Cluster) (err error) { + clusterStatus := swag.StringValue(c.Status) + if clusterStatus != clusterStatusInstalled { + err = errors.Errorf("cluster %s is in %s state, %s can be downloaded only in installed state", c.ID, clusterStatus, "kubeconfig") + } + + return err +} +func (m *Manager) GetCredentials(c *common.Cluster) (err error) { + clusterStatus := swag.StringValue(c.Status) + allowedStatuses := []string{clusterStatusInstalling, models.ClusterStatusFinalizing, clusterStatusInstalled} + if !funk.ContainsString(allowedStatuses, clusterStatus) { + err = errors.Errorf("Cluster %s is in %s state, credentials are available only in installing or installed state", c.ID, clusterStatus) + } + + return err +} + +func (m *Manager) UploadIngressCert(c *common.Cluster) (err error) { + clusterStatus := swag.StringValue(c.Status) + allowedStatuses := []string{models.ClusterStatusFinalizing, clusterStatusInstalled} + if !funk.ContainsString(allowedStatuses, clusterStatus) { + err = errors.Errorf("Cluster %s is in %s state, upload ingress ca can be done only in %s or %s state", c.ID, clusterStatus, models.ClusterStatusFinalizing, clusterStatusInstalled) + } + return err +} + +func (m *Manager) AcceptRegistration(c *common.Cluster) (err error) { + clusterStatus := swag.StringValue(c.Status) + allowedStatuses := []string{clusterStatusInsufficient, clusterStatusReady} + if !funk.ContainsString(allowedStatuses, clusterStatus) { + err = errors.Errorf("Cluster %s is in %s state, host can register only in one of %s", c.ID, clusterStatus, allowedStatuses) + } + return err +} + +func (m *Manager) VerifyClusterUpdatability(c *common.Cluster) (err error) { + clusterStatus := swag.StringValue(c.Status) + allowedStatuses := []string{clusterStatusInsufficient, clusterStatusReady} + if 
!funk.ContainsString(allowedStatuses, clusterStatus) { + err = errors.Errorf("Cluster %s is in %s state, cluster can be updated only in one of %s", c.ID, clusterStatus, allowedStatuses) + } + return err +} + +func (m *Manager) SetGeneratorVersion(c *common.Cluster, version string, db *gorm.DB) error { + return db.Model(&common.Cluster{}).Where("id = ?", c.ID.String()). + Update("ignition_generator_version", version).Error +} + +func (m *Manager) CancelInstallation(ctx context.Context, c *common.Cluster, reason string, db *gorm.DB) *common.ApiErrorResponse { + log := logutil.FromContext(ctx, m.log) + + eventSeverity := models.EventSeverityInfo + eventInfo := "Canceled cluster installation" + defer func() { + m.eventsHandler.AddEvent(ctx, c.ID.String(), eventSeverity, eventInfo, time.Now()) + }() + + err := m.sm.Run(TransitionTypeCancelInstallation, newStateCluster(c), &TransitionArgsCancelInstallation{ + ctx: ctx, + reason: reason, + db: db, + }) + if err != nil { + eventSeverity = models.EventSeverityError + eventInfo = fmt.Sprintf("Failed to cancel installation: %s", err.Error()) + return common.NewApiError(http.StatusConflict, err) + } + //report installation finished metric + m.metricAPI.ClusterInstallationFinished(log, "canceled", c.OpenshiftVersion, c.InstallStartedAt) + return nil +} + +func (m *Manager) ResetCluster(ctx context.Context, c *common.Cluster, reason string, db *gorm.DB) *common.ApiErrorResponse { + eventSeverity := models.EventSeverityInfo + eventInfo := "Reset cluster installation" + defer func() { + m.eventsHandler.AddEvent(ctx, c.ID.String(), eventSeverity, eventInfo, time.Now()) + }() + + err := m.sm.Run(TransitionTypeResetCluster, newStateCluster(c), &TransitionArgsResetCluster{ + ctx: ctx, + reason: reason, + db: db, + }) + if err != nil { + eventSeverity = models.EventSeverityError + eventInfo = fmt.Sprintf("Failed to reset installation. 
Error: %s", err.Error()) + return common.NewApiError(http.StatusConflict, err) + } + return nil +} + +func (m *Manager) CompleteInstallation(ctx context.Context, c *common.Cluster, successfullyFinished bool, reason string) *common.ApiErrorResponse { + log := logutil.FromContext(ctx, m.log) + + err := m.sm.Run(TransitionTypeCompleteInstallation, newStateCluster(c), &TransitionArgsCompleteInstallation{ + ctx: ctx, + isSuccess: successfullyFinished, + reason: reason, + }) + if err != nil { + return common.NewApiError(http.StatusConflict, err) + } + result := models.ClusterStatusInstalled + if !successfullyFinished { + result = models.ClusterStatusError + } + m.metricAPI.ClusterInstallationFinished(log, result, c.OpenshiftVersion, c.InstallStartedAt) + return nil +} + +func (m *Manager) PrepareForInstallation(ctx context.Context, c *common.Cluster, db *gorm.DB) error { + err := m.sm.Run(TransitionTypePrepareForInstallation, newStateCluster(c), + &TransitionArgsPrepareForInstallation{ + ctx: ctx, + db: db, + }, + ) + return err +} + +func (m *Manager) HandlePreInstallError(ctx context.Context, c *common.Cluster, installErr error) { + log := logutil.FromContext(ctx, m.log) + err := m.sm.Run(TransitionTypeHandlePreInstallationError, newStateCluster(c), &TransitionArgsHandlePreInstallationError{ + ctx: ctx, + installErr: installErr, + }) + if err != nil { + log.WithError(err).Errorf("Failed to handle pre installation error for cluster %s", c.ID.String()) + } else { + log.Infof("Successfully handled pre-installation error, cluster %s", c.ID.String()) + } +} diff --git a/internal/cluster/cluster_suite_test.go b/internal/cluster/cluster_suite_test.go new file mode 100644 index 000000000..fcbf39e6d --- /dev/null +++ b/internal/cluster/cluster_suite_test.go @@ -0,0 +1,17 @@ +package cluster_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/filanov/bm-inventory/internal/common" +) + +func TestCluster(t *testing.T) { + RegisterFailHandler(Fail) + common.InitializeDBTest() + defer common.TerminateDBTest() + RunSpecs(t, "cluster tests") +} diff --git a/internal/cluster/cluster_test.go b/internal/cluster/cluster_test.go index 7c225db06..7a712f519 100644 --- a/internal/cluster/cluster_test.go +++ b/internal/cluster/cluster_test.go @@ -3,55 +3,68 @@ package cluster import ( "context" "io/ioutil" - "testing" + "net/http" + "time" + + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/internal/events" + "github.com/filanov/bm-inventory/internal/host" + "github.com/filanov/bm-inventory/internal/metrics" + "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" + "github.com/golang/mock/gomock" "github.com/google/uuid" - "github.com/jinzhu/gorm" - _ "github.com/jinzhu/gorm/dialects/sqlite" + _ "github.com/jinzhu/gorm/dialects/postgres" "github.com/sirupsen/logrus" - "github.com/filanov/bm-inventory/models" - . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + "github.com/pkg/errors" ) +var defaultTestConfig = Config{ + PrepareConfig: PrepareConfig{ + InstallationTimeout: 10 * time.Minute, + }, +} + var _ = Describe("stateMachine", func() { var ( - ctx = context.Background() - db *gorm.DB - state API - cluster models.Cluster - stateReply *UpdateReply - stateErr error + ctx = context.Background() + db *gorm.DB + state API + cluster *common.Cluster + refreshedCluster *common.Cluster + stateErr error + dbName = "state_machine" ) BeforeEach(func() { - db = prepareDB() - state = NewManager(getTestLog(), db, nil) + db = common.PrepareTestDB(dbName, &events.Event{}) + state = NewManager(defaultTestConfig, getTestLog(), db, nil, nil, nil) id := strfmt.UUID(uuid.New().String()) - cluster = models.Cluster{ + cluster = &common.Cluster{Cluster: models.Cluster{ ID: &id, Status: swag.String("not a known state"), - } + }} Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) }) Context("unknown_cluster_state", func() { It("update_cluster", func() { - stateReply, stateErr = state.RefreshStatus(ctx, &cluster, db) + refreshedCluster, stateErr = state.RefreshStatus(ctx, cluster, db) }) It("install_cluster", func() { - stateErr = state.Install(ctx, &cluster, db) + stateErr = state.Install(ctx, cluster, db) }) AfterEach(func() { - db.Close() - Expect(stateReply).To(BeNil()) + common.DeleteTestDB(db, dbName) + Expect(refreshedCluster).To(BeNil()) Expect(stateErr).Should(HaveOccurred()) }) }) @@ -71,112 +84,122 @@ insufficient -> known var _ = Describe("cluster monitor", func() { var ( //ctx = context.Background() - db *gorm.DB - c models.Cluster - id strfmt.UUID - err error - clusterApi *Manager + db *gorm.DB + c common.Cluster + id strfmt.UUID + err error + clusterApi *Manager + shouldHaveUpdated bool + expectedState string + ctrl *gomock.Controller + mockHostAPI *host.MockAPI + mockMetric *metrics.MockAPI + dbName = "cluster_monitor" ) BeforeEach(func() { - db = prepareDB() + db = 
common.PrepareTestDB(dbName, &events.Event{}) id = strfmt.UUID(uuid.New().String()) - clusterApi = NewManager(getTestLog().WithField("pkg", "cluster-monitor"), db, nil) + ctrl = gomock.NewController(GinkgoT()) + mockHostAPI = host.NewMockAPI(ctrl) + mockMetric = metrics.NewMockAPI(ctrl) + clusterApi = NewManager(defaultTestConfig, getTestLog().WithField("pkg", "cluster-monitor"), db, + nil, mockHostAPI, mockMetric) + expectedState = "" + shouldHaveUpdated = false }) Context("from installing state", func() { BeforeEach(func() { - c = models.Cluster{ + c = common.Cluster{Cluster: models.Cluster{ ID: &id, Status: swag.String("installing"), - } + }} Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) Expect(err).ShouldNot(HaveOccurred()) + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() }) It("installing -> installing", func() { createHost(id, "installing", db) createHost(id, "installing", db) createHost(id, "installing", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("installing"))) + shouldHaveUpdated = false + expectedState = "installing" }) It("installing -> installing (some hosts are installed)", func() { createHost(id, "installing", db) createHost(id, "installed", db) createHost(id, "installed", db) - - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("installing"))) + shouldHaveUpdated = false + expectedState = "installing" }) It("installing -> installing (including installing-in-progress)", func() { createHost(id, "installing-in-progress", db) createHost(id, "installing-in-progress", db) createHost(id, "installing-in-progress", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("installing"))) + shouldHaveUpdated = false + expectedState = "installing" }) It("installing -> installing (including installing-in-progress)", func() { 
createHost(id, "installing-in-progress", db) createHost(id, "installing-in-progress", db) createHost(id, "installing", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("installing"))) + shouldHaveUpdated = false + expectedState = "installing" }) - It("installing -> installed", func() { + It("installing -> finalizing", func() { createHost(id, "installed", db) createHost(id, "installed", db) createHost(id, "installed", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("installed"))) + shouldHaveUpdated = true + expectedState = models.ClusterStatusFinalizing }) It("installing -> error", func() { + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), "error", gomock.Any(), gomock.Any()).AnyTimes() createHost(id, "error", db) createHost(id, "installed", db) createHost(id, "installed", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("error"))) + shouldHaveUpdated = true + expectedState = "error" }) It("installing -> error", func() { + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), "error", gomock.Any(), gomock.Any()).AnyTimes() createHost(id, "installed", db) createHost(id, "installed", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("error"))) + shouldHaveUpdated = true + expectedState = "error" }) It("installing -> error insufficient hosts", func() { + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), "error", gomock.Any(), gomock.Any()).AnyTimes() createHost(id, "installing", db) createHost(id, "installed", db) + shouldHaveUpdated = true + expectedState = "error" - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("error"))) }) - }) + mockHostAPIIsRequireUserActionResetFalse := func(times int) { + 
mockHostAPI.EXPECT().IsRequireUserActionReset(gomock.Any()).Return(false).Times(times) + } + Context("ghost hosts", func() { Context("from insufficient state", func() { BeforeEach(func() { - c = models.Cluster{ + c = common.Cluster{Cluster: models.Cluster{ ID: &id, Status: swag.String("insufficient"), - } + }} Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) Expect(err).ShouldNot(HaveOccurred()) @@ -184,42 +207,64 @@ var _ = Describe("cluster monitor", func() { It("insufficient -> insufficient", func() { createHost(id, "known", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("insufficient"))) + mockHostAPIIsRequireUserActionResetFalse(1) + + shouldHaveUpdated = false + expectedState = "insufficient" + }) + It("insufficient -> insufficient", func() { + createHost(id, "known", db) + createHost(id, "known", db) + createHost(id, "known", db) + mockHostAPIIsRequireUserActionResetFalse(3) + shouldHaveUpdated = false + expectedState = "insufficient" }) It("insufficient -> ready", func() { createHost(id, "known", db) createHost(id, "known", db) createHost(id, "known", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("ready"))) + mockHostAPIIsRequireUserActionResetFalse(3) + + shouldHaveUpdated = true + expectedState = "ready" + Expect(db.Model(&c).Updates(map[string]interface{}{"api_vip": "1.2.3.5", "ingress_vip": "1.2.3.5"}).Error).To(Not(HaveOccurred())) }) It("insufficient -> insufficient including hosts in discovering", func() { createHost(id, "known", db) createHost(id, "known", db) createHost(id, "discovering", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("insufficient"))) + mockHostAPIIsRequireUserActionResetFalse(3) + + shouldHaveUpdated = false + expectedState = "insufficient" }) It("insufficient -> insufficient including hosts in error", func() { createHost(id, "known", db) createHost(id, 
"known", db) createHost(id, "error", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("insufficient"))) + mockHostAPIIsRequireUserActionResetFalse(3) + + shouldHaveUpdated = false + expectedState = "insufficient" + }) + It("insufficient -> insufficient including hosts in disabled", func() { + createHost(id, "known", db) + createHost(id, "known", db) + createHost(id, "disabled", db) + mockHostAPIIsRequireUserActionResetFalse(3) + + shouldHaveUpdated = false + expectedState = "insufficient" }) }) Context("from ready state", func() { BeforeEach(func() { - c = models.Cluster{ + c = common.Cluster{Cluster: models.Cluster{ ID: &id, Status: swag.String("ready"), - } + }} Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) Expect(err).ShouldNot(HaveOccurred()) @@ -229,41 +274,322 @@ var _ = Describe("cluster monitor", func() { createHost(id, "known", db) createHost(id, "known", db) createHost(id, "known", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("ready"))) + + shouldHaveUpdated = false + expectedState = "ready" }) It("ready -> insufficient", func() { createHost(id, "known", db) createHost(id, "known", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("insufficient"))) + + shouldHaveUpdated = true + expectedState = "insufficient" }) It("ready -> insufficient one host is discovering", func() { createHost(id, "known", db) createHost(id, "known", db) createHost(id, "discovering", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - Expect(c.Status).Should(Equal(swag.String("insufficient"))) + + shouldHaveUpdated = true + expectedState = "insufficient" }) It("ready -> insufficient including hosts in error", func() { createHost(id, "known", db) createHost(id, "known", db) createHost(id, "error", db) - clusterApi.ClusterMonitoring() - c = geCluster(id, db) - 
Expect(c.Status).Should(Equal(swag.String("insufficient"))) + + shouldHaveUpdated = true + expectedState = "insufficient" + }) + It("ready -> insufficient including hosts in disabled", func() { + createHost(id, "known", db) + createHost(id, "known", db) + createHost(id, "disabled", db) + + shouldHaveUpdated = true + expectedState = "insufficient" }) }) }) AfterEach(func() { - db.Close() + before := time.Now().Truncate(10 * time.Millisecond) + c = geCluster(id, db) + saveUpdatedTime := c.StatusUpdatedAt + saveStatusInfo := c.StatusInfo + clusterApi.ClusterMonitoring() + after := time.Now().Truncate(10 * time.Millisecond) + c = geCluster(id, db) + Expect(swag.StringValue(c.Status)).Should(Equal(expectedState)) + if shouldHaveUpdated { + Expect(c.StatusInfo).ShouldNot(BeNil()) + updateTime := time.Time(c.StatusUpdatedAt).Truncate(10 * time.Millisecond) + Expect(updateTime).Should(BeTemporally(">=", before)) + Expect(updateTime).Should(BeTemporally("<=", after)) + } else { + Expect(c.StatusUpdatedAt).Should(Equal(saveUpdatedTime)) + Expect(c.StatusInfo).Should(Equal(saveStatusInfo)) + } + + common.DeleteTestDB(db, dbName) + ctrl.Finish() + }) + +}) + +var _ = Describe("VerifyRegisterHost", func() { + var ( + db *gorm.DB + id strfmt.UUID + clusterApi *Manager + errTemplate = "Cluster %s is in %s state, host can register only in one of [insufficient ready]" + dbName = "verify_register_host" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + id = strfmt.UUID(uuid.New().String()) + clusterApi = NewManager(defaultTestConfig, getTestLog().WithField("pkg", "cluster-monitor"), db, + nil, nil, nil) + }) + + checkVerifyRegisterHost := func(clusterStatus string, expectErr bool) { + cluster := common.Cluster{Cluster: models.Cluster{ID: &id, Status: swag.String(clusterStatus)}} + Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) + cluster = geCluster(id, db) + err := clusterApi.AcceptRegistration(&cluster) + if expectErr { + 
Expect(err.Error()).Should(Equal(errors.Errorf(errTemplate, id, clusterStatus).Error())) + } else { + Expect(err).Should(BeNil()) + } + } + It("Register host while cluster in ready state", func() { + checkVerifyRegisterHost(clusterStatusReady, false) + }) + It("Register host while cluster in insufficient state", func() { + checkVerifyRegisterHost(clusterStatusInsufficient, false) + }) + It("Register host while cluster in installing state", func() { + checkVerifyRegisterHost(clusterStatusInstalling, true) + }) + It("Register host while cluster in installing state", func() { + checkVerifyRegisterHost(models.ClusterStatusFinalizing, true) + }) + It("Register host while cluster in error state", func() { + checkVerifyRegisterHost(clusterStatusError, true) + }) + + It("Register host while cluster in installed state", func() { + checkVerifyRegisterHost(clusterStatusInstalled, true) + }) + AfterEach(func() { + common.DeleteTestDB(db, dbName) }) +}) +var _ = Describe("VerifyClusterUpdatability", func() { + var ( + db *gorm.DB + id strfmt.UUID + clusterApi *Manager + errTemplate = "Cluster %s is in %s state, cluster can be updated only in one of [insufficient ready]" + dbName = "verify_cluster_updatability" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + id = strfmt.UUID(uuid.New().String()) + clusterApi = NewManager(defaultTestConfig, getTestLog().WithField("pkg", "cluster-monitor"), db, + nil, nil, nil) + }) + + checkVerifyClusterUpdatability := func(clusterStatus string, expectErr bool) { + cluster := common.Cluster{Cluster: models.Cluster{ID: &id, Status: swag.String(clusterStatus)}} + Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) + cluster = geCluster(id, db) + err := clusterApi.VerifyClusterUpdatability(&cluster) + if expectErr { + Expect(err.Error()).Should(Equal(errors.Errorf(errTemplate, id, clusterStatus).Error())) + } else { + Expect(err).Should(BeNil()) + } + } + It("Update cluster while insufficient", func() { + 
checkVerifyClusterUpdatability(clusterStatusInsufficient, false) + }) + It("Update cluster while ready", func() { + checkVerifyClusterUpdatability(clusterStatusReady, false) + }) + It("Update cluster while installing", func() { + checkVerifyClusterUpdatability(clusterStatusInstalling, true) + }) + It("Update cluster while installed", func() { + checkVerifyClusterUpdatability(clusterStatusInstalled, true) + }) + It("Update cluster while error", func() { + checkVerifyClusterUpdatability(clusterStatusError, true) + }) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) +}) + +var _ = Describe("SetGeneratorVersion", func() { + var ( + db *gorm.DB + id strfmt.UUID + clusterApi *Manager + dbName = "set_generator_version" + ) + + It("set generator version", func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + id = strfmt.UUID(uuid.New().String()) + clusterApi = NewManager(defaultTestConfig, getTestLog().WithField("pkg", "cluster-monitor"), db, + nil, nil, nil) + cluster := common.Cluster{Cluster: models.Cluster{ID: &id, Status: swag.String(clusterStatusReady)}} + Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) + cluster = geCluster(id, db) + Expect(clusterApi.SetGeneratorVersion(&cluster, "v1", db)).ShouldNot(HaveOccurred()) + cluster = geCluster(id, db) + Expect(cluster.IgnitionGeneratorVersion).To(Equal("v1")) + }) + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) +}) + +var _ = Describe("CancelInstallation", func() { + var ( + ctx = context.Background() + db *gorm.DB + state API + c common.Cluster + eventsHandler events.Handler + ctrl *gomock.Controller + mockMetric *metrics.MockAPI + dbName = "cluster_cancel_installation" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + eventsHandler = events.New(db, logrus.New()) + ctrl = gomock.NewController(GinkgoT()) + mockMetric = metrics.NewMockAPI(ctrl) + state = NewManager(defaultTestConfig, getTestLog(), db, eventsHandler, nil, mockMetric) + id := 
strfmt.UUID(uuid.New().String()) + c = common.Cluster{Cluster: models.Cluster{ + ID: &id, + Status: swag.String(clusterStatusInsufficient), + }} + }) + + Context("cancel_installation", func() { + It("cancel_installation", func() { + c.Status = swag.String(clusterStatusInstalling) + c.InstallStartedAt = strfmt.DateTime(time.Now().Add(-time.Minute)) + Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), "canceled", c.OpenshiftVersion, c.InstallStartedAt) + Expect(state.CancelInstallation(ctx, &c, "some reason", db)).ShouldNot(HaveOccurred()) + events, err := eventsHandler.GetEvents(c.ID.String()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(events)).ShouldNot(Equal(0)) + cancelEvent := events[len(events)-1] + Expect(*cancelEvent.Severity).Should(Equal(models.EventSeverityInfo)) + Expect(*cancelEvent.Message).Should(Equal("Canceled cluster installation")) + }) + It("cancel_failed_installation", func() { + c.Status = swag.String(clusterStatusError) + c.InstallStartedAt = strfmt.DateTime(time.Now().Add(-time.Minute)) + Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), "canceled", c.OpenshiftVersion, c.InstallStartedAt) + Expect(state.CancelInstallation(ctx, &c, "some reason", db)).ShouldNot(HaveOccurred()) + events, err := eventsHandler.GetEvents(c.ID.String()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(events)).ShouldNot(Equal(0)) + cancelEvent := events[len(events)-1] + Expect(*cancelEvent.Severity).Should(Equal(models.EventSeverityInfo)) + Expect(*cancelEvent.Message).Should(Equal("Canceled cluster installation")) + }) + + AfterEach(func() { + db.First(&c, "id = ?", c.ID) + Expect(swag.StringValue(c.Status)).Should(Equal(clusterStatusError)) + }) + }) + + Context("invalid_cancel_installation", func() { + It("nothing_to_cancel", func() { + Expect(state.CancelInstallation(ctx, &c, "some reason", 
db)).Should(HaveOccurred()) + events, err := eventsHandler.GetEvents(c.ID.String()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(events)).ShouldNot(Equal(0)) + cancelEvent := events[len(events)-1] + Expect(*cancelEvent.Severity).Should(Equal(models.EventSeverityError)) + }) + }) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) +}) + +var _ = Describe("ResetCluster", func() { + var ( + ctx = context.Background() + db *gorm.DB + state API + c common.Cluster + eventsHandler events.Handler + dbName = "reset_cluster" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + eventsHandler = events.New(db, logrus.New()) + state = NewManager(defaultTestConfig, getTestLog(), db, eventsHandler, nil, nil) + }) + + It("reset_cluster", func() { + id := strfmt.UUID(uuid.New().String()) + c = common.Cluster{Cluster: models.Cluster{ + ID: &id, + Status: swag.String(clusterStatusError), + }} + Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) + Expect(state.ResetCluster(ctx, &c, "some reason", db)).ShouldNot(HaveOccurred()) + db.First(&c, "id = ?", c.ID) + Expect(swag.StringValue(c.Status)).Should(Equal(clusterStatusInsufficient)) + events, err := eventsHandler.GetEvents(c.ID.String()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(events)).ShouldNot(Equal(0)) + resetEvent := events[len(events)-1] + Expect(*resetEvent.Severity).Should(Equal(models.EventSeverityInfo)) + Expect(*resetEvent.Message).Should(Equal("Reset cluster installation")) + }) + + It("reset cluster conflict", func() { + id := strfmt.UUID(uuid.New().String()) + c = common.Cluster{Cluster: models.Cluster{ + ID: &id, + Status: swag.String(clusterStatusReady), + }} + Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) + reply := state.ResetCluster(ctx, &c, "some reason", db) + Expect(int(reply.StatusCode())).Should(Equal(http.StatusConflict)) + events, err := eventsHandler.GetEvents(c.ID.String()) + Expect(err).ShouldNot(HaveOccurred()) + 
Expect(len(events)).ShouldNot(Equal(0)) + resetEvent := events[len(events)-1] + Expect(*resetEvent.Severity).Should(Equal(models.EventSeverityError)) + }) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) }) func createHost(clusterId strfmt.UUID, state string, db *gorm.DB) { @@ -271,33 +597,20 @@ func createHost(clusterId strfmt.UUID, state string, db *gorm.DB) { host := models.Host{ ID: &hostId, ClusterID: clusterId, - Role: "master", + Role: models.HostRoleMaster, Status: swag.String(state), } Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) } -func prepareDB() *gorm.DB { - db, err := gorm.Open("sqlite3", ":memory:") - Expect(err).ShouldNot(HaveOccurred()) - db.AutoMigrate(&models.Cluster{}) - db.AutoMigrate(&models.Host{}) - return db -} - -func Test(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "cluster state machine tests") -} - func getTestLog() logrus.FieldLogger { l := logrus.New() l.SetOutput(ioutil.Discard) return l } -func geCluster(clusterId strfmt.UUID, db *gorm.DB) models.Cluster { - var cluster models.Cluster +func geCluster(clusterId strfmt.UUID, db *gorm.DB) common.Cluster { + var cluster common.Cluster Expect(db.Preload("Hosts").First(&cluster, "id = ?", clusterId).Error).ShouldNot(HaveOccurred()) return cluster } @@ -309,10 +622,174 @@ func addInstallationRequirements(clusterId strfmt.UUID, db *gorm.DB) { host = models.Host{ ID: &hostId, ClusterID: clusterId, - Role: "master", + Role: models.HostRoleMaster, Status: swag.String("known"), } Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) } + Expect(db.Model(&common.Cluster{Cluster: models.Cluster{ID: &clusterId}}).Updates(map[string]interface{}{"api_vip": "1.2.3.5", "ingress_vip": "1.2.3.5"}).Error).To(Not(HaveOccurred())) + } + +var _ = Describe("PrepareForInstallation", func() { + var ( + ctx = context.Background() + capi API + db *gorm.DB + clusterId strfmt.UUID + dbName = "cluster_prepare_for_installation" + ) + + BeforeEach(func() { + db = 
common.PrepareTestDB(dbName, &events.Event{}) + capi = NewManager(defaultTestConfig, getTestLog(), db, nil, nil, nil) + clusterId = strfmt.UUID(uuid.New().String()) + }) + + // state changes to preparing-for-installation + success := func(cluster *common.Cluster) { + Expect(capi.PrepareForInstallation(ctx, cluster, db)).NotTo(HaveOccurred()) + Expect(db.Take(cluster, "id = ?", clusterId).Error).NotTo(HaveOccurred()) + Expect(swag.StringValue(cluster.Status)).To(Equal(models.ClusterStatusPreparingForInstallation)) + } + + // status should not change + failure := func(cluster *common.Cluster) { + src := swag.StringValue(cluster.Status) + Expect(capi.PrepareForInstallation(ctx, cluster, db)).To(HaveOccurred()) + Expect(db.Take(cluster, "id = ?", clusterId).Error).NotTo(HaveOccurred()) + Expect(swag.StringValue(cluster.Status)).Should(Equal(src)) + } + + tests := []struct { + name string + srcState string + validation func(cluster *common.Cluster) + }{ + { + name: "success from ready", + srcState: models.ClusterStatusReady, + validation: success, + }, + { + name: "already prepared for installation - should fail", + srcState: models.ClusterStatusPreparingForInstallation, + validation: failure, + }, + { + name: "insufficient - should fail", + srcState: models.ClusterStatusInsufficient, + validation: failure, + }, + { + name: "installing - should fail", + srcState: models.ClusterStatusInstalling, + validation: failure, + }, + { + name: "error - should fail", + srcState: models.ClusterStatusError, + validation: failure, + }, + { + name: "installed - should fail", + srcState: models.ClusterStatusInstalled, + validation: failure, + }, + } + + for i := range tests { + t := tests[i] + It(t.name, func() { + cluster := common.Cluster{Cluster: models.Cluster{ID: &clusterId, Status: swag.String(t.srcState)}} + Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) + Expect(db.Take(&cluster, "id = ?", clusterId).Error).ShouldNot(HaveOccurred()) + t.validation(&cluster) + }) + 
} + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) +}) + +var _ = Describe("HandlePreInstallationError", func() { + var ( + ctx = context.Background() + capi API + db *gorm.DB + clusterId strfmt.UUID + dbName = "handle_preInstallation_error" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + capi = NewManager(defaultTestConfig, getTestLog(), db, nil, nil, nil) + clusterId = strfmt.UUID(uuid.New().String()) + }) + + // state changes to error + success := func(cluster *common.Cluster) { + capi.HandlePreInstallError(ctx, cluster, errors.Errorf("pre-install error")) + Expect(db.Take(cluster, "id = ?", clusterId).Error).NotTo(HaveOccurred()) + Expect(swag.StringValue(cluster.Status)).To(Equal(models.ClusterStatusError)) + } + + // status should not change + failure := func(cluster *common.Cluster) { + src := swag.StringValue(cluster.Status) + capi.HandlePreInstallError(ctx, cluster, errors.Errorf("pre-install error")) + Expect(db.Take(cluster, "id = ?", clusterId).Error).NotTo(HaveOccurred()) + Expect(swag.StringValue(cluster.Status)).Should(Equal(src)) + } + + tests := []struct { + name string + srcState string + validation func(cluster *common.Cluster) + }{ + { + name: "success", + srcState: models.ClusterStatusPreparingForInstallation, + validation: success, + }, + { + name: "ready - should fail", + srcState: models.ClusterStatusReady, + validation: failure, + }, + { + name: "insufficient - should fail", + srcState: models.ClusterStatusInsufficient, + validation: failure, + }, + { + name: "installing - should fail", + srcState: models.ClusterStatusInstalling, + validation: failure, + }, + { + name: "error - success", + srcState: models.ClusterStatusError, + validation: success, + }, + { + name: "installed - should fail", + srcState: models.ClusterStatusInstalled, + validation: failure, + }, + } + + for i := range tests { + t := tests[i] + It(t.name, func() { + cluster := common.Cluster{Cluster: models.Cluster{ID: &clusterId, 
Status: swag.String(t.srcState)}} + Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) + Expect(db.Take(&cluster, "id = ?", clusterId).Error).ShouldNot(HaveOccurred()) + t.validation(&cluster) + }) + } + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) +}) diff --git a/internal/cluster/common.go b/internal/cluster/common.go index fd540ad38..c2e018db6 100644 --- a/internal/cluster/common.go +++ b/internal/cluster/common.go @@ -1,79 +1,114 @@ package cluster import ( + "time" + + "github.com/filanov/bm-inventory/internal/common" "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" "github.com/jinzhu/gorm" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "github.com/thoas/go-funk" ) const ( - clusterStatusInsufficient = "insufficient" - clusterStatusReady = "ready" - clusterStatusInstalling = "installing" - clusterStatusInstalled = "installed" - clusterStatusError = "error" + clusterStatusInsufficient = "insufficient" + clusterStatusReady = "ready" + clusterStatusPrepareForInstallation = "preparing-for-installation" + clusterStatusInstalling = "installing" + clusterStatusInstalled = "installed" + clusterStatusError = "error" ) const ( - statusInfoReady = "Cluster ready to be installed" - statusInfoInsufficient = "cluster is insufficient, exactly 3 known master hosts are needed for installation" - statusInfoInstalling = "Installation in progress" - statusInfoInstalled = "installed" + statusInfoReady = "Cluster ready to be installed" + statusInfoInsufficient = "cluster is insufficient, exactly 3 known master hosts are needed for installation" + statusInfoInstalling = "Installation in progress" + statusInfoFinalizing = "Finalizing cluster installation" + statusInfoInstalled = "installed" + statusInfoPreparingForInstallation = "Preparing cluster for installation" + statusInfoPreparingForInstallationTimeout = "Preparing cluster for installation timeout" ) -type UpdateReply struct { - State string - 
IsChanged bool -} - type baseState struct { //TODO remove when res: https://github.com/golangci/golangci-lint/issues/537 log logrus.FieldLogger //nolint:structcheck db *gorm.DB //nolint:structcheck } -func updateState(state string, statusInfo string, c *models.Cluster, db *gorm.DB, log logrus.FieldLogger) (*UpdateReply, error) { - updates := map[string]interface{}{"status": state, "status_info": statusInfo} - dbReply := db.Model(&models.Cluster{}).Where("id = ? and status = ?", - c.ID.String(), swag.StringValue(c.Status)).Updates(updates) - if dbReply.Error != nil { - return nil, errors.Wrapf(dbReply.Error, "failed to update cluster %s state from %s to %s", - c.ID.String(), swag.StringValue(c.Status), state) +func updateClusterStatus(log logrus.FieldLogger, db *gorm.DB, clusterId strfmt.UUID, srcStatus string, + newStatus string, statusInfo string, extra ...interface{}) (*common.Cluster, error) { + var cluster *common.Cluster + var err error + + extra = append(append(make([]interface{}, 0), "status", newStatus, "status_info", statusInfo), extra...) + + if newStatus != srcStatus { + extra = append(extra, "status_updated_at", strfmt.DateTime(time.Now())) + } + + if cluster, err = UpdateCluster(log, db, clusterId, srcStatus, extra...); err != nil || + swag.StringValue(cluster.Status) != newStatus { + return nil, errors.Wrapf(err, "failed to update cluster %s state from %s to %s", + clusterId, srcStatus, newStatus) + } + + return cluster, nil +} + +func UpdateCluster(log logrus.FieldLogger, db *gorm.DB, clusterId strfmt.UUID, srcStatus string, extra ...interface{}) (*common.Cluster, error) { + updates := make(map[string]interface{}) + + if len(extra)%2 != 0 { + return nil, errors.Errorf("invalid update extra parameters %+v", extra) + } + for i := 0; i < len(extra); i += 2 { + updates[extra[i].(string)] = extra[i+1] + } + + // Query by + // Status is required as well to avoid races between different components. + dbReply := db.Model(&common.Cluster{}).Where("id = ? 
and status = ?", clusterId, srcStatus).Updates(updates) + + if dbReply.Error != nil || dbReply.RowsAffected == 0 { + return nil, errors.Errorf("failed to update cluster %s. nothing have changed", clusterId) } - if dbReply.RowsAffected == 0 { - return nil, errors.Errorf("failed to update cluster %s state from %s to %s, nothing have changed", - c.ID.String(), swag.StringValue(c.Status), state) + log.Infof("cluster %s has been updated with the following updateds %+v", clusterId, extra) + + var cluster common.Cluster + + if err := db.First(&cluster, "id = ?", clusterId).Error; err != nil { + return nil, errors.Wrapf(err, "failed to read from cluster %s from the database after the update", + clusterId) } - log.Infof("updated cluster %s from state <%s> to state <%s>", c.ID.String(), swag.StringValue(c.Status), state) - return &UpdateReply{ - State: state, - IsChanged: state != swag.StringValue(c.Status), - }, nil + + return &cluster, nil } -func getKnownMastersNodesIds(c *models.Cluster, db *gorm.DB) ([]*strfmt.UUID, error) { +func getKnownMastersNodesIds(c *common.Cluster, db *gorm.DB) ([]*strfmt.UUID, error) { - var cluster models.Cluster + var cluster common.Cluster var masterNodesIds []*strfmt.UUID if err := db.Preload("Hosts").First(&cluster, "id = ?", c.ID).Error; err != nil { return nil, errors.Errorf("cluster %s not found", c.ID) } + + allowedStatuses := []string{models.HostStatusKnown, models.HostStatusPreparingForInstallation} for _, host := range cluster.Hosts { - if host.Role == "master" && swag.StringValue(host.Status) == "known" { + if host.Role == models.HostRoleMaster && funk.ContainsString(allowedStatuses, swag.StringValue(host.Status)) { masterNodesIds = append(masterNodesIds, host.ID) } } return masterNodesIds, nil } -func mapMasterHostsByStatus(c *models.Cluster) map[string][]*models.Host { +func mapMasterHostsByStatus(c *common.Cluster) map[string][]*models.Host { hostMap := make(map[string][]*models.Host) for _, host := range c.Hosts { - if 
host.Role != "master" { + if host.Role != models.HostRoleMaster { continue } if _, ok := hostMap[swag.StringValue(host.Status)]; ok { diff --git a/internal/cluster/common_test.go b/internal/cluster/common_test.go new file mode 100644 index 000000000..b943976f0 --- /dev/null +++ b/internal/cluster/common_test.go @@ -0,0 +1,81 @@ +package cluster + +import ( + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/models" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/google/uuid" + "github.com/jinzhu/gorm" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var defaultStatus = "status" +var defaultStatusInfo = "statusInfo" +var newStatus = "newStatus" +var newStatusInfo = "newStatusInfo" + +var _ = Describe("update_cluster_state", func() { + var ( + db *gorm.DB + cluster *common.Cluster + lastUpdatedTime strfmt.DateTime + err error + dbName string = "common_test" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName) + + id := strfmt.UUID(uuid.New().String()) + cluster = &common.Cluster{Cluster: models.Cluster{ + ID: &id, + Status: &defaultStatus, + StatusInfo: &defaultStatusInfo, + }} + Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) + + lastUpdatedTime = cluster.StatusUpdatedAt + }) + + Describe("UpdateCluster", func() { + It("change_status", func() { + cluster, err = UpdateCluster(getTestLog(), db, *cluster.ID, *cluster.Status, "status", newStatus, "status_info", newStatusInfo) + Expect(err).ShouldNot(HaveOccurred()) + Expect(swag.StringValue(cluster.Status)).Should(Equal(newStatus)) + Expect(*cluster.StatusInfo).Should(Equal(newStatusInfo)) + }) + + Describe("negative", func() { + It("invalid_extras_amount", func() { + _, err = UpdateCluster(getTestLog(), db, *cluster.ID, *cluster.Status, "1") + Expect(err).Should(HaveOccurred()) + _, err = UpdateCluster(getTestLog(), db, *cluster.ID, *cluster.Status, "1", "2", "3") + Expect(err).Should(HaveOccurred()) + }) + + 
It("no_matching_rows", func() { + _, err = UpdateCluster(getTestLog(), db, *cluster.ID, "otherStatus", "status", newStatus) + Expect(err).Should(HaveOccurred()) + }) + + AfterEach(func() { + Expect(db.First(&cluster, "id = ?", cluster.ID).Error).ShouldNot(HaveOccurred()) + Expect(*cluster.Status).ShouldNot(Equal(newStatus)) + Expect(*cluster.StatusInfo).ShouldNot(Equal(newStatusInfo)) + Expect(cluster.StatusUpdatedAt.String()).Should(Equal(lastUpdatedTime.String())) + }) + }) + + It("db_failure", func() { + db.Close() + _, err = UpdateCluster(getTestLog(), db, *cluster.ID, *cluster.Status, "status", newStatus) + Expect(err).Should(HaveOccurred()) + }) + }) + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) + +}) diff --git a/internal/cluster/error.go b/internal/cluster/error.go index ada62413e..980d62801 100644 --- a/internal/cluster/error.go +++ b/internal/cluster/error.go @@ -3,9 +3,9 @@ package cluster import ( context "context" + "github.com/filanov/bm-inventory/internal/common" "github.com/sirupsen/logrus" - "github.com/filanov/bm-inventory/models" "github.com/go-openapi/swag" "github.com/jinzhu/gorm" "github.com/pkg/errors" @@ -20,14 +20,11 @@ func NewErrorState(log logrus.FieldLogger, db *gorm.DB) *errorState { type errorState baseState -func (e *errorState) RefreshStatus(ctx context.Context, c *models.Cluster, db *gorm.DB) (*UpdateReply, error) { - return &UpdateReply{ - State: clusterStatusError, - IsChanged: false, - }, nil +func (e *errorState) RefreshStatus(ctx context.Context, c *common.Cluster, db *gorm.DB) (*common.Cluster, error) { + return c, nil } -func (e *errorState) Install(ctx context.Context, c *models.Cluster) (*UpdateReply, error) { - return nil, errors.Errorf("unable to install cluster <%s> in <%s> status", +func (e *errorState) Install(ctx context.Context, c *common.Cluster) error { + return errors.Errorf("unable to install cluster <%s> in <%s> status", c.ID, swag.StringValue(c.Status)) } diff --git 
a/internal/cluster/finalizing.go b/internal/cluster/finalizing.go new file mode 100644 index 000000000..5cfec46e5 --- /dev/null +++ b/internal/cluster/finalizing.go @@ -0,0 +1,25 @@ +package cluster + +import ( + context "context" + + "github.com/filanov/bm-inventory/internal/common" + "github.com/sirupsen/logrus" + + "github.com/jinzhu/gorm" +) + +func NewFinalizingState(log logrus.FieldLogger, db *gorm.DB) *finalizingState { + return &finalizingState{ + log: log, + db: db, + } +} + +type finalizingState baseState + +var _ StateAPI = (*Manager)(nil) + +func (i *finalizingState) RefreshStatus(ctx context.Context, c *common.Cluster, db *gorm.DB) (*common.Cluster, error) { + return c, nil +} diff --git a/internal/cluster/installed.go b/internal/cluster/installed.go index e10a59a3b..12017e075 100644 --- a/internal/cluster/installed.go +++ b/internal/cluster/installed.go @@ -3,9 +3,9 @@ package cluster import ( context "context" + "github.com/filanov/bm-inventory/internal/common" "github.com/sirupsen/logrus" - "github.com/filanov/bm-inventory/models" "github.com/go-openapi/swag" "github.com/jinzhu/gorm" "github.com/pkg/errors" @@ -22,14 +22,11 @@ type installedState baseState var _ StateAPI = (*Manager)(nil) -func (i *installedState) RefreshStatus(ctx context.Context, c *models.Cluster, db *gorm.DB) (*UpdateReply, error) { - return &UpdateReply{ - State: clusterStatusInstalled, - IsChanged: false, - }, nil +func (i *installedState) RefreshStatus(ctx context.Context, c *common.Cluster, db *gorm.DB) (*common.Cluster, error) { + return c, nil } -func (i *installedState) Install(ctx context.Context, c *models.Cluster) (*UpdateReply, error) { - return nil, errors.Errorf("unable to install cluster <%s> in <%s> status", +func (i *installedState) Install(ctx context.Context, c *common.Cluster) error { + return errors.Errorf("unable to install cluster <%s> in <%s> status", c.ID, swag.StringValue(c.Status)) } diff --git a/internal/cluster/installer.go 
b/internal/cluster/installer.go index 2abff0b62..05ef91d59 100644 --- a/internal/cluster/installer.go +++ b/internal/cluster/installer.go @@ -3,13 +3,17 @@ package cluster import ( context "context" + "github.com/filanov/bm-inventory/models" + + logutil "github.com/filanov/bm-inventory/pkg/log" + + "github.com/filanov/bm-inventory/internal/common" "github.com/go-openapi/strfmt" "github.com/pkg/errors" "github.com/go-openapi/swag" - "github.com/filanov/bm-inventory/models" "github.com/jinzhu/gorm" "github.com/sirupsen/logrus" ) @@ -26,16 +30,25 @@ type installer struct { db *gorm.DB } -func (i *installer) Install(ctx context.Context, c *models.Cluster, db *gorm.DB) error { +func (i *installer) Install(ctx context.Context, c *common.Cluster, db *gorm.DB) error { + log := logutil.FromContext(ctx, i.log) switch swag.StringValue(c.Status) { case "": - case clusterStatusReady: - logrus.Infof("cluster %s is starting installation", c.ID) + case clusterStatusPrepareForInstallation: + log.Infof("cluster %s is starting installation", c.ID) case clusterStatusInsufficient: - return errors.Errorf("cluster %s is missing the resources to be installed", c.ID) + masterKnownHosts, err := i.GetMasterNodesIds(ctx, c, db) + if err != nil { + return err + } + return errors.Errorf("cluster %s is expected to have exactly %d known master to be installed, got %d", c.ID, minHostsNeededForInstallation, len(masterKnownHosts)) + case clusterStatusReady: + return errors.Errorf("cluster %s is ready expected %s", c.ID, clusterStatusPrepareForInstallation) case clusterStatusInstalling: return errors.Errorf("cluster %s is already installing", c.ID) + case models.ClusterStatusFinalizing: + return errors.Errorf("cluster %s is already %s", c.ID, models.ClusterStatusFinalizing) case clusterStatusInstalled: return errors.Errorf("cluster %s is already installed", c.ID) case clusterStatusError: @@ -44,14 +57,14 @@ func (i *installer) Install(ctx context.Context, c *models.Cluster, db *gorm.DB) return 
errors.Errorf("cluster %s state is unclear - cluster state: %s", c.ID, swag.StringValue(c.Status)) } - _, err := updateState(clusterStatusInstalling, statusInfoInstalling, c, db, i.log) - if err != nil { + if _, err := updateClusterStatus(i.log, db, *c.ID, swag.StringValue(c.Status), + clusterStatusInstalling, statusInfoInstalling); err != nil { return err } return nil } -func (i *installer) GetMasterNodesIds(ctx context.Context, cluster *models.Cluster, db *gorm.DB) ([]*strfmt.UUID, error) { +func (i *installer) GetMasterNodesIds(ctx context.Context, cluster *common.Cluster, db *gorm.DB) ([]*strfmt.UUID, error) { return getKnownMastersNodesIds(cluster, db) } diff --git a/internal/cluster/installer_test.go b/internal/cluster/installer_test.go index 09431ae65..8b72055da 100644 --- a/internal/cluster/installer_test.go +++ b/internal/cluster/installer_test.go @@ -3,6 +3,7 @@ package cluster import ( "context" + "github.com/filanov/bm-inventory/internal/common" "github.com/filanov/bm-inventory/internal/host" "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" @@ -20,19 +21,20 @@ var _ = Describe("installer", func() { installerManager InstallationAPI db *gorm.DB id strfmt.UUID - cluster models.Cluster + cluster common.Cluster hostsIds []strfmt.UUID + dbName = "cluster_installer" ) BeforeEach(func() { - db = prepareDB() + db = common.PrepareTestDB(dbName) installerManager = NewInstaller(getTestLog(), db) id = strfmt.UUID(uuid.New().String()) - cluster = models.Cluster{ + cluster = common.Cluster{Cluster: models.Cluster{ ID: &id, Status: swag.String(clusterStatusReady), - } + }} Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) }) @@ -41,13 +43,18 @@ var _ = Describe("installer", func() { It("cluster is insufficient", func() { cluster = updateClusterState(cluster, clusterStatusInsufficient, db) err := installerManager.Install(ctx, &cluster, db) - Expect(err.Error()).Should(MatchRegexp(errors.Errorf("cluster %s is missing the resources to be 
installed", cluster.ID).Error())) + Expect(err.Error()).Should(MatchRegexp(errors.Errorf("cluster %s is expected to have exactly 3 known master to be installed, got 0", cluster.ID).Error())) }) It("cluster is installing", func() { cluster = updateClusterState(cluster, clusterStatusInstalling, db) err := installerManager.Install(ctx, &cluster, db) Expect(err.Error()).Should(MatchRegexp(errors.Errorf("cluster %s is already installing", cluster.ID).Error())) }) + It("cluster is finalizing", func() { + cluster = updateClusterState(cluster, models.ClusterStatusFinalizing, db) + err := installerManager.Install(ctx, &cluster, db) + Expect(err.Error()).Should(MatchRegexp(errors.Errorf("cluster %s is already %s", cluster.ID, models.ClusterStatusFinalizing).Error())) + }) It("cluster is in error", func() { cluster = updateClusterState(cluster, clusterStatusError, db) err := installerManager.Install(ctx, &cluster, db) @@ -65,6 +72,10 @@ var _ = Describe("installer", func() { }) It("cluster is ready", func() { cluster = updateClusterState(cluster, clusterStatusReady, db) + Expect(installerManager.Install(ctx, &cluster, db)).Should(HaveOccurred()) + }) + It("cluster is ready", func() { + cluster = updateClusterState(cluster, clusterStatusPrepareForInstallation, db) err := installerManager.Install(ctx, &cluster, db) Expect(err).Should(BeNil()) @@ -78,11 +89,11 @@ var _ = Describe("installer", func() { It("test getting master ids", func() { for i := 0; i < 3; i++ { - hostsIds = append(hostsIds, addHost("master", host.HostStatusKnown, id, db)) + hostsIds = append(hostsIds, addHost(models.HostRoleMaster, host.HostStatusKnown, id, db)) } masterKnownIds := hostsIds - hostsIds = append(hostsIds, addHost("worker", host.HostStatusKnown, id, db)) - hostsIds = append(hostsIds, addHost("master", host.HostStatusDiscovering, id, db)) + hostsIds = append(hostsIds, addHost(models.HostRoleWorker, host.HostStatusKnown, id, db)) + hostsIds = append(hostsIds, addHost(models.HostRoleMaster, 
host.HostStatusDiscovering, id, db)) replyMasterNodesIds, err := installerManager.GetMasterNodesIds(ctx, &cluster, db) Expect(err).Should(BeNil()) @@ -93,17 +104,17 @@ var _ = Describe("installer", func() { }) }) AfterEach(func() { - db.Close() + common.DeleteTestDB(db, dbName) }) }) -func updateClusterState(cluster models.Cluster, state string, db *gorm.DB) models.Cluster { +func updateClusterState(cluster common.Cluster, state string, db *gorm.DB) common.Cluster { cluster.Status = swag.String(state) Expect(db.Model(&cluster).Update("status", state).Error).NotTo(HaveOccurred()) return cluster } -func addHost(role string, state string, clusterId strfmt.UUID, db *gorm.DB) strfmt.UUID { +func addHost(role models.HostRole, state string, clusterId strfmt.UUID, db *gorm.DB) strfmt.UUID { hostId := strfmt.UUID(uuid.New().String()) host := models.Host{ @@ -116,6 +127,15 @@ func addHost(role string, state string, clusterId strfmt.UUID, db *gorm.DB) strf return hostId } +func updateHostProgress(h *models.Host, stage models.HostStage, info string, db *gorm.DB) { + progress := &models.HostProgressInfo{ + CurrentStage: stage, + ProgressInfo: info, + } + h.Progress = progress + Expect(db.Model(&h).Update("progress_current_stage", stage, "progress_progress_info", info).Error).ShouldNot(HaveOccurred()) +} + func checkIfIdInArr(a strfmt.UUID, list []*strfmt.UUID) bool { for _, b := range list { if b == &a { diff --git a/internal/cluster/installing.go b/internal/cluster/installing.go index 368119427..1bb841d61 100644 --- a/internal/cluster/installing.go +++ b/internal/cluster/installing.go @@ -4,13 +4,17 @@ import ( context "context" "fmt" + "github.com/filanov/bm-inventory/models" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/filanov/bm-inventory/internal/common" intenralhost "github.com/filanov/bm-inventory/internal/host" logutil "github.com/filanov/bm-inventory/pkg/log" "github.com/sirupsen/logrus" - "github.com/filanov/bm-inventory/models" 
"github.com/jinzhu/gorm" "github.com/pkg/errors" ) @@ -26,52 +30,58 @@ type installingState baseState var _ StateAPI = (*Manager)(nil) -func (i *installingState) RefreshStatus(ctx context.Context, c *models.Cluster, db *gorm.DB) (*UpdateReply, error) { +func (i *installingState) RefreshStatus(ctx context.Context, c *common.Cluster, db *gorm.DB) (*common.Cluster, error) { log := logutil.FromContext(ctx, i.log) - installationState, StateInfo, err := i.getClusterInstallationState(ctx, c) + installationState, StateInfo, err := i.getClusterInstallationState(ctx, c, db) if err != nil { return nil, errors.Errorf("couldn't determine cluster %s installation state", c.ID) } switch installationState { + case models.ClusterStatusFinalizing: + return updateClusterStatus(log, db, *c.ID, swag.StringValue(c.Status), models.ClusterStatusFinalizing, StateInfo) case clusterStatusInstalled: - return updateState(clusterStatusInstalled, StateInfo, c, i.db, log) + return updateClusterStatus(log, db, *c.ID, swag.StringValue(c.Status), clusterStatusInstalled, StateInfo) case clusterStatusError: - return updateState(clusterStatusError, StateInfo, c, i.db, log) + return updateClusterStatus(log, db, *c.ID, swag.StringValue(c.Status), clusterStatusError, StateInfo) case clusterStatusInstalling: - return &UpdateReply{ - State: clusterStatusInstalling, - IsChanged: false, - }, nil + return c, nil } return nil, errors.Errorf("cluster %s state transaction is not clear, installation state: %s ", c.ID, installationState) } -func (i *installingState) getClusterInstallationState(ctx context.Context, c *models.Cluster) (string, string, error) { +func (i *installingState) getClusterInstallationState(ctx context.Context, c *common.Cluster, db *gorm.DB) (string, string, error) { log := logutil.FromContext(ctx, i.log) - if err := i.db.Preload("Hosts").First(&c, "id = ?", c.ID).Error; err != nil { - return "", "", errors.Errorf("cluster %s not found", c.ID) - } - mappedMastersByRole := 
mapMasterHostsByStatus(c) - // Cluster is in installed + // Cluster is in finalizing mastersInInstalled, ok := mappedMastersByRole[intenralhost.HostStatusInstalled] if ok && len(mastersInInstalled) >= minHostsNeededForInstallation { log.Infof("Cluster %s has at least %d installed hosts, cluster is installed.", c.ID, len(mastersInInstalled)) - return clusterStatusInstalled, statusInfoInstalled, nil + return models.ClusterStatusFinalizing, statusInfoFinalizing, nil } // Cluster is installing - mastersInInstalling := mappedMastersByRole[intenralhost.HostStatusInstalling] - mastersInInstallingInProgress := mappedMastersByRole[intenralhost.HostStatusInstallingInProgress] - if (len(mastersInInstalling) + len(mastersInInstallingInProgress) + len(mastersInInstalled)) >= minHostsNeededForInstallation { + mastersInSomeInstallingStatus := len(mappedMastersByRole[intenralhost.HostStatusInstalling]) + + len(mappedMastersByRole[intenralhost.HostStatusInstallingInProgress]) + + len(mappedMastersByRole[intenralhost.HostStatusInstalled]) + + len(mappedMastersByRole[intenralhost.HostStatusInstallingPendingUserAction]) + if mastersInSomeInstallingStatus >= minHostsNeededForInstallation { return clusterStatusInstalling, statusInfoInstalling, nil } // Cluster is in error - mastersInError := mappedMastersByRole[intenralhost.HostStatusError] - log.Warningf("Cluster %s has %d hosts in error.", c.ID, len(mastersInError)) - return clusterStatusError, fmt.Sprintf("cluster %s has %d hosts in error", c.ID, len(mastersInError)), nil + mappedHostsRolesToIds := make(map[string][]strfmt.UUID, len(mappedMastersByRole)) + for role, hosts := range mappedMastersByRole { + ids := make([]strfmt.UUID, 0) + for _, h := range hosts { + ids = append(ids, *h.ID) + } + + mappedHostsRolesToIds[role] = ids + } + + log.Warningf("Cluster %s hosts status map is %+v", c.ID, mappedHostsRolesToIds) + return clusterStatusError, fmt.Sprintf("cluster %s has hosts in error", c.ID), nil } diff --git 
a/internal/cluster/insufficient.go b/internal/cluster/insufficient.go index e5a02b870..010b1f57a 100644 --- a/internal/cluster/insufficient.go +++ b/internal/cluster/insufficient.go @@ -3,45 +3,88 @@ package cluster import ( "context" - "github.com/sirupsen/logrus" - - intenralhost "github.com/filanov/bm-inventory/internal/host" + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/internal/host" "github.com/filanov/bm-inventory/models" - logutil "github.com/filanov/bm-inventory/pkg/log" + logutil "github.com/filanov/bm-inventory/pkg/log" + "github.com/go-openapi/swag" "github.com/jinzhu/gorm" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) -func NewInsufficientState(log logrus.FieldLogger, db *gorm.DB) *insufficientState { +func NewInsufficientState(log logrus.FieldLogger, db *gorm.DB, hostAPI host.API) *insufficientState { return &insufficientState{ - log: log, - db: db, + baseState: baseState{ + log: log, + db: db, + }, + hostAPI: hostAPI, } } -type insufficientState baseState - -func (i *insufficientState) RefreshStatus(ctx context.Context, c *models.Cluster, db *gorm.DB) (*UpdateReply, error) { +type insufficientState struct { + baseState + hostAPI host.API +} +func (i *insufficientState) RefreshStatus(ctx context.Context, c *common.Cluster, db *gorm.DB) (*common.Cluster, error) { log := logutil.FromContext(ctx, i.log) - if err := db.Preload("Hosts").First(&c, "id = ?", c.ID).Error; err != nil { - return &UpdateReply{ - State: clusterStatusInsufficient, - IsChanged: false}, errors.Errorf("cluster %s not found", c.ID) - } mappedMastersByRole := mapMasterHostsByStatus(c) + if i.isPendingUserResetRequired(c) { + log.Infof("Setting cluster: %s hosts to status: %s", + c.ID, models.HostStatusInstallingPendingUserAction) + if err := i.setPendingUserReset(ctx, c, db); err != nil { + return nil, errors.Wrapf(err, "failed setting cluster: %s hosts to status: %s", + c.ID, models.HostStatusInstallingPendingUserAction) + } + return 
c, nil + } + // Cluster is ready - mastersInKnown, ok := mappedMastersByRole[intenralhost.HostStatusKnown] - if ok && len(mastersInKnown) >= minHostsNeededForInstallation { - log.Infof("Cluster %s has at least %d known master hosts, cluster is ready.", c.ID, minHostsNeededForInstallation) - return updateState(clusterStatusReady, statusInfoReady, c, db, log) + mastersInKnown, ok := mappedMastersByRole[models.HostStatusKnown] + if ok && len(mastersInKnown) == minHostsNeededForInstallation && c.APIVip != "" && c.IngressVip != "" { + log.Infof("Cluster %s has %d known master hosts, cluster is ready.", c.ID, minHostsNeededForInstallation) + return updateClusterStatus(log, db, *c.ID, swag.StringValue(c.Status), clusterStatusReady, statusInfoReady) //cluster is still insufficient } else { - return &UpdateReply{State: clusterStatusInsufficient, - IsChanged: false}, nil + return c, nil + } +} + +func (i *insufficientState) isPendingUserResetRequired(c *common.Cluster) bool { + for _, h := range c.Hosts { + if i.hostAPI.IsRequireUserActionReset(h) { + return true + } + } + return false +} + +func (i *insufficientState) setPendingUserReset(ctx context.Context, c *common.Cluster, db *gorm.DB) error { + txSuccess := false + tx := db.Begin() + defer func() { + if !txSuccess { + tx.Rollback() + } + if r := recover(); r != nil { + tx.Rollback() + } + }() + + for _, h := range c.Hosts { + if err := i.hostAPI.ResetPendingUserAction(ctx, h, tx); err != nil { + return err + } + } + if err := tx.Commit().Error; err != nil { + return err } + txSuccess = true + return nil } diff --git a/internal/cluster/insufficient_test.go b/internal/cluster/insufficient_test.go index 9cf0be292..fd6e95e1c 100644 --- a/internal/cluster/insufficient_test.go +++ b/internal/cluster/insufficient_test.go @@ -3,9 +3,13 @@ package cluster import ( context "context" + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/internal/events" + 
"github.com/filanov/bm-inventory/internal/host" "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" + "github.com/golang/mock/gomock" "github.com/google/uuid" "github.com/jinzhu/gorm" . "github.com/onsi/ginkgo" @@ -15,56 +19,86 @@ import ( var _ = Describe("insufficient_state", func() { var ( ctx = context.Background() - state API + manager API db *gorm.DB - currentState = clusterStatusInsufficient + currentState = models.ClusterStatusInsufficient id strfmt.UUID - updateReply *UpdateReply - updateErr error - cluster models.Cluster + cluster common.Cluster + ctrl *gomock.Controller + mockHostAPI *host.MockAPI + dbName = "cluster_insufficient_state" ) BeforeEach(func() { - db = prepareDB() - state = &Manager{insufficient: NewInsufficientState(getTestLog(), db)} - registerManager := NewRegistrar(getTestLog(), db) + ctrl = gomock.NewController(GinkgoT()) + mockHostAPI = host.NewMockAPI(ctrl) + mockEvents := events.NewMockHandler(ctrl) + db = common.PrepareTestDB(dbName, &events.Event{}) + manager = &Manager{ + log: getTestLog(), + insufficient: NewInsufficientState(getTestLog(), db, mockHostAPI), + registrationAPI: NewRegistrar(getTestLog(), db), + eventsHandler: mockEvents, + } id = strfmt.UUID(uuid.New().String()) - cluster = models.Cluster{ + cluster = common.Cluster{Cluster: models.Cluster{ ID: &id, Status: swag.String(currentState), - } + }} - replyErr := registerManager.RegisterCluster(ctx, &cluster) + mockEvents.EXPECT().AddEvent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) + replyErr := manager.RegisterCluster(ctx, &cluster) Expect(replyErr).Should(BeNil()) - Expect(swag.StringValue(cluster.Status)).Should(Equal(clusterStatusInsufficient)) + Expect(swag.StringValue(cluster.Status)).Should(Equal(models.ClusterStatusInsufficient)) c := geCluster(*cluster.ID, db) - Expect(swag.StringValue(c.Status)).Should(Equal(clusterStatusInsufficient)) + 
Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusInsufficient)) }) + mockHostAPIIsRequireUserActionResetFalse := func(times int) { + mockHostAPI.EXPECT().IsRequireUserActionReset(gomock.Any()).Return(false).Times(times) + } + + mockHostAPIIsRequireUserActionResetTrue := func(times int) { + mockHostAPI.EXPECT().IsRequireUserActionReset(gomock.Any()).Return(true).Times(times) + } + + mockHostAPIResetPendingUserActionSuccess := func(times int) { + mockHostAPI.EXPECT().ResetPendingUserAction(gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil).Times(times) + } + Context("refresh_state", func() { It("not answering requirement to be ready", func() { - updateReply, updateErr = state.RefreshStatus(ctx, &cluster, db) + refreshedCluster, updateErr := manager.RefreshStatus(ctx, &cluster, db) Expect(updateErr).Should(BeNil()) - Expect(updateReply.State).Should(Equal(clusterStatusInsufficient)) + Expect(*refreshedCluster.Status).Should(Equal(models.ClusterStatusInsufficient)) + }) + + It("resetting when host in reboot stage", func() { + addHost(models.HostRoleMaster, models.HostStatusResetting, *cluster.ID, db) c := geCluster(*cluster.ID, db) - Expect(swag.StringValue(c.Status)).Should(Equal(clusterStatusInsufficient)) + Expect(len(c.Hosts)).Should(Equal(1)) + updateHostProgress(c.Hosts[0], models.HostStageRebooting, "rebooting", db) + mockHostAPIIsRequireUserActionResetTrue(1) + mockHostAPIResetPendingUserActionSuccess(1) + refreshedCluster, updateErr := manager.RefreshStatus(ctx, &c, db) + Expect(updateErr).Should(BeNil()) + Expect(*refreshedCluster.Status).Should(Equal(models.ClusterStatusInsufficient)) }) It("answering requirement to be ready", func() { addInstallationRequirements(id, db) - updateReply, updateErr = state.RefreshStatus(ctx, &cluster, db) + mockHostAPIIsRequireUserActionResetFalse(3) + refreshedCluster, updateErr := manager.RefreshStatus(ctx, &cluster, db) Expect(updateErr).Should(BeNil()) - 
Expect(updateReply.State).Should(Equal(clusterStatusReady)) - c := geCluster(*cluster.ID, db) - Expect(swag.StringValue(c.Status)).Should(Equal(clusterStatusReady)) + Expect(*refreshedCluster.Status).Should(Equal(models.ClusterStatusReady)) }) }) AfterEach(func() { - db.Close() - updateReply = nil - updateErr = nil + common.DeleteTestDB(db, dbName) + ctrl.Finish() }) }) diff --git a/internal/cluster/mock_cluster_api.go b/internal/cluster/mock_cluster_api.go index 46df8f915..6520e760c 100644 --- a/internal/cluster/mock_cluster_api.go +++ b/internal/cluster/mock_cluster_api.go @@ -8,140 +8,140 @@ import ( context "context" reflect "reflect" - models "github.com/filanov/bm-inventory/models" + common "github.com/filanov/bm-inventory/internal/common" strfmt "github.com/go-openapi/strfmt" gomock "github.com/golang/mock/gomock" gorm "github.com/jinzhu/gorm" ) -// MockStateAPI is a mock of StateAPI interface. +// MockStateAPI is a mock of StateAPI interface type MockStateAPI struct { ctrl *gomock.Controller recorder *MockStateAPIMockRecorder } -// MockStateAPIMockRecorder is the mock recorder for MockStateAPI. +// MockStateAPIMockRecorder is the mock recorder for MockStateAPI type MockStateAPIMockRecorder struct { mock *MockStateAPI } -// NewMockStateAPI creates a new mock instance. +// NewMockStateAPI creates a new mock instance func NewMockStateAPI(ctrl *gomock.Controller) *MockStateAPI { mock := &MockStateAPI{ctrl: ctrl} mock.recorder = &MockStateAPIMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use. +// EXPECT returns an object that allows the caller to indicate expected use func (m *MockStateAPI) EXPECT() *MockStateAPIMockRecorder { return m.recorder } -// RefreshStatus mocks base method. 
-func (m *MockStateAPI) RefreshStatus(ctx context.Context, c *models.Cluster, db *gorm.DB) (*UpdateReply, error) { +// RefreshStatus mocks base method +func (m *MockStateAPI) RefreshStatus(ctx context.Context, c *common.Cluster, db *gorm.DB) (*common.Cluster, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RefreshStatus", ctx, c, db) - ret0, _ := ret[0].(*UpdateReply) + ret0, _ := ret[0].(*common.Cluster) ret1, _ := ret[1].(error) return ret0, ret1 } -// RefreshStatus indicates an expected call of RefreshStatus. +// RefreshStatus indicates an expected call of RefreshStatus func (mr *MockStateAPIMockRecorder) RefreshStatus(ctx, c, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RefreshStatus", reflect.TypeOf((*MockStateAPI)(nil).RefreshStatus), ctx, c, db) } -// MockRegistrationAPI is a mock of RegistrationAPI interface. +// MockRegistrationAPI is a mock of RegistrationAPI interface type MockRegistrationAPI struct { ctrl *gomock.Controller recorder *MockRegistrationAPIMockRecorder } -// MockRegistrationAPIMockRecorder is the mock recorder for MockRegistrationAPI. +// MockRegistrationAPIMockRecorder is the mock recorder for MockRegistrationAPI type MockRegistrationAPIMockRecorder struct { mock *MockRegistrationAPI } -// NewMockRegistrationAPI creates a new mock instance. +// NewMockRegistrationAPI creates a new mock instance func NewMockRegistrationAPI(ctrl *gomock.Controller) *MockRegistrationAPI { mock := &MockRegistrationAPI{ctrl: ctrl} mock.recorder = &MockRegistrationAPIMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use. +// EXPECT returns an object that allows the caller to indicate expected use func (m *MockRegistrationAPI) EXPECT() *MockRegistrationAPIMockRecorder { return m.recorder } -// RegisterCluster mocks base method. 
-func (m *MockRegistrationAPI) RegisterCluster(ctx context.Context, c *models.Cluster) error { +// RegisterCluster mocks base method +func (m *MockRegistrationAPI) RegisterCluster(ctx context.Context, c *common.Cluster) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RegisterCluster", ctx, c) ret0, _ := ret[0].(error) return ret0 } -// RegisterCluster indicates an expected call of RegisterCluster. +// RegisterCluster indicates an expected call of RegisterCluster func (mr *MockRegistrationAPIMockRecorder) RegisterCluster(ctx, c interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterCluster", reflect.TypeOf((*MockRegistrationAPI)(nil).RegisterCluster), ctx, c) } -// DeregisterCluster mocks base method. -func (m *MockRegistrationAPI) DeregisterCluster(ctx context.Context, c *models.Cluster) error { +// DeregisterCluster mocks base method +func (m *MockRegistrationAPI) DeregisterCluster(ctx context.Context, c *common.Cluster) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeregisterCluster", ctx, c) ret0, _ := ret[0].(error) return ret0 } -// DeregisterCluster indicates an expected call of DeregisterCluster. +// DeregisterCluster indicates an expected call of DeregisterCluster func (mr *MockRegistrationAPIMockRecorder) DeregisterCluster(ctx, c interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeregisterCluster", reflect.TypeOf((*MockRegistrationAPI)(nil).DeregisterCluster), ctx, c) } -// MockInstallationAPI is a mock of InstallationAPI interface. +// MockInstallationAPI is a mock of InstallationAPI interface type MockInstallationAPI struct { ctrl *gomock.Controller recorder *MockInstallationAPIMockRecorder } -// MockInstallationAPIMockRecorder is the mock recorder for MockInstallationAPI. 
+// MockInstallationAPIMockRecorder is the mock recorder for MockInstallationAPI type MockInstallationAPIMockRecorder struct { mock *MockInstallationAPI } -// NewMockInstallationAPI creates a new mock instance. +// NewMockInstallationAPI creates a new mock instance func NewMockInstallationAPI(ctrl *gomock.Controller) *MockInstallationAPI { mock := &MockInstallationAPI{ctrl: ctrl} mock.recorder = &MockInstallationAPIMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use. +// EXPECT returns an object that allows the caller to indicate expected use func (m *MockInstallationAPI) EXPECT() *MockInstallationAPIMockRecorder { return m.recorder } -// Install mocks base method. -func (m *MockInstallationAPI) Install(ctx context.Context, c *models.Cluster, db *gorm.DB) error { +// Install mocks base method +func (m *MockInstallationAPI) Install(ctx context.Context, c *common.Cluster, db *gorm.DB) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Install", ctx, c, db) ret0, _ := ret[0].(error) return ret0 } -// Install indicates an expected call of Install. +// Install indicates an expected call of Install func (mr *MockInstallationAPIMockRecorder) Install(ctx, c, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Install", reflect.TypeOf((*MockInstallationAPI)(nil).Install), ctx, c, db) } -// GetMasterNodesIds mocks base method. 
-func (m *MockInstallationAPI) GetMasterNodesIds(ctx context.Context, c *models.Cluster, db *gorm.DB) ([]*strfmt.UUID, error) { +// GetMasterNodesIds mocks base method +func (m *MockInstallationAPI) GetMasterNodesIds(ctx context.Context, c *common.Cluster, db *gorm.DB) ([]*strfmt.UUID, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetMasterNodesIds", ctx, c, db) ret0, _ := ret[0].([]*strfmt.UUID) @@ -149,94 +149,94 @@ func (m *MockInstallationAPI) GetMasterNodesIds(ctx context.Context, c *models.C return ret0, ret1 } -// GetMasterNodesIds indicates an expected call of GetMasterNodesIds. +// GetMasterNodesIds indicates an expected call of GetMasterNodesIds func (mr *MockInstallationAPIMockRecorder) GetMasterNodesIds(ctx, c, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMasterNodesIds", reflect.TypeOf((*MockInstallationAPI)(nil).GetMasterNodesIds), ctx, c, db) } -// MockAPI is a mock of API interface. +// MockAPI is a mock of API interface type MockAPI struct { ctrl *gomock.Controller recorder *MockAPIMockRecorder } -// MockAPIMockRecorder is the mock recorder for MockAPI. +// MockAPIMockRecorder is the mock recorder for MockAPI type MockAPIMockRecorder struct { mock *MockAPI } -// NewMockAPI creates a new mock instance. +// NewMockAPI creates a new mock instance func NewMockAPI(ctrl *gomock.Controller) *MockAPI { mock := &MockAPI{ctrl: ctrl} mock.recorder = &MockAPIMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use. +// EXPECT returns an object that allows the caller to indicate expected use func (m *MockAPI) EXPECT() *MockAPIMockRecorder { return m.recorder } -// RefreshStatus mocks base method. 
-func (m *MockAPI) RefreshStatus(ctx context.Context, c *models.Cluster, db *gorm.DB) (*UpdateReply, error) { +// RefreshStatus mocks base method +func (m *MockAPI) RefreshStatus(ctx context.Context, c *common.Cluster, db *gorm.DB) (*common.Cluster, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RefreshStatus", ctx, c, db) - ret0, _ := ret[0].(*UpdateReply) + ret0, _ := ret[0].(*common.Cluster) ret1, _ := ret[1].(error) return ret0, ret1 } -// RefreshStatus indicates an expected call of RefreshStatus. +// RefreshStatus indicates an expected call of RefreshStatus func (mr *MockAPIMockRecorder) RefreshStatus(ctx, c, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RefreshStatus", reflect.TypeOf((*MockAPI)(nil).RefreshStatus), ctx, c, db) } -// RegisterCluster mocks base method. -func (m *MockAPI) RegisterCluster(ctx context.Context, c *models.Cluster) error { +// RegisterCluster mocks base method +func (m *MockAPI) RegisterCluster(ctx context.Context, c *common.Cluster) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RegisterCluster", ctx, c) ret0, _ := ret[0].(error) return ret0 } -// RegisterCluster indicates an expected call of RegisterCluster. +// RegisterCluster indicates an expected call of RegisterCluster func (mr *MockAPIMockRecorder) RegisterCluster(ctx, c interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterCluster", reflect.TypeOf((*MockAPI)(nil).RegisterCluster), ctx, c) } -// DeregisterCluster mocks base method. -func (m *MockAPI) DeregisterCluster(ctx context.Context, c *models.Cluster) error { +// DeregisterCluster mocks base method +func (m *MockAPI) DeregisterCluster(ctx context.Context, c *common.Cluster) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeregisterCluster", ctx, c) ret0, _ := ret[0].(error) return ret0 } -// DeregisterCluster indicates an expected call of DeregisterCluster. 
+// DeregisterCluster indicates an expected call of DeregisterCluster func (mr *MockAPIMockRecorder) DeregisterCluster(ctx, c interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeregisterCluster", reflect.TypeOf((*MockAPI)(nil).DeregisterCluster), ctx, c) } -// Install mocks base method. -func (m *MockAPI) Install(ctx context.Context, c *models.Cluster, db *gorm.DB) error { +// Install mocks base method +func (m *MockAPI) Install(ctx context.Context, c *common.Cluster, db *gorm.DB) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Install", ctx, c, db) ret0, _ := ret[0].(error) return ret0 } -// Install indicates an expected call of Install. +// Install indicates an expected call of Install func (mr *MockAPIMockRecorder) Install(ctx, c, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Install", reflect.TypeOf((*MockAPI)(nil).Install), ctx, c, db) } -// GetMasterNodesIds mocks base method. -func (m *MockAPI) GetMasterNodesIds(ctx context.Context, c *models.Cluster, db *gorm.DB) ([]*strfmt.UUID, error) { +// GetMasterNodesIds mocks base method +func (m *MockAPI) GetMasterNodesIds(ctx context.Context, c *common.Cluster, db *gorm.DB) ([]*strfmt.UUID, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetMasterNodesIds", ctx, c, db) ret0, _ := ret[0].([]*strfmt.UUID) @@ -244,20 +244,186 @@ func (m *MockAPI) GetMasterNodesIds(ctx context.Context, c *models.Cluster, db * return ret0, ret1 } -// GetMasterNodesIds indicates an expected call of GetMasterNodesIds. +// GetMasterNodesIds indicates an expected call of GetMasterNodesIds func (mr *MockAPIMockRecorder) GetMasterNodesIds(ctx, c, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMasterNodesIds", reflect.TypeOf((*MockAPI)(nil).GetMasterNodesIds), ctx, c, db) } -// ClusterMonitoring mocks base method. 
+// ClusterMonitoring mocks base method func (m *MockAPI) ClusterMonitoring() { m.ctrl.T.Helper() m.ctrl.Call(m, "ClusterMonitoring") } -// ClusterMonitoring indicates an expected call of ClusterMonitoring. +// ClusterMonitoring indicates an expected call of ClusterMonitoring func (mr *MockAPIMockRecorder) ClusterMonitoring() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterMonitoring", reflect.TypeOf((*MockAPI)(nil).ClusterMonitoring)) } + +// DownloadFiles mocks base method +func (m *MockAPI) DownloadFiles(c *common.Cluster) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DownloadFiles", c) + ret0, _ := ret[0].(error) + return ret0 +} + +// DownloadFiles indicates an expected call of DownloadFiles +func (mr *MockAPIMockRecorder) DownloadFiles(c interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadFiles", reflect.TypeOf((*MockAPI)(nil).DownloadFiles), c) +} + +// DownloadKubeconfig mocks base method +func (m *MockAPI) DownloadKubeconfig(c *common.Cluster) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DownloadKubeconfig", c) + ret0, _ := ret[0].(error) + return ret0 +} + +// DownloadKubeconfig indicates an expected call of DownloadKubeconfig +func (mr *MockAPIMockRecorder) DownloadKubeconfig(c interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadKubeconfig", reflect.TypeOf((*MockAPI)(nil).DownloadKubeconfig), c) +} + +// GetCredentials mocks base method +func (m *MockAPI) GetCredentials(c *common.Cluster) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCredentials", c) + ret0, _ := ret[0].(error) + return ret0 +} + +// GetCredentials indicates an expected call of GetCredentials +func (mr *MockAPIMockRecorder) GetCredentials(c interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCredentials", 
reflect.TypeOf((*MockAPI)(nil).GetCredentials), c) +} + +// UploadIngressCert mocks base method +func (m *MockAPI) UploadIngressCert(c *common.Cluster) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UploadIngressCert", c) + ret0, _ := ret[0].(error) + return ret0 +} + +// UploadIngressCert indicates an expected call of UploadIngressCert +func (mr *MockAPIMockRecorder) UploadIngressCert(c interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadIngressCert", reflect.TypeOf((*MockAPI)(nil).UploadIngressCert), c) +} + +// VerifyClusterUpdatability mocks base method +func (m *MockAPI) VerifyClusterUpdatability(c *common.Cluster) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyClusterUpdatability", c) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyClusterUpdatability indicates an expected call of VerifyClusterUpdatability +func (mr *MockAPIMockRecorder) VerifyClusterUpdatability(c interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyClusterUpdatability", reflect.TypeOf((*MockAPI)(nil).VerifyClusterUpdatability), c) +} + +// AcceptRegistration mocks base method +func (m *MockAPI) AcceptRegistration(c *common.Cluster) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AcceptRegistration", c) + ret0, _ := ret[0].(error) + return ret0 +} + +// AcceptRegistration indicates an expected call of AcceptRegistration +func (mr *MockAPIMockRecorder) AcceptRegistration(c interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcceptRegistration", reflect.TypeOf((*MockAPI)(nil).AcceptRegistration), c) +} + +// SetGeneratorVersion mocks base method +func (m *MockAPI) SetGeneratorVersion(c *common.Cluster, version string, db *gorm.DB) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetGeneratorVersion", c, version, db) + ret0, _ := ret[0].(error) + return ret0 +} + +// 
SetGeneratorVersion indicates an expected call of SetGeneratorVersion +func (mr *MockAPIMockRecorder) SetGeneratorVersion(c, version, db interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetGeneratorVersion", reflect.TypeOf((*MockAPI)(nil).SetGeneratorVersion), c, version, db) +} + +// CancelInstallation mocks base method +func (m *MockAPI) CancelInstallation(ctx context.Context, c *common.Cluster, reason string, db *gorm.DB) *common.ApiErrorResponse { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CancelInstallation", ctx, c, reason, db) + ret0, _ := ret[0].(*common.ApiErrorResponse) + return ret0 +} + +// CancelInstallation indicates an expected call of CancelInstallation +func (mr *MockAPIMockRecorder) CancelInstallation(ctx, c, reason, db interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelInstallation", reflect.TypeOf((*MockAPI)(nil).CancelInstallation), ctx, c, reason, db) +} + +// ResetCluster mocks base method +func (m *MockAPI) ResetCluster(ctx context.Context, c *common.Cluster, reason string, db *gorm.DB) *common.ApiErrorResponse { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResetCluster", ctx, c, reason, db) + ret0, _ := ret[0].(*common.ApiErrorResponse) + return ret0 +} + +// ResetCluster indicates an expected call of ResetCluster +func (mr *MockAPIMockRecorder) ResetCluster(ctx, c, reason, db interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetCluster", reflect.TypeOf((*MockAPI)(nil).ResetCluster), ctx, c, reason, db) +} + +// PrepareForInstallation mocks base method +func (m *MockAPI) PrepareForInstallation(ctx context.Context, c *common.Cluster, db *gorm.DB) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PrepareForInstallation", ctx, c, db) + ret0, _ := ret[0].(error) + return ret0 +} + +// PrepareForInstallation indicates an expected call of 
PrepareForInstallation +func (mr *MockAPIMockRecorder) PrepareForInstallation(ctx, c, db interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareForInstallation", reflect.TypeOf((*MockAPI)(nil).PrepareForInstallation), ctx, c, db) +} + +// HandlePreInstallError mocks base method +func (m *MockAPI) HandlePreInstallError(ctx context.Context, c *common.Cluster, err error) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "HandlePreInstallError", ctx, c, err) +} + +// HandlePreInstallError indicates an expected call of HandlePreInstallError +func (mr *MockAPIMockRecorder) HandlePreInstallError(ctx, c, err interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandlePreInstallError", reflect.TypeOf((*MockAPI)(nil).HandlePreInstallError), ctx, c, err) +} + +// CompleteInstallation mocks base method +func (m *MockAPI) CompleteInstallation(ctx context.Context, c *common.Cluster, successfullyFinished bool, reason string) *common.ApiErrorResponse { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompleteInstallation", ctx, c, successfullyFinished, reason) + ret0, _ := ret[0].(*common.ApiErrorResponse) + return ret0 +} + +// CompleteInstallation indicates an expected call of CompleteInstallation +func (mr *MockAPIMockRecorder) CompleteInstallation(ctx, c, successfullyFinished, reason interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteInstallation", reflect.TypeOf((*MockAPI)(nil).CompleteInstallation), ctx, c, successfullyFinished, reason) +} diff --git a/internal/cluster/prepare.go b/internal/cluster/prepare.go new file mode 100644 index 000000000..9c13cfcd4 --- /dev/null +++ b/internal/cluster/prepare.go @@ -0,0 +1,43 @@ +package cluster + +import ( + "context" + "time" + + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/models" + logutil 
"github.com/filanov/bm-inventory/pkg/log" + "github.com/go-openapi/swag" + "github.com/jinzhu/gorm" + "github.com/sirupsen/logrus" +) + +type PrepareConfig struct { + InstallationTimeout time.Duration `envconfig:"PREPARE_FOR_INSTALLATION_TIMEOUT" default:"10m"` +} + +type prepare struct { + baseState + PrepareConfig +} + +var _ StateAPI = (*prepare)(nil) + +func NewPrepareForInstallation(cfg PrepareConfig, log logrus.FieldLogger, db *gorm.DB) *prepare { + return &prepare{ + baseState: baseState{ + log: log, + db: db, + }, + PrepareConfig: cfg, + } +} + +func (p *prepare) RefreshStatus(ctx context.Context, c *common.Cluster, _ *gorm.DB) (*common.Cluster, error) { + // can happen if the service was rebooted or somehow the async part crashed. + if time.Since(time.Time(c.StatusUpdatedAt)) > p.InstallationTimeout { + return updateClusterStatus(logutil.FromContext(ctx, p.log), p.db, + *c.ID, swag.StringValue(c.Status), models.ClusterStatusError, statusInfoPreparingForInstallationTimeout) + } + return c, nil +} diff --git a/internal/cluster/prepare_test.go b/internal/cluster/prepare_test.go new file mode 100644 index 000000000..2ae600e43 --- /dev/null +++ b/internal/cluster/prepare_test.go @@ -0,0 +1,65 @@ +package cluster + +import ( + "context" + "time" + + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/models" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/google/uuid" + "github.com/jinzhu/gorm" + "github.com/kelseyhightower/envconfig" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("prepare-for-installation refresh status", func() { + var ( + ctx = context.Background() + capi API + db *gorm.DB + clusterId strfmt.UUID + cl common.Cluster + dbName = "cluster_prepare_for_installation" + ) + BeforeEach(func() { + db = common.PrepareTestDB(dbName) + cfg := Config{} + Expect(envconfig.Process("myapp", &cfg)).NotTo(HaveOccurred()) + capi = NewManager(cfg, getTestLog(), db, nil, nil, nil) + clusterId = strfmt.UUID(uuid.New().String()) + cl = common.Cluster{ + Cluster: models.Cluster{ + ID: &clusterId, + Status: swag.String(models.ClusterStatusPreparingForInstallation), + StatusUpdatedAt: strfmt.DateTime(time.Now()), + }, + } + Expect(db.Create(&cl).Error).NotTo(HaveOccurred()) + }) + + It("no change", func() { + Expect(db.Take(&cl, "id = ?", clusterId).Error).NotTo(HaveOccurred()) + refreshedCluster, err := capi.RefreshStatus(ctx, &cl, db) + Expect(err).NotTo(HaveOccurred()) + Expect(*refreshedCluster.Status).To(Equal(models.ClusterStatusPreparingForInstallation)) + }) + + It("timeout", func() { + Expect(db.Model(&cl).Update("status_updated_at", strfmt.DateTime(time.Now().Add(-15*time.Minute))).Error). 
+ NotTo(HaveOccurred()) + refreshedCluster, err := capi.RefreshStatus(ctx, &cl, db) + Expect(err).NotTo(HaveOccurred()) + Expect(swag.StringValue(refreshedCluster.Status)).To(Equal(models.ClusterStatusError)) + }) + + AfterEach(func() { + db.Close() + }) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) +}) diff --git a/internal/cluster/ready.go b/internal/cluster/ready.go index 12b75e469..98f59b518 100644 --- a/internal/cluster/ready.go +++ b/internal/cluster/ready.go @@ -3,14 +3,12 @@ package cluster import ( "context" - "github.com/sirupsen/logrus" - + "github.com/filanov/bm-inventory/internal/common" intenralhost "github.com/filanov/bm-inventory/internal/host" - "github.com/filanov/bm-inventory/models" logutil "github.com/filanov/bm-inventory/pkg/log" - + "github.com/go-openapi/swag" "github.com/jinzhu/gorm" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) func NewReadyState(log logrus.FieldLogger, db *gorm.DB) *readyState { @@ -24,33 +22,26 @@ type readyState baseState var _ StateAPI = (*Manager)(nil) -func (r *readyState) RefreshStatus(ctx context.Context, c *models.Cluster, db *gorm.DB) (*UpdateReply, error) { +func (r *readyState) RefreshStatus(ctx context.Context, c *common.Cluster, db *gorm.DB) (*common.Cluster, error) { log := logutil.FromContext(ctx, r.log) - if err := db.Preload("Hosts").First(&c, "id = ?", c.ID).Error; err != nil { - return &UpdateReply{ - State: clusterStatusInsufficient, - IsChanged: false}, errors.Errorf("cluster %s not found", c.ID) - } mappedMastersByRole := mapMasterHostsByStatus(c) // Installation has started mastersInInstalling := mappedMastersByRole[intenralhost.HostStatusInstalling] mastersInInstallingInProgress := mappedMastersByRole[intenralhost.HostStatusInstallingInProgress] if len(mastersInInstalling) > 0 || len(mastersInInstallingInProgress) > 0 { - return &UpdateReply{State: clusterStatusReady, - IsChanged: false}, nil + return c, nil } // Cluster is insufficient mastersInKnown := 
mappedMastersByRole[intenralhost.HostStatusKnown] - if len(mastersInKnown) < minHostsNeededForInstallation { - log.Infof("Cluster %s dos not have at least %d known master hosts, cluster is insufficient.", c.ID, minHostsNeededForInstallation) - return updateState(clusterStatusInsufficient, statusInfoInsufficient, c, db, log) + if len(mastersInKnown) != minHostsNeededForInstallation { + log.Infof("Cluster %s dos not have exactly %d known master hosts, cluster is insufficient.", c.ID, minHostsNeededForInstallation) + return updateClusterStatus(log, db, *c.ID, swag.StringValue(c.Status), clusterStatusInsufficient, statusInfoInsufficient) //cluster is still ready } else { - return &UpdateReply{State: clusterStatusReady, - IsChanged: false}, nil + return c, nil } } diff --git a/internal/cluster/ready_test.go b/internal/cluster/ready_test.go index d06f7d7ea..a033dfa2f 100644 --- a/internal/cluster/ready_test.go +++ b/internal/cluster/ready_test.go @@ -3,6 +3,7 @@ package cluster import ( context "context" + "github.com/filanov/bm-inventory/internal/common" "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -14,24 +15,23 @@ import ( var _ = Describe("ready_state", func() { var ( - ctx = context.Background() - state API - db *gorm.DB - id strfmt.UUID - updateReply *UpdateReply - updateErr error - cluster models.Cluster + ctx = context.Background() + state API + db *gorm.DB + id strfmt.UUID + cluster common.Cluster + dbName = "cluster_ready_state" ) BeforeEach(func() { - db = prepareDB() - state = &Manager{ready: NewReadyState(getTestLog(), db)} + db = common.PrepareTestDB(dbName) + state = &Manager{log: getTestLog(), ready: NewReadyState(getTestLog(), db)} id = strfmt.UUID(uuid.New().String()) - cluster = models.Cluster{ + cluster = common.Cluster{Cluster: models.Cluster{ ID: &id, Status: swag.String(clusterStatusReady), - } + }} Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) addInstallationRequirements(id, 
db) @@ -41,37 +41,24 @@ var _ = Describe("ready_state", func() { }) Context("refresh_state", func() { - It("cluster is satisfying the install requirements", func() { - updateReply, updateErr = state.RefreshStatus(ctx, &cluster, db) + clusterAfterRefresh, updateErr := state.RefreshStatus(ctx, &cluster, db) Expect(updateErr).Should(BeNil()) - Expect(updateReply.State).Should(Equal(clusterStatusReady)) - Expect(updateReply.IsChanged).Should(Equal(false)) - - cluster = geCluster(*cluster.ID, db) - Expect(swag.StringValue(cluster.Status)).Should(Equal(clusterStatusReady)) + Expect(*clusterAfterRefresh.Status).Should(Equal(clusterStatusReady)) }) It("cluster is not satisfying the install requirements", func() { Expect(db.Where("cluster_id = ?", cluster.ID).Delete(&models.Host{}).Error).NotTo(HaveOccurred()) cluster = geCluster(*cluster.ID, db) - updateReply, updateErr = state.RefreshStatus(ctx, &cluster, db) + clusterAfterRefresh, updateErr := state.RefreshStatus(ctx, &cluster, db) Expect(updateErr).Should(BeNil()) - Expect(updateReply.State).Should(Equal(clusterStatusInsufficient)) - Expect(updateReply.IsChanged).Should(Equal(true)) - - cluster = geCluster(*cluster.ID, db) - Expect(swag.StringValue(cluster.Status)).Should(Equal(clusterStatusInsufficient)) - + Expect(*clusterAfterRefresh.Status).Should(Equal(clusterStatusInsufficient)) }) }) - AfterEach(func() { - db.Close() - updateReply = nil - updateErr = nil + common.DeleteTestDB(db, dbName) }) }) diff --git a/internal/cluster/registrar.go b/internal/cluster/registrar.go index 4f7f75307..51dd430f3 100644 --- a/internal/cluster/registrar.go +++ b/internal/cluster/registrar.go @@ -2,11 +2,14 @@ package cluster import ( context "context" + "time" "github.com/pkg/errors" + "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" + "github.com/filanov/bm-inventory/internal/common" "github.com/filanov/bm-inventory/models" "github.com/jinzhu/gorm" "github.com/sirupsen/logrus" @@ -24,9 +27,10 @@ type registrar struct { db 
*gorm.DB } -func (r *registrar) RegisterCluster(ctx context.Context, cluster *models.Cluster) error { +func (r *registrar) RegisterCluster(ctx context.Context, cluster *common.Cluster) error { cluster.Status = swag.String(clusterStatusInsufficient) cluster.StatusInfo = swag.String(statusInfoInsufficient) + cluster.StatusUpdatedAt = strfmt.DateTime(time.Now()) tx := r.db.Begin() defer func() { if rec := recover(); rec != nil { @@ -52,7 +56,7 @@ func (r *registrar) RegisterCluster(ctx context.Context, cluster *models.Cluster return nil } -func (r *registrar) DeregisterCluster(ctx context.Context, cluster *models.Cluster) error { +func (r *registrar) DeregisterCluster(ctx context.Context, cluster *common.Cluster) error { var txErr error tx := r.db.Begin() diff --git a/internal/cluster/registrar_test.go b/internal/cluster/registrar_test.go index b48da06b9..251ef6c84 100644 --- a/internal/cluster/registrar_test.go +++ b/internal/cluster/registrar_test.go @@ -3,6 +3,7 @@ package cluster import ( context "context" + "github.com/filanov/bm-inventory/internal/common" "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -19,19 +20,20 @@ var _ = Describe("registrar", func() { db *gorm.DB id strfmt.UUID updateErr error - cluster models.Cluster + cluster common.Cluster host models.Host + dbName = "registar" ) BeforeEach(func() { - db = prepareDB() + db = common.PrepareTestDB(dbName) registerManager = NewRegistrar(getTestLog(), db) id = strfmt.UUID(uuid.New().String()) - cluster = models.Cluster{ + cluster = common.Cluster{Cluster: models.Cluster{ ID: &id, Status: swag.String(clusterStatusInsufficient), - } + }} //register cluster updateErr = registerManager.RegisterCluster(ctx, &cluster) @@ -78,7 +80,7 @@ var _ = Describe("registrar", func() { }) AfterEach(func() { - db.Close() + common.DeleteTestDB(db, dbName) updateErr = nil }) }) diff --git a/internal/cluster/statecluster.go b/internal/cluster/statecluster.go new file mode 
100644 index 000000000..87f8285d6 --- /dev/null +++ b/internal/cluster/statecluster.go @@ -0,0 +1,28 @@ +package cluster + +import ( + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/stateswitch" + "github.com/go-openapi/swag" +) + +type stateCluster struct { + srcState string + cluster *common.Cluster +} + +func newStateCluster(c *common.Cluster) *stateCluster { + return &stateCluster{ + srcState: swag.StringValue(c.Status), + cluster: c, + } +} + +func (sh *stateCluster) State() stateswitch.State { + return stateswitch.State(swag.StringValue(sh.cluster.Status)) +} + +func (sh *stateCluster) SetState(state stateswitch.State) error { + sh.cluster.Status = swag.String(string(state)) + return nil +} diff --git a/internal/cluster/statemachine.go b/internal/cluster/statemachine.go new file mode 100644 index 000000000..806dc16b4 --- /dev/null +++ b/internal/cluster/statemachine.go @@ -0,0 +1,86 @@ +package cluster + +import ( + "github.com/filanov/bm-inventory/models" + "github.com/filanov/stateswitch" +) + +const ( + TransitionTypeCancelInstallation = "CancelInstallation" + TransitionTypeResetCluster = "ResetCluster" + TransitionTypePrepareForInstallation = "PrepareForInstallation" + TransitionTypeCompleteInstallation = "CompleteInstallation" + TransitionTypeHandlePreInstallationError = "Handle pre-installation-error" +) + +func NewClusterStateMachine(th *transitionHandler) stateswitch.StateMachine { + sm := stateswitch.NewStateMachine() + + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeCancelInstallation, + SourceStates: []stateswitch.State{ + stateswitch.State(models.ClusterStatusPreparingForInstallation), + stateswitch.State(models.ClusterStatusInstalling), + stateswitch.State(models.ClusterStatusError), + }, + DestinationState: stateswitch.State(models.ClusterStatusError), + PostTransition: th.PostCancelInstallation, + }) + + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: 
TransitionTypeResetCluster, + SourceStates: []stateswitch.State{ + stateswitch.State(models.ClusterStatusPreparingForInstallation), + stateswitch.State(models.ClusterStatusInstalling), + stateswitch.State(models.ClusterStatusError), + }, + DestinationState: stateswitch.State(models.ClusterStatusInsufficient), + PostTransition: th.PostResetCluster, + }) + + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypePrepareForInstallation, + SourceStates: []stateswitch.State{ + stateswitch.State(models.ClusterStatusReady), + }, + DestinationState: stateswitch.State(models.ClusterStatusPreparingForInstallation), + PostTransition: th.PostPrepareForInstallation, + }) + + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeCompleteInstallation, + Condition: th.isSuccess, + Transition: func(stateSwitch stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + params, _ := args.(*TransitionArgsCompleteInstallation) + params.reason = statusInfoInstalled + return nil + }, + SourceStates: []stateswitch.State{ + stateswitch.State(models.ClusterStatusFinalizing), + }, + DestinationState: clusterStatusInstalled, + PostTransition: th.PostCompleteInstallation, + }) + + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeCompleteInstallation, + Condition: th.notSuccess, + SourceStates: []stateswitch.State{ + stateswitch.State(models.ClusterStatusFinalizing), + }, + DestinationState: clusterStatusError, + PostTransition: th.PostCompleteInstallation, + }) + + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeHandlePreInstallationError, + SourceStates: []stateswitch.State{ + stateswitch.State(models.ClusterStatusPreparingForInstallation), + stateswitch.State(models.ClusterStatusError), + }, + DestinationState: stateswitch.State(models.ClusterStatusError), + PostTransition: th.PostHandlePreInstallationError, + }) + + return sm +} diff --git a/internal/cluster/transition.go 
b/internal/cluster/transition.go new file mode 100644 index 000000000..1dcbbb669 --- /dev/null +++ b/internal/cluster/transition.go @@ -0,0 +1,155 @@ +package cluster + +import ( + "context" + "time" + + logutil "github.com/filanov/bm-inventory/pkg/log" + "github.com/filanov/stateswitch" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/jinzhu/gorm" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type transitionHandler struct { + log logrus.FieldLogger + db *gorm.DB +} + +//////////////////////////////////////////////////////////////////////////// +// CancelInstallation +//////////////////////////////////////////////////////////////////////////// + +type TransitionArgsCancelInstallation struct { + ctx context.Context + reason string + db *gorm.DB +} + +func (th *transitionHandler) PostCancelInstallation(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sCluster, ok := sw.(*stateCluster) + if !ok { + return errors.New("PostCancelInstallation incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsCancelInstallation) + if !ok { + return errors.New("PostCancelInstallation invalid argument") + } + if sCluster.srcState == clusterStatusError { + return nil + } + + return th.updateTransitionCluster(logutil.FromContext(params.ctx, th.log), params.db, sCluster, + params.reason) +} + +//////////////////////////////////////////////////////////////////////////// +// ResetCluster +//////////////////////////////////////////////////////////////////////////// + +type TransitionArgsResetCluster struct { + ctx context.Context + reason string + db *gorm.DB +} + +func (th *transitionHandler) PostResetCluster(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sCluster, ok := sw.(*stateCluster) + if !ok { + return errors.New("PostResetCluster incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsResetCluster) + if !ok { + return errors.New("PostResetCluster 
invalid argument") + } + + return th.updateTransitionCluster(logutil.FromContext(params.ctx, th.log), params.db, sCluster, + params.reason) +} + +//////////////////////////////////////////////////////////////////////////// +// Prepare for installation +//////////////////////////////////////////////////////////////////////////// + +type TransitionArgsPrepareForInstallation struct { + ctx context.Context + db *gorm.DB +} + +func (th *transitionHandler) PostPrepareForInstallation(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sCluster, ok := sw.(*stateCluster) + if !ok { + return errors.New("PostResetCluster incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsPrepareForInstallation) + if !ok { + return errors.New("PostResetCluster invalid argument") + } + + return th.updateTransitionCluster(logutil.FromContext(params.ctx, th.log), th.db, sCluster, + statusInfoPreparingForInstallation, "install_started_at", strfmt.DateTime(time.Now())) +} + +//////////////////////////////////////////////////////////////////////////// +// Complete installation +//////////////////////////////////////////////////////////////////////////// + +type TransitionArgsCompleteInstallation struct { + ctx context.Context + isSuccess bool + reason string +} + +func (th *transitionHandler) PostCompleteInstallation(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sCluster, ok := sw.(*stateCluster) + if !ok { + return errors.New("PostCompleteInstallation incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsCompleteInstallation) + if !ok { + return errors.New("PostCompleteInstallation invalid argument") + } + + return th.updateTransitionCluster(logutil.FromContext(params.ctx, th.log), th.db, sCluster, + params.reason, "install_completed_at", strfmt.DateTime(time.Now())) +} + +func (th *transitionHandler) isSuccess(stateSwitch stateswitch.StateSwitch, args stateswitch.TransitionArgs) (b bool, err error) { + 
params, _ := args.(*TransitionArgsCompleteInstallation) + return params.isSuccess, nil +} + +func (th *transitionHandler) notSuccess(stateSwitch stateswitch.StateSwitch, args stateswitch.TransitionArgs) (b bool, err error) { + params, _ := args.(*TransitionArgsCompleteInstallation) + return !params.isSuccess, nil +} + +//////////////////////////////////////////////////////////////////////////// +// Handle pre-installation error +//////////////////////////////////////////////////////////////////////////// + +type TransitionArgsHandlePreInstallationError struct { + ctx context.Context + installErr error +} + +func (th *transitionHandler) PostHandlePreInstallationError(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sCluster, _ := sw.(*stateCluster) + params, _ := args.(*TransitionArgsHandlePreInstallationError) + return th.updateTransitionCluster(logutil.FromContext(params.ctx, th.log), th.db, sCluster, + params.installErr.Error()) +} + +func (th *transitionHandler) updateTransitionCluster(log logrus.FieldLogger, db *gorm.DB, state *stateCluster, + statusInfo string, extra ...interface{}) error { + + if cluster, err := updateClusterStatus(log, db, *state.cluster.ID, state.srcState, + swag.StringValue(state.cluster.Status), statusInfo, extra...); err != nil { + return err + } else { + state.cluster = cluster + return nil + } +} diff --git a/internal/cluster/transition_test.go b/internal/cluster/transition_test.go new file mode 100644 index 000000000..574dcecd4 --- /dev/null +++ b/internal/cluster/transition_test.go @@ -0,0 +1,278 @@ +package cluster + +import ( + "context" + "fmt" + "net/http" + + "github.com/filanov/bm-inventory/internal/metrics" + "github.com/golang/mock/gomock" + "github.com/sirupsen/logrus" + + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/internal/events" + "github.com/filanov/bm-inventory/models" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + 
"github.com/google/uuid" + "github.com/jinzhu/gorm" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Transition tests", func() { + var ( + ctx = context.Background() + capi API + db *gorm.DB + clusterId strfmt.UUID + eventsHandler events.Handler + ctrl *gomock.Controller + mockMetric *metrics.MockAPI + dbName = "cluster_transition_test" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + eventsHandler = events.New(db, logrus.New()) + ctrl = gomock.NewController(GinkgoT()) + mockMetric = metrics.NewMockAPI(ctrl) + capi = NewManager(defaultTestConfig, getTestLog(), db, eventsHandler, nil, mockMetric) + clusterId = strfmt.UUID(uuid.New().String()) + }) + + Context("cancel_installation", func() { + It("cancel_installation", func() { + c := common.Cluster{ + Cluster: models.Cluster{ID: &clusterId, Status: swag.String(clusterStatusInstalling)}, + } + Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), "canceled", c.OpenshiftVersion, c.InstallStartedAt) + Expect(capi.CancelInstallation(ctx, &c, "", db)).ShouldNot(HaveOccurred()) + + Expect(db.First(&c, "id = ?", c.ID).Error).ShouldNot(HaveOccurred()) + Expect(swag.StringValue(c.Status)).Should(Equal(clusterStatusError)) + }) + + It("cancel_installation_conflict", func() { + c := common.Cluster{ + Cluster: models.Cluster{ID: &clusterId, Status: swag.String(clusterStatusInsufficient)}, + } + Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), "canceled", c.OpenshiftVersion, c.InstallStartedAt) + replay := capi.CancelInstallation(ctx, &c, "", db) + Expect(replay).Should(HaveOccurred()) + Expect(int(replay.StatusCode())).Should(Equal(http.StatusConflict)) + + Expect(db.First(&c, "id = ?", c.ID).Error).ShouldNot(HaveOccurred()) + Expect(swag.StringValue(c.Status)).Should(Equal(clusterStatusInsufficient)) + }) + + 
It("cancel_failed_installation", func() { + c := common.Cluster{ + Cluster: models.Cluster{ + ID: &clusterId, + StatusInfo: swag.String("original error"), + Status: swag.String(clusterStatusError)}, + } + Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), "canceled", c.OpenshiftVersion, c.InstallStartedAt) + Expect(capi.CancelInstallation(ctx, &c, "", db)).ShouldNot(HaveOccurred()) + + Expect(db.First(&c, "id = ?", c.ID).Error).ShouldNot(HaveOccurred()) + Expect(swag.StringValue(c.Status)).Should(Equal(clusterStatusError)) + Expect(swag.StringValue(c.StatusInfo)).Should(Equal("original error")) + }) + }) + Context("cancel_installation", func() { + It("complete installation success", func() { + c := common.Cluster{ + Cluster: models.Cluster{ID: &clusterId, Status: swag.String(models.ClusterStatusFinalizing)}, + } + Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), models.ClusterStatusInstalled, c.OpenshiftVersion, c.InstallStartedAt) + Expect(capi.CompleteInstallation(ctx, &c, true, clusterStatusInstalled)).ShouldNot(HaveOccurred()) + + Expect(db.First(&c, "id = ?", c.ID).Error).ShouldNot(HaveOccurred()) + Expect(swag.StringValue(c.Status)).Should(Equal(clusterStatusInstalled)) + }) + + It("complete installation failed", func() { + c := common.Cluster{ + Cluster: models.Cluster{ID: &clusterId, Status: swag.String(models.ClusterStatusFinalizing)}, + } + Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), models.ClusterStatusError, c.OpenshiftVersion, c.InstallStartedAt) + Expect(capi.CompleteInstallation(ctx, &c, false, "aaaa")).ShouldNot(HaveOccurred()) + + Expect(db.First(&c, "id = ?", c.ID).Error).ShouldNot(HaveOccurred()) + Expect(swag.StringValue(c.Status)).Should(Equal(clusterStatusError)) + Expect(swag.StringValue(c.StatusInfo)).Should(Equal("aaaa")) + + }) + + 
It("complete_installation_conflict", func() { + c := common.Cluster{ + Cluster: models.Cluster{ID: &clusterId, Status: swag.String(clusterStatusInstalling)}, + } + Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), models.ClusterStatusInstalled, c.OpenshiftVersion, c.InstallStartedAt) + replay := capi.CompleteInstallation(ctx, &c, true, "") + Expect(replay).Should(HaveOccurred()) + Expect(int(replay.StatusCode())).Should(Equal(http.StatusConflict)) + + Expect(db.First(&c, "id = ?", c.ID).Error).ShouldNot(HaveOccurred()) + Expect(swag.StringValue(c.Status)).Should(Equal(clusterStatusInstalling)) + }) + + It("complete_installation_conflict_failed", func() { + c := common.Cluster{ + Cluster: models.Cluster{ID: &clusterId, Status: swag.String(clusterStatusInstalling)}, + } + Expect(db.Create(&c).Error).ShouldNot(HaveOccurred()) + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), models.ClusterStatusError, c.OpenshiftVersion, c.InstallStartedAt) + replay := capi.CompleteInstallation(ctx, &c, false, "") + Expect(replay).Should(HaveOccurred()) + Expect(int(replay.StatusCode())).Should(Equal(http.StatusConflict)) + + Expect(db.First(&c, "id = ?", c.ID).Error).ShouldNot(HaveOccurred()) + Expect(swag.StringValue(c.Status)).Should(Equal(clusterStatusInstalling)) + }) + }) + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) +}) + +var _ = Describe("Cancel cluster installation", func() { + var ( + ctx = context.Background() + dbName = "cancel_cluster_installation_test" + capi API + db *gorm.DB + ctrl *gomock.Controller + mockEventsHandler *events.MockHandler + mockMetric *metrics.MockAPI + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + ctrl = gomock.NewController(GinkgoT()) + mockEventsHandler = events.NewMockHandler(ctrl) + mockMetric = metrics.NewMockAPI(ctrl) + capi = NewManager(defaultTestConfig, getTestLog(), db, mockEventsHandler, nil, mockMetric) + }) 
+ + acceptNewEvents := func(times int) { + mockEventsHandler.EXPECT().AddEvent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(times) + } + + acceptClusterInstallationFinished := func(times int) { + mockMetric.EXPECT().ClusterInstallationFinished(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(times) + } + + tests := []struct { + state string + success bool + statusCode int32 + }{ + {state: models.ClusterStatusPreparingForInstallation, success: true}, + {state: models.ClusterStatusInstalling, success: true}, + {state: models.ClusterStatusError, success: true}, + {state: models.ClusterStatusInsufficient, success: false, statusCode: http.StatusConflict}, + {state: models.ClusterStatusReady, success: false, statusCode: http.StatusConflict}, + {state: models.ClusterStatusFinalizing, success: false, statusCode: http.StatusConflict}, + {state: models.ClusterStatusInstalled, success: false, statusCode: http.StatusConflict}, + } + + for _, t := range tests { + It(fmt.Sprintf("cancel from state %s", t.state), func() { + clusterId := strfmt.UUID(uuid.New().String()) + cluster := common.Cluster{ + Cluster: models.Cluster{ID: &clusterId, Status: swag.String(t.state)}, + } + Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) + eventsNum := 1 + if t.success { + eventsNum++ + acceptClusterInstallationFinished(1) + } + acceptNewEvents(eventsNum) + err := capi.CancelInstallation(ctx, &cluster, "reason", db) + if t.success { + Expect(err).ShouldNot(HaveOccurred()) + } else { + Expect(err).Should(HaveOccurred()) + Expect(err.StatusCode()).Should(Equal(t.statusCode)) + } + }) + } + + AfterEach(func() { + ctrl.Finish() + common.DeleteTestDB(db, dbName) + }) +}) + +var _ = Describe("Reset cluster", func() { + var ( + ctx = context.Background() + dbName = "reset_cluster_test" + capi API + db *gorm.DB + ctrl *gomock.Controller + mockEventsHandler *events.MockHandler + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, 
&events.Event{}) + ctrl = gomock.NewController(GinkgoT()) + mockEventsHandler = events.NewMockHandler(ctrl) + capi = NewManager(defaultTestConfig, getTestLog(), db, mockEventsHandler, nil, nil) + }) + + acceptNewEvents := func(times int) { + mockEventsHandler.EXPECT().AddEvent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(times) + } + + tests := []struct { + state string + success bool + statusCode int32 + }{ + {state: models.ClusterStatusPreparingForInstallation, success: true}, + {state: models.ClusterStatusInstalling, success: true}, + {state: models.ClusterStatusError, success: true}, + {state: models.ClusterStatusInsufficient, success: false, statusCode: http.StatusConflict}, + {state: models.ClusterStatusReady, success: false, statusCode: http.StatusConflict}, + {state: models.ClusterStatusFinalizing, success: false, statusCode: http.StatusConflict}, + {state: models.ClusterStatusInstalled, success: false, statusCode: http.StatusConflict}, + } + + for _, t := range tests { + It(fmt.Sprintf("reset from state %s", t.state), func() { + clusterId := strfmt.UUID(uuid.New().String()) + cluster := common.Cluster{ + Cluster: models.Cluster{ID: &clusterId, Status: swag.String(t.state)}, + } + Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) + eventsNum := 1 + if t.success { + eventsNum++ + } + acceptNewEvents(eventsNum) + err := capi.ResetCluster(ctx, &cluster, "reason", db) + if t.success { + Expect(err).ShouldNot(HaveOccurred()) + } else { + Expect(err).Should(HaveOccurred()) + Expect(err.StatusCode()).Should(Equal(t.statusCode)) + } + }) + } + + AfterEach(func() { + ctrl.Finish() + common.DeleteTestDB(db, dbName) + }) +}) diff --git a/internal/cluster/validations/validation_test.go b/internal/cluster/validations/validation_test.go new file mode 100644 index 000000000..b0fac9fc7 --- /dev/null +++ b/internal/cluster/validations/validation_test.go @@ -0,0 +1,161 @@ +package validations + +import ( + "testing" + + 
"github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go/service/route53/route53iface" + "github.com/danielerez/go-dns-client/pkg/dnsproviders" + _ "github.com/jinzhu/gorm/dialects/postgres" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +// #nosec +const ( + validSecretFormat = "{\"auths\":{\"cloud.openshift.com\":{\"auth\":\"dXNlcjpwYXNzd29yZAo=\",\"email\":\"r@r.com\"},\"quay.io\":{\"auth\":\"dXNlcjpwYXNzd29yZAo=\",\"email\":\"r@r.com\"},\"registry.connect.redhat.com\":{\"auth\":\"dXNlcjpwYXNzd29yZAo=\",\"email\":\"r@r.com\"},\"registry.redhat.io\":{\"auth\":\"dXNlcjpwYXNzd29yZAo=\",\"email\":\"r@r.com\"}}}" + invalidAuthFormat = "{\"auths\":{\"cloud.openshift.com\":{\"auth\":\"dXNlcjpwYXNzd29yZAo=\",\"email\":\"r@r.com\"},\"quay.io\":{\"auth\":\"dXNlcjpwYXNzd29yZAo=\",\"email\":\"r@r.com\"},\"registry.connect.redhat.com\":{\"auth\":\"dXNlcjpwYXNzd29yZAo=\",\"email\":\"r@r.com\"},\"registry.redhat.io\":{\"auth\":\"afsdfasf==\",\"email\":\"r@r.com\"}}}" + invalidSecretFormat = "{\"auths\":{\"cloud.openshift.com\":{\"key\":\"abcdef=\",\"email\":\"r@r.com\"},\"quay.io\":{\"auth\":\"adasfsdf=\",\"email\":\"r@r.com\"},\"registry.connect.redhat.com\":{\"auth\":\"tatastata==\",\"email\":\"r@r.com\"},\"registry.redhat.io\":{\"auth\":\"afsdfasf==\",\"email\":\"r@r.com\"}}}" +) + +var _ = Describe("Pull secret validation", func() { + + Context("test secret format", func() { + It("valid format", func() { + err := ValidatePullSecret(validSecretFormat) + Expect(err).Should(BeNil()) + }) + It("invalid format for the auth", func() { + err := ValidatePullSecret(invalidAuthFormat) + Expect(err).ShouldNot(BeNil()) + }) + It("invalid format", func() { + err := ValidatePullSecret(invalidSecretFormat) + Expect(err).ShouldNot(BeNil()) + }) + }) + +}) + +type mockRoute53Client struct { + route53iface.Route53API +} + +func (m *mockRoute53Client) ListResourceRecordSets(*route53.ListResourceRecordSetsInput) 
(*route53.ListResourceRecordSetsOutput, error) { + var output = route53.ListResourceRecordSetsOutput{ + ResourceRecordSets: []*route53.ResourceRecordSet{ + { + Name: aws.String("api.test.example.com."), + Type: aws.String("A"), + }, + { + Name: aws.String("*.apps.test.example.com."), + Type: aws.String("A"), + }, + }, + } + return &output, nil +} + +func (m *mockRoute53Client) GetHostedZone(*route53.GetHostedZoneInput) (*route53.GetHostedZoneOutput, error) { + var output = route53.GetHostedZoneOutput{ + HostedZone: &route53.HostedZone{ + Name: aws.String("test.example.com"), + }, + } + return &output, nil +} + +var _ = Describe("DNS Records validation", func() { + var dnsProvider dnsproviders.Route53 + + BeforeEach(func() { + mockSvc := &mockRoute53Client{} + dnsProvider = dnsproviders.Route53{ + RecordSet: dnsproviders.RecordSet{ + RecordSetType: "A", + TTL: 60, + }, + HostedZoneID: "abc", + } + dnsProvider.SVC = mockSvc + }) + + It("validation success", func() { + names := []string{"api.test2.example.com", "*.apps.test2.example.com"} + err := checkDNSRecordsExistence(names, dnsProvider) + Expect(err).ShouldNot(HaveOccurred()) + }) + It("validation failure - both names already exist", func() { + names := []string{"api.test.example.com", "*.apps.test.example.com"} + err := checkDNSRecordsExistence(names, dnsProvider) + Expect(err).Should(HaveOccurred()) + }) + It("validation failure - one name already exist", func() { + names := []string{"api.test.example.com", "*.apps.test2.example.com"} + err := checkDNSRecordsExistence(names, dnsProvider) + Expect(err).Should(HaveOccurred()) + }) +}) + +var _ = Describe("Base DNS validation", func() { + var dnsProvider dnsproviders.Route53 + + BeforeEach(func() { + mockSvc := &mockRoute53Client{} + dnsProvider = dnsproviders.Route53{ + HostedZoneID: "abc", + } + dnsProvider.SVC = mockSvc + }) + + It("validation success", func() { + err := validateBaseDNS("test.example.com", "abc", dnsProvider) + 
Expect(err).ShouldNot(HaveOccurred()) + }) + It("validation success - trailing dots", func() { + err := validateBaseDNS("test.example.com.", "abc", dnsProvider) + Expect(err).ShouldNot(HaveOccurred()) + }) + It("validation failure - invalid domain", func() { + err := validateBaseDNS("test2.example.com", "abc", dnsProvider) + Expect(err).Should(HaveOccurred()) + }) + It("validation success - valid subdomain", func() { + err := validateBaseDNS("abc.test.example.com", "abc", dnsProvider) + Expect(err).ShouldNot(HaveOccurred()) + }) + It("validation failure - invalid subdomain", func() { + err := validateBaseDNS("abc.deftest.example.com", "abc", dnsProvider) + Expect(err).Should(HaveOccurred()) + }) +}) + +var _ = Describe("Cluster name validation", func() { + It("success", func() { + err := ValidateClusterNameFormat("test-1") + Expect(err).ShouldNot(HaveOccurred()) + }) + It("invalid format - special character", func() { + err := ValidateClusterNameFormat("test!") + Expect(err).Should(HaveOccurred()) + }) + It("invalid format - capital letter", func() { + err := ValidateClusterNameFormat("testA") + Expect(err).Should(HaveOccurred()) + }) + It("invalid format - starts with number", func() { + err := ValidateClusterNameFormat("1test") + Expect(err).Should(HaveOccurred()) + }) + It("invalid format - ends with hyphen", func() { + err := ValidateClusterNameFormat("test-") + Expect(err).Should(HaveOccurred()) + }) +}) + +func TestCluster(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "cluster validations tests") +} diff --git a/internal/cluster/validations/validations.go b/internal/cluster/validations/validations.go new file mode 100644 index 000000000..d1111b35b --- /dev/null +++ b/internal/cluster/validations/validations.go @@ -0,0 +1,169 @@ +package validations + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "regexp" + "strings" + + "github.com/danielerez/go-dns-client/pkg/dnsproviders" +) + +const clusterNameRegex = 
"^([a-z]([-a-z0-9]*[a-z0-9])?)*$" + +type imagePullSecret struct { + Auths map[string]map[string]interface{} `json:"auths"` +} + +type PullSecretCreds struct { + Username string + Password string + Registry string + AuthRaw string +} + +func ParsePullSecret(secret string) (map[string]PullSecretCreds, error) { + result := make(map[string]PullSecretCreds) + var s imagePullSecret + err := json.Unmarshal([]byte(secret), &s) + if err != nil { + return nil, fmt.Errorf("invalid pull secret: %v", err) + } + if len(s.Auths) == 0 { + return nil, fmt.Errorf("invalid pull secret: missing 'auths' JSON-object field") + } + + for d, a := range s.Auths { + _, authPresent := a["auth"] + _, credsStorePresent := a["credsStore"] + if !authPresent && !credsStorePresent { + return nil, fmt.Errorf("invalid pull secret, '%q' JSON-object requires either 'auth' or 'credsStore' field", d) + } + data, err := base64.StdEncoding.DecodeString(a["auth"].(string)) + if err != nil { + return nil, fmt.Errorf("invalid pull secret, 'auth' fiels of '%q' is not base64 decodable", d) + } + res := bytes.Split(data, []byte(":")) + if len(res) != 2 { + return nil, fmt.Errorf("auth for %s has invalid format", d) + } + result[d] = PullSecretCreds{ + Password: string(res[1]), + Username: string(res[0]), + AuthRaw: a["auth"].(string), + Registry: d, + } + + } + return result, nil +} + +/* +const ( + registryCredsToCheck string = "registry.redhat.io" +) +*/ + +func ValidatePullSecret(secret string) error { + _, err := ParsePullSecret(secret) + if err != nil { + return err + } + /* + Actual credentials check is disabled for not until we solve how to do it in tests and subsystem + r, ok := creds[registryCredsToCheck] + if !ok { + return fmt.Errorf("Pull secret does not contain auth for %s", registryCredsToCheck) + } + dc, err := docker.NewEnvClient() + if err != nil { + return err + } + auth := types.AuthConfig{ + ServerAddress: r.Registry, + Username: r.Username, + Password: r.Password, + } + _, err = 
dc.RegistryLogin(context.Background(), auth) + if err != nil { + return err + } + */ + return nil +} + +// ValidateBaseDNS validates the specified base domain name +func ValidateBaseDNS(dnsDomainName, dnsDomainID, dnsProviderType string) error { + var dnsProvider dnsproviders.Provider + switch dnsProviderType { + case "route53": + dnsProvider = dnsproviders.Route53{ + HostedZoneID: dnsDomainID, + SharedCreds: true, + } + default: + return nil + } + return validateBaseDNS(dnsDomainName, dnsDomainID, dnsProvider) +} + +func validateBaseDNS(dnsDomainName, dnsDomainID string, dnsProvider dnsproviders.Provider) error { + dnsNameFromService, err := dnsProvider.GetDomainName() + if err != nil { + return fmt.Errorf("Can't validate base DNS domain: %v", err) + } + + dnsNameFromCluster := strings.TrimSuffix(dnsDomainName, ".") + if dnsNameFromService == dnsNameFromCluster { + // Valid domain + return nil + } + if matched, _ := regexp.MatchString(".*\\."+dnsNameFromService, dnsNameFromCluster); !matched { + return fmt.Errorf("Domain name isn't correlated properly to DNS service") + } + + return nil +} + +// CheckDNSRecordsExistence checks whether that specified record-set names already exist in the DNS service +func CheckDNSRecordsExistence(names []string, dnsDomainID, dnsProviderType string) error { + var dnsProvider dnsproviders.Provider + switch dnsProviderType { + case "route53": + dnsProvider = dnsproviders.Route53{ + RecordSet: dnsproviders.RecordSet{ + RecordSetType: "A", + }, + HostedZoneID: dnsDomainID, + SharedCreds: true, + } + default: + return nil + } + return checkDNSRecordsExistence(names, dnsProvider) +} + +func checkDNSRecordsExistence(names []string, dnsProvider dnsproviders.Provider) error { + for _, name := range names { + res, err := dnsProvider.GetRecordSet(name) + if err != nil { + return fmt.Errorf("Can't verify DNS record set existence: %v", err) + } + if res != "" { + return fmt.Errorf("DNS domain already exists") + } + } + return nil +} + +// 
ValidateClusterNameFormat validates specified cluster name format +func ValidateClusterNameFormat(name string) error { + if matched, _ := regexp.MatchString(clusterNameRegex, name); !matched { + return fmt.Errorf("Cluster name format is not valid: '%s'. "+ + "Name must consist of lower-case letters, numbers and hyphens. "+ + "It must start with a letter and end with a letter or number.", name) + } + return nil +} diff --git a/internal/common/common_unitest_db.go b/internal/common/common_unitest_db.go new file mode 100644 index 000000000..8963ae10a --- /dev/null +++ b/internal/common/common_unitest_db.go @@ -0,0 +1,114 @@ +package common + +import ( + "fmt" + "os" + "strings" + + "github.com/jinzhu/gorm" + _ "github.com/jinzhu/gorm/dialects/postgres" + "github.com/ory/dockertest/v3" + + "github.com/filanov/bm-inventory/models" + . "github.com/onsi/gomega" +) + +const ( + dbDockerName = "ut-postgres" + dbDefaultPort = "5432" +) + +type DBContext struct { + resource *dockertest.Resource + pool *dockertest.Pool +} + +func (c *DBContext) GetPort() string { + if c.resource == nil { + return dbDefaultPort + } else { + return c.resource.GetPort(fmt.Sprintf("%s/tcp", dbDefaultPort)) + } +} + +var gDbCtx DBContext = DBContext{ + resource: nil, + pool: nil, +} + +func InitializeDBTest() { + if os.Getenv("SKIP_UT_DB") != "" { + return + } + pool, err := dockertest.NewPool("") + Expect(err).ShouldNot(HaveOccurred()) + + //cleanup any old instances of the DB + if oldResource, isFound := pool.ContainerByName(dbDockerName); isFound { + oldResource.Close() + } + resource, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "12.3", + Env: []string{"POSTGRES_PASSWORD=admin", "POSTGRES_USER=admin"}, + Name: dbDockerName, + }) + Expect(err).ShouldNot(HaveOccurred()) + + gDbCtx.pool = pool + gDbCtx.resource = resource + + var dbTemp *gorm.DB + err = gDbCtx.pool.Retry(func() error { + var er error + + dbTemp, er = gorm.Open("postgres", 
fmt.Sprintf("host=127.0.0.1 port=%s user=admin password=admin sslmode=disable", gDbCtx.GetPort())) + return er + }) + Expect(err).ShouldNot(HaveOccurred()) + dbTemp.Close() +} + +func TerminateDBTest() { + if os.Getenv("SKIP_UT_DB") != "" { + return + } + Expect(gDbCtx.pool).ShouldNot(BeNil()) + err := gDbCtx.pool.Purge(gDbCtx.resource) + Expect(err).ShouldNot(HaveOccurred()) + gDbCtx.pool = nil +} + +func PrepareTestDB(dbName string, extrasSchemas ...interface{}) *gorm.DB { + dbTemp, err := gorm.Open("postgres", fmt.Sprintf("host=127.0.0.1 port=%s user=admin password=admin sslmode=disable", gDbCtx.GetPort())) + Expect(err).ShouldNot(HaveOccurred()) + defer dbTemp.Close() + + dbTemp = dbTemp.Exec(fmt.Sprintf("CREATE DATABASE %s;", strings.ToLower(dbName))) + Expect(dbTemp.Error).ShouldNot(HaveOccurred()) + + db, err := gorm.Open("postgres", + fmt.Sprintf("host=127.0.0.1 port=%s dbname=%s user=admin password=admin sslmode=disable", gDbCtx.GetPort(), strings.ToLower(dbName))) + Expect(err).ShouldNot(HaveOccurred()) + // db = db.Debug() + db.AutoMigrate(&models.Host{}, &Cluster{}) + if len(extrasSchemas) > 0 { + for _, schema := range extrasSchemas { + db = db.AutoMigrate(schema) + Expect(db.Error).ShouldNot(HaveOccurred()) + } + } + return db +} + +func DeleteTestDB(db *gorm.DB, dbName string) { + db.Close() + + db, err := gorm.Open("postgres", + fmt.Sprintf("host=127.0.0.1 port=%s user=admin password=admin sslmode=disable", gDbCtx.GetPort())) + Expect(err).ShouldNot(HaveOccurred()) + defer db.Close() + db = db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s;", strings.ToLower(dbName))) + + Expect(db.Error).ShouldNot(HaveOccurred()) +} diff --git a/internal/common/db.go b/internal/common/db.go new file mode 100644 index 000000000..355c03641 --- /dev/null +++ b/internal/common/db.go @@ -0,0 +1,9 @@ +package common + +import "github.com/filanov/bm-inventory/models" + +type Cluster struct { + models.Cluster + // The pull secret that obtained from the Pull Secret page on 
the Red Hat OpenShift Cluster Manager site. + PullSecret string `json:"pull_secret" gorm:"type:TEXT"` +} diff --git a/internal/common/error_utils.go b/internal/common/error_utils.go index c11a69027..ca4838b64 100644 --- a/internal/common/error_utils.go +++ b/internal/common/error_utils.go @@ -2,6 +2,10 @@ package common import ( "net/http" + "strconv" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" "github.com/filanov/bm-inventory/models" "github.com/go-openapi/swag" @@ -9,7 +13,7 @@ import ( func GenerateError(id int32, err error) *models.Error { return &models.Error{ - Code: swag.String(string(id)), + Code: swag.String(strconv.Itoa(int(id))), Href: swag.String(""), ID: swag.Int32(id), Kind: swag.String("Error"), @@ -26,3 +30,48 @@ func GenerateInternalFromError(err error) *models.Error { Reason: swag.String(err.Error()), } } + +type ApiErrorResponse struct { + statusCode int32 + err error +} + +func (a *ApiErrorResponse) Error() string { + return a.err.Error() +} + +func (a *ApiErrorResponse) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + rw.WriteHeader(int(a.statusCode)) + if err := producer.Produce(rw, GenerateError(a.statusCode, a.err)); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +func (a *ApiErrorResponse) StatusCode() int32 { + return a.statusCode +} + +func NewApiError(statusCode int32, err error) *ApiErrorResponse { + return &ApiErrorResponse{ + statusCode: statusCode, + err: err, + } +} + +func GenerateErrorResponder(err error) middleware.Responder { + switch errValue := err.(type) { + case *ApiErrorResponse: + return errValue + default: + return NewApiError(http.StatusInternalServerError, err) + } +} + +func GenerateErrorResponderWithDefault(err error, defaultCode int32) middleware.Responder { + switch errValue := err.(type) { + case *ApiErrorResponse: + return errValue + default: + return NewApiError(defaultCode, err) + } +} diff --git 
a/internal/common/host_utils.go b/internal/common/host_utils.go new file mode 100644 index 000000000..be38403c9 --- /dev/null +++ b/internal/common/host_utils.go @@ -0,0 +1,41 @@ +package common + +import ( + "encoding/json" + + "github.com/filanov/bm-inventory/models" +) + +func GetCurrentHostName(host *models.Host) (string, error) { + var inventory models.Inventory + if host.RequestedHostname != "" { + return host.RequestedHostname, nil + } + err := json.Unmarshal([]byte(host.Inventory), &inventory) + if err != nil { + return "", err + } + return inventory.Hostname, nil +} + +func GetHostnameForMsg(host *models.Host) string { + hostName, err := GetCurrentHostName(host) + // An error here probably indicates that the agent didn't send inventory yet, fall back to UUID + if err != nil || hostName == "" { + return host.ID.String() + } + return hostName +} + +func GetEventSeverityFromHostStatus(status string) string { + switch status { + case models.HostStatusDisconnected: + return models.EventSeverityWarning + case models.HostStatusInstallingPendingUserAction: + return models.EventSeverityWarning + case models.HostStatusError: + return models.EventSeverityError + default: + return models.EventSeverityInfo + } +} diff --git a/internal/connectivity/mock_connectivity_validator.go b/internal/connectivity/mock_connectivity_validator.go new file mode 100644 index 000000000..6be76ae23 --- /dev/null +++ b/internal/connectivity/mock_connectivity_validator.go @@ -0,0 +1,50 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: validator.go + +// Package connectivity is a generated GoMock package. 
+package connectivity + +import ( + reflect "reflect" + + models "github.com/filanov/bm-inventory/models" + gomock "github.com/golang/mock/gomock" +) + +// MockValidator is a mock of Validator interface +type MockValidator struct { + ctrl *gomock.Controller + recorder *MockValidatorMockRecorder +} + +// MockValidatorMockRecorder is the mock recorder for MockValidator +type MockValidatorMockRecorder struct { + mock *MockValidator +} + +// NewMockValidator creates a new mock instance +func NewMockValidator(ctrl *gomock.Controller) *MockValidator { + mock := &MockValidator{ctrl: ctrl} + mock.recorder = &MockValidatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockValidator) EXPECT() *MockValidatorMockRecorder { + return m.recorder +} + +// GetHostValidInterfaces mocks base method +func (m *MockValidator) GetHostValidInterfaces(host *models.Host) ([]*models.Interface, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHostValidInterfaces", host) + ret0, _ := ret[0].([]*models.Interface) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHostValidInterfaces indicates an expected call of GetHostValidInterfaces +func (mr *MockValidatorMockRecorder) GetHostValidInterfaces(host interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHostValidInterfaces", reflect.TypeOf((*MockValidator)(nil).GetHostValidInterfaces), host) +} diff --git a/internal/connectivity/validator.go b/internal/connectivity/validator.go new file mode 100644 index 000000000..b9fc61747 --- /dev/null +++ b/internal/connectivity/validator.go @@ -0,0 +1,35 @@ +package connectivity + +import ( + "encoding/json" + "fmt" + + "github.com/filanov/bm-inventory/models" + "github.com/sirupsen/logrus" +) + +//go:generate mockgen -source=validator.go -package=connectivity -destination=mock_connectivity_validator.go +type Validator interface { + 
GetHostValidInterfaces(host *models.Host) ([]*models.Interface, error) +} + +func NewValidator(log logrus.FieldLogger) Validator { + return &validator{ + log: log, + } +} + +type validator struct { + log logrus.FieldLogger +} + +func (v *validator) GetHostValidInterfaces(host *models.Host) ([]*models.Interface, error) { + var inventory models.Inventory + if err := json.Unmarshal([]byte(host.Inventory), &inventory); err != nil { + return nil, err + } + if len(inventory.Interfaces) == 0 { + return nil, fmt.Errorf("host %s doesn't have interfaces", host.ID) + } + return inventory.Interfaces, nil +} diff --git a/internal/connectivity/validator_test.go b/internal/connectivity/validator_test.go new file mode 100644 index 000000000..95138fa93 --- /dev/null +++ b/internal/connectivity/validator_test.go @@ -0,0 +1,60 @@ +package connectivity + +import ( + "encoding/json" + "testing" + + "github.com/sirupsen/logrus" + + "github.com/filanov/bm-inventory/models" + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestValidator(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Connectivity Validator tests Suite") +} + +var _ = Describe("get valid interfaces", func() { + var ( + connectivityValidator Validator + host *models.Host + inventory *models.Inventory + ) + BeforeEach(func() { + connectivityValidator = NewValidator(logrus.New()) + id := strfmt.UUID(uuid.New().String()) + clusterID := strfmt.UUID(uuid.New().String()) + host = &models.Host{ID: &id, ClusterID: clusterID} + inventory = &models.Inventory{ + Interfaces: []*models.Interface{ + { + IPV4Addresses: []string{ + "1.2.3.4/24", + }, + }, + }, + } + }) + + It("valid interfaces", func() { + hw, err := json.Marshal(&inventory) + Expect(err).NotTo(HaveOccurred()) + host.Inventory = string(hw) + interfaces, err := connectivityValidator.GetHostValidInterfaces(host) + Expect(err).NotTo(HaveOccurred()) + Expect(len(interfaces)).Should(Equal(1)) + }) + + It("invalid interfaces", func() { + + host.Inventory = "" + interfaces, err := connectivityValidator.GetHostValidInterfaces(host) + Expect(err).To(HaveOccurred()) + Expect(interfaces).To(BeNil()) + }) + +}) diff --git a/internal/domains/managed_domains.go b/internal/domains/managed_domains.go new file mode 100644 index 000000000..5220e59ab --- /dev/null +++ b/internal/domains/managed_domains.go @@ -0,0 +1,50 @@ +package domains + +import ( + "context" + "regexp" + + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/models" + "github.com/filanov/bm-inventory/restapi" + operations "github.com/filanov/bm-inventory/restapi/operations/managed_domains" + "github.com/go-openapi/runtime/middleware" + "github.com/pkg/errors" +) + +// NewHandler returns managed domains handler +func NewHandler(baseDNSDomains map[string]string) *Handler { + return &Handler{baseDNSDomains: baseDNSDomains} +} + +var _ restapi.ManagedDomainsAPI = (*Handler)(nil) + +// Handler represents managed domains handler +type Handler 
struct { + baseDNSDomains map[string]string +} + +func (h *Handler) parseDomainProvider(val string) (string, error) { + re := regexp.MustCompile("/") + if !re.MatchString(val) { + return "", errors.Errorf("Invalid format: %s", val) + } + s := re.Split(val, 2) + return s[1], nil +} + +func (h *Handler) ListManagedDomains(ctx context.Context, params operations.ListManagedDomainsParams) middleware.Responder { + managedDomains := models.ListManagedDomains{} + for k, v := range h.baseDNSDomains { + provider, err := h.parseDomainProvider(v) + if err != nil { + return operations.NewListManagedDomainsInternalServerError(). + WithPayload(common.GenerateInternalFromError(err)) + } + managedDomains = append(managedDomains, &models.ManagedDomain{ + Domain: k, + Provider: provider, + }) + } + return operations.NewListManagedDomainsOK().WithPayload(managedDomains) +} diff --git a/internal/domains/managed_domains_test.go b/internal/domains/managed_domains_test.go new file mode 100644 index 000000000..d9d7c413c --- /dev/null +++ b/internal/domains/managed_domains_test.go @@ -0,0 +1,52 @@ +package domains + +import ( + "context" + "testing" + + operations "github.com/filanov/bm-inventory/restapi/operations/managed_domains" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestHandler_ListManagedDomains(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "managed domains") +} + +var _ = Describe("list base domains", func() { + var ( + h *Handler + baseDNSDomains map[string]string + ) + It("valid", func() { + baseDNSDomains = map[string]string{ + "example.com": "abc/route53", + } + h = NewHandler(baseDNSDomains) + reply := h.ListManagedDomains(context.Background(), operations.ListManagedDomainsParams{}) + Expect(reply).Should(BeAssignableToTypeOf(operations.NewListManagedDomainsOK())) + val, _ := reply.(*operations.ListManagedDomainsOK) + domains := val.Payload + Expect(len(domains)).Should(Equal(1)) + Expect(domains[0].Domain).Should(Equal("example.com")) + Expect(domains[0].Provider).Should(Equal("route53")) + }) + It("empty", func() { + baseDNSDomains = map[string]string{} + h = NewHandler(baseDNSDomains) + reply := h.ListManagedDomains(context.Background(), operations.ListManagedDomainsParams{}) + Expect(reply).Should(BeAssignableToTypeOf(operations.NewListManagedDomainsOK())) + val, _ := reply.(*operations.ListManagedDomainsOK) + domains := val.Payload + Expect(len(domains)).Should(Equal(0)) + }) + It("invalid format", func() { + baseDNSDomains = map[string]string{ + "example.com": "abcroute53", + } + h = NewHandler(baseDNSDomains) + reply := h.ListManagedDomains(context.Background(), operations.ListManagedDomainsParams{}) + Expect(reply).Should(BeAssignableToTypeOf(operations.NewListManagedDomainsInternalServerError())) + }) +}) diff --git a/internal/events/event.go b/internal/events/event.go index b9b849613..e37323756 100644 --- a/internal/events/event.go +++ b/internal/events/event.go @@ -21,7 +21,7 @@ type Handler interface { // host added to cluster, we have the host-id as the main entityID and // the cluster-id as another ID that this event should be related to // otherEntities arguments provides for specifying mor IDs that are relevant for this event - AddEvent(ctx context.Context, 
entityID string, msg string, eventTime time.Time, otherEntities ...string) + AddEvent(ctx context.Context, entityID string, severity string, msg string, eventTime time.Time, otherEntities ...string) GetEvents(entityID string) ([]*Event, error) } @@ -44,7 +44,7 @@ func New(db *gorm.DB, log logrus.FieldLogger) *Events { } } -func addEventToDB(db *gorm.DB, id string, message string, t time.Time, requestID string) error { +func addEventToDB(log logrus.FieldLogger, db *gorm.DB, id string, severity string, message string, t time.Time, requestID string) error { tt := strfmt.DateTime(t) uid := strfmt.UUID(id) rid := strfmt.UUID(requestID) @@ -52,18 +52,19 @@ func addEventToDB(db *gorm.DB, id string, message string, t time.Time, requestID Event: models.Event{ EventTime: &tt, EntityID: &uid, + Severity: &severity, Message: &message, RequestID: rid, }, } if err := db.Create(&e).Error; err != nil { - logrus.WithError(err).Error("Error adding event") + log.WithError(err).Error("Error adding event") } return nil } -func (e *Events) AddEvent(ctx context.Context, entityID string, msg string, eventTime time.Time, otherEntities ...string) { +func (e *Events) AddEvent(ctx context.Context, entityID string, severity string, msg string, eventTime time.Time, otherEntities ...string) { log := logutil.FromContext(ctx, e.log) var isSuccess bool = false tx := e.db.Begin() @@ -77,7 +78,7 @@ func (e *Events) AddEvent(ctx context.Context, entityID string, msg string, even }() requestID := requestid.FromContext(ctx) - err := addEventToDB(tx, entityID, msg, eventTime, requestID) + err := addEventToDB(log, tx, entityID, severity, msg, eventTime, requestID) if err != nil { return } @@ -85,7 +86,7 @@ func (e *Events) AddEvent(ctx context.Context, entityID string, msg string, even // Since we don't keep different tables to support multiple IDs for a single event, // the workaround is to add to the DB a new event for every ID this event relates to for _, entity := range otherEntities { - err := 
addEventToDB(tx, entity, msg, eventTime, requestID) + err := addEventToDB(log, tx, entity, severity, msg, eventTime, requestID) if err != nil { return } diff --git a/internal/events/event_test.go b/internal/events/event_test.go index c54447800..c94703ea0 100644 --- a/internal/events/event_test.go +++ b/internal/events/event_test.go @@ -5,27 +5,22 @@ import ( "testing" "time" + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/pkg/requestid" "github.com/pborman/uuid" "github.com/filanov/bm-inventory/internal/events" + "github.com/filanov/bm-inventory/models" "github.com/go-openapi/swag" "github.com/jinzhu/gorm" - _ "github.com/jinzhu/gorm/dialects/sqlite" + _ "github.com/jinzhu/gorm/dialects/postgres" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/onsi/gomega/types" "github.com/sirupsen/logrus" ) -func prepareDB() *gorm.DB { - db, err := gorm.Open("sqlite3", ":memory:") - Expect(err).ShouldNot(HaveOccurred()) - //db = db.Debug() - db.AutoMigrate(&events.Event{}) - return db -} - /* Given events library Initially @@ -35,15 +30,12 @@ var _ = Describe("Events library", func() { var ( db *gorm.DB theEvents *events.Events + dbName = "events_test" ) BeforeEach(func() { - db = prepareDB() + db = common.PrepareTestDB(dbName, &events.Event{}) theEvents = events.New(db, logrus.WithField("pkg", "events")) }) - AfterEach(func() { - db.Close() - }) - numOfEvents := func(id string) int { evs, err := theEvents.GetEvents(id) Expect(err).Should(BeNil()) @@ -64,7 +56,7 @@ var _ = Describe("Events library", func() { Context("With events", func() { It("Adding a single event", func() { - theEvents.AddEvent(context.TODO(), "1", "the event1", time.Now()) + theEvents.AddEvent(context.TODO(), "1", models.EventSeverityInfo, "the event1", time.Now()) Expect(numOfEvents("1")).Should(Equal(1)) Expect(numOfEvents("2")).Should(Equal(0)) Expect(numOfEvents("3")).Should(Equal(0)) @@ -72,19 +64,20 @@ var _ = Describe("Events library", func() { 
evs, err := theEvents.GetEvents("1") Expect(err).Should(BeNil()) Expect(evs[0]).Should(WithMessage(swag.String("the event1"))) + Expect(evs[0]).Should(WithSeverity(swag.String(models.EventSeverityInfo))) - theEvents.AddEvent(context.TODO(), "2", "event2", time.Now()) + theEvents.AddEvent(context.TODO(), "2", models.EventSeverityInfo, "event2", time.Now()) Expect(numOfEvents("1")).Should(Equal(1)) Expect(numOfEvents("2")).Should(Equal(1)) Expect(numOfEvents("3")).Should(Equal(0)) }) It("Adding events for multiple ids ", func() { - theEvents.AddEvent(context.TODO(), "1", "event1", time.Now()) + theEvents.AddEvent(context.TODO(), "1", models.EventSeverityInfo, "event1", time.Now()) Expect(numOfEvents("1")).Should(Equal(1)) Expect(numOfEvents("2")).Should(Equal(0)) Expect(numOfEvents("3")).Should(Equal(0)) - theEvents.AddEvent(context.TODO(), "2", "event2", time.Now(), "1", "3") + theEvents.AddEvent(context.TODO(), "2", models.EventSeverityInfo, "event2", time.Now(), "1", "3") Expect(numOfEvents("1")).Should(Equal(2)) Expect(numOfEvents("2")).Should(Equal(1)) Expect(numOfEvents("3")).Should(Equal(1)) @@ -92,21 +85,23 @@ var _ = Describe("Events library", func() { It("Adding same event multiple times", func() { t1 := time.Now() - theEvents.AddEvent(context.TODO(), "1", "event1", t1) + theEvents.AddEvent(context.TODO(), "1", models.EventSeverityInfo, "event1", t1) Expect(numOfEvents("1")).Should(Equal(1)) evs, err := theEvents.GetEvents("1") Expect(err).Should(BeNil()) Expect(evs[0]).Should(WithMessage(swag.String("event1"))) Expect(evs[0]).Should(WithTime(t1)) + Expect(evs[0]).Should(WithSeverity(swag.String(models.EventSeverityInfo))) t2 := time.Now() - theEvents.AddEvent(context.TODO(), "1", "event1", t2) + theEvents.AddEvent(context.TODO(), "1", models.EventSeverityInfo, "event1", t2) Expect(numOfEvents("1")).Should(Equal(2)) evs, err = theEvents.GetEvents("1") Expect(err).Should(BeNil()) Expect(evs[0]).Should(WithMessage(swag.String("event1"))) 
Expect(evs[0]).Should(WithTime(t2)) + Expect(evs[0]).Should(WithSeverity(swag.String(models.EventSeverityInfo))) Expect(numOfEvents("2")).Should(Equal(0)) Expect(numOfEvents("3")).Should(Equal(0)) @@ -118,21 +113,27 @@ var _ = Describe("Events library", func() { ctx := context.Background() rid1 := uuid.NewRandom().String() ctx = requestid.ToContext(ctx, rid1) - theEvents.AddEvent(ctx, "1", "event1", time.Now(), "2") + theEvents.AddEvent(ctx, "1", models.EventSeverityInfo, "event1", time.Now(), "2") Expect(numOfEvents("1")).Should(Equal(1)) evs, err := theEvents.GetEvents("1") Expect(err).Should(BeNil()) Expect(evs[0]).Should(WithMessage(swag.String("event1"))) Expect(evs[0]).Should(WithRequestID(rid1)) + Expect(evs[0]).Should(WithSeverity(swag.String(models.EventSeverityInfo))) evs, err = theEvents.GetEvents("2") Expect(err).Should(BeNil()) Expect(evs[0]).Should(WithMessage(swag.String("event1"))) Expect(evs[0]).Should(WithRequestID(rid1)) - + Expect(evs[0]).Should(WithSeverity(swag.String(models.EventSeverityInfo))) }) }) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) + }) func WithRequestID(requestID string) types.GomegaMatcher { @@ -147,6 +148,12 @@ func WithMessage(msg *string) types.GomegaMatcher { }, Equal(msg)) } +func WithSeverity(severity *string) types.GomegaMatcher { + return WithTransform(func(e *events.Event) *string { + return e.Severity + }, Equal(severity)) +} + func WithTime(t time.Time) types.GomegaMatcher { return WithTransform(func(e *events.Event) time.Time { return time.Time(*e.EventTime) @@ -155,5 +162,7 @@ func WithTime(t time.Time) types.GomegaMatcher { func TestEvents(t *testing.T) { RegisterFailHandler(Fail) + common.InitializeDBTest() + defer common.TerminateDBTest() RunSpecs(t, "Events test Suite") } diff --git a/internal/events/events_api.go b/internal/events/events_api.go index 8eea84ae4..7de64d6cb 100644 --- a/internal/events/events_api.go +++ b/internal/events/events_api.go @@ -39,6 +39,7 @@ func (a *Api) 
ListEvents(ctx context.Context, params events.ListEventsParams) mi for i, ev := range evs { ret[i] = &models.Event{ EntityID: ev.EntityID, + Severity: ev.Severity, EventTime: ev.EventTime, Message: ev.Message, } diff --git a/internal/events/mock_event.go b/internal/events/mock_event.go index 09f71083e..59c68907e 100644 --- a/internal/events/mock_event.go +++ b/internal/events/mock_event.go @@ -12,47 +12,47 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockHandler is a mock of Handler interface. +// MockHandler is a mock of Handler interface type MockHandler struct { ctrl *gomock.Controller recorder *MockHandlerMockRecorder } -// MockHandlerMockRecorder is the mock recorder for MockHandler. +// MockHandlerMockRecorder is the mock recorder for MockHandler type MockHandlerMockRecorder struct { mock *MockHandler } -// NewMockHandler creates a new mock instance. +// NewMockHandler creates a new mock instance func NewMockHandler(ctrl *gomock.Controller) *MockHandler { mock := &MockHandler{ctrl: ctrl} mock.recorder = &MockHandlerMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use. +// EXPECT returns an object that allows the caller to indicate expected use func (m *MockHandler) EXPECT() *MockHandlerMockRecorder { return m.recorder } -// AddEvent mocks base method. -func (m *MockHandler) AddEvent(ctx context.Context, entityID, msg string, eventTime time.Time, otherEntities ...string) { +// AddEvent mocks base method +func (m *MockHandler) AddEvent(ctx context.Context, entityID, severity, msg string, eventTime time.Time, otherEntities ...string) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, entityID, msg, eventTime} + varargs := []interface{}{ctx, entityID, severity, msg, eventTime} for _, a := range otherEntities { varargs = append(varargs, a) } m.ctrl.Call(m, "AddEvent", varargs...) } -// AddEvent indicates an expected call of AddEvent. 
-func (mr *MockHandlerMockRecorder) AddEvent(ctx, entityID, msg, eventTime interface{}, otherEntities ...interface{}) *gomock.Call { +// AddEvent indicates an expected call of AddEvent +func (mr *MockHandlerMockRecorder) AddEvent(ctx, entityID, severity, msg, eventTime interface{}, otherEntities ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, entityID, msg, eventTime}, otherEntities...) + varargs := append([]interface{}{ctx, entityID, severity, msg, eventTime}, otherEntities...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddEvent", reflect.TypeOf((*MockHandler)(nil).AddEvent), varargs...) } -// GetEvents mocks base method. +// GetEvents mocks base method func (m *MockHandler) GetEvents(entityID string) ([]*Event, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetEvents", entityID) @@ -61,7 +61,7 @@ func (m *MockHandler) GetEvents(entityID string) ([]*Event, error) { return ret0, ret1 } -// GetEvents indicates an expected call of GetEvents. +// GetEvents indicates an expected call of GetEvents func (mr *MockHandlerMockRecorder) GetEvents(entityID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEvents", reflect.TypeOf((*MockHandler)(nil).GetEvents), entityID) diff --git a/internal/hardware/mock_validator.go b/internal/hardware/mock_validator.go index 7c174066d..5a3028b09 100644 --- a/internal/hardware/mock_validator.go +++ b/internal/hardware/mock_validator.go @@ -11,45 +11,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockValidator is a mock of Validator interface. +// MockValidator is a mock of Validator interface type MockValidator struct { ctrl *gomock.Controller recorder *MockValidatorMockRecorder } -// MockValidatorMockRecorder is the mock recorder for MockValidator. 
+// MockValidatorMockRecorder is the mock recorder for MockValidator type MockValidatorMockRecorder struct { mock *MockValidator } -// NewMockValidator creates a new mock instance. +// NewMockValidator creates a new mock instance func NewMockValidator(ctrl *gomock.Controller) *MockValidator { mock := &MockValidator{ctrl: ctrl} mock.recorder = &MockValidatorMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use. +// EXPECT returns an object that allows the caller to indicate expected use func (m *MockValidator) EXPECT() *MockValidatorMockRecorder { return m.recorder } -// IsSufficient mocks base method. -func (m *MockValidator) IsSufficient(host *models.Host) (*IsSufficientReply, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsSufficient", host) - ret0, _ := ret[0].(*IsSufficientReply) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// IsSufficient indicates an expected call of IsSufficient. -func (mr *MockValidatorMockRecorder) IsSufficient(host interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSufficient", reflect.TypeOf((*MockValidator)(nil).IsSufficient), host) -} - -// GetHostValidDisks mocks base method. +// GetHostValidDisks mocks base method func (m *MockValidator) GetHostValidDisks(host *models.Host) ([]*models.Disk, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetHostValidDisks", host) @@ -58,23 +43,8 @@ func (m *MockValidator) GetHostValidDisks(host *models.Host) ([]*models.Disk, er return ret0, ret1 } -// GetHostValidDisks indicates an expected call of GetHostValidDisks. 
+// GetHostValidDisks indicates an expected call of GetHostValidDisks func (mr *MockValidatorMockRecorder) GetHostValidDisks(host interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHostValidDisks", reflect.TypeOf((*MockValidator)(nil).GetHostValidDisks), host) } - -// GetHostValidInterfaces mocks base method. -func (m *MockValidator) GetHostValidInterfaces(host *models.Host) ([]*models.Interface, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHostValidInterfaces", host) - ret0, _ := ret[0].([]*models.Interface) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetHostValidInterfaces indicates an expected call of GetHostValidInterfaces. -func (mr *MockValidatorMockRecorder) GetHostValidInterfaces(host interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHostValidInterfaces", reflect.TypeOf((*MockValidator)(nil).GetHostValidInterfaces), host) -} diff --git a/internal/hardware/validator.go b/internal/hardware/validator.go index 8d959e6a3..442d44f3c 100644 --- a/internal/hardware/validator.go +++ b/internal/hardware/validator.go @@ -3,29 +3,27 @@ package hardware import ( "encoding/json" "fmt" - "regexp" "sort" + "strings" + + "github.com/thoas/go-funk" + + "github.com/sirupsen/logrus" "github.com/alecthomas/units" "github.com/filanov/bm-inventory/models" ) -type IsSufficientReply struct { - IsSufficient bool - Reason string -} - -const diskNameFilterRegex = "nvme" - //go:generate mockgen -source=validator.go -package=hardware -destination=mock_validator.go type Validator interface { - IsSufficient(host *models.Host) (*IsSufficientReply, error) GetHostValidDisks(host *models.Host) ([]*models.Disk, error) - GetHostValidInterfaces(host *models.Host) ([]*models.Interface, error) } -func NewValidator(cfg ValidatorCfg) Validator { - return &validator{ValidatorCfg: cfg} +func NewValidator(log logrus.FieldLogger, cfg ValidatorCfg) Validator 
{ + return &validator{ + ValidatorCfg: cfg, + log: log, + } } type ValidatorCfg struct { @@ -35,63 +33,12 @@ type ValidatorCfg struct { MinRamGib int64 `envconfig:"HW_VALIDATOR_MIN_RAM_GIB" default:"8"` MinRamGibWorker int64 `envconfig:"HW_VALIDATOR_MIN_RAM_GIB_WORKER" default:"8"` MinRamGibMaster int64 `envconfig:"HW_VALIDATOR_MIN_RAM_GIB_MASTER" default:"16"` - MinDiskSizeGib int64 `envconfig:"HW_VALIDATOR_MIN_DISK_SIZE_GIB" default:"120"` + MinDiskSizeGb int64 `envconfig:"HW_VALIDATOR_MIN_DISK_SIZE_GIB" default:"120"` // Env variable is GIB to not break infra } type validator struct { ValidatorCfg -} - -func (v *validator) IsSufficient(host *models.Host) (*IsSufficientReply, error) { - var err error - var reason string - var isSufficient bool - var hwInfo models.Inventory - - if err = json.Unmarshal([]byte(host.Inventory), &hwInfo); err != nil { - return nil, err - } - - var minCpuCoresRequired int64 = v.MinCPUCores - var minRamRequired int64 = gibToBytes(v.MinRamGib) - var minDiskSizeRequired int64 = gibToBytes(v.MinDiskSizeGib) - - switch host.Role { - case "master": - minCpuCoresRequired = v.MinCPUCoresMaster - minRamRequired = gibToBytes(v.MinRamGibMaster) - case "worker": - minCpuCoresRequired = v.MinCPUCoresWorker - minRamRequired = gibToBytes(v.MinRamGibWorker) - } - - if hwInfo.CPU.Count < minCpuCoresRequired { - reason += fmt.Sprintf(", insufficient CPU cores, expected: <%d> got <%d>", minCpuCoresRequired, hwInfo.CPU.Count) - } - - if hwInfo.Memory.PhysicalBytes < minRamRequired { - reason += fmt.Sprintf(", insufficient RAM requirements, expected: <%s> got <%s>", - units.Base2Bytes(minRamRequired), units.Base2Bytes(hwInfo.Memory.PhysicalBytes)) - } - - if disks := listValidDisks(hwInfo, minDiskSizeRequired); len(disks) < 1 { - reason += fmt.Sprintf(", insufficient number of disks with required size, "+ - "expected at least 1 not removable, not readonly disk of size more than <%d>", minDiskSizeRequired) - } - - if len(reason) == 0 { - isSufficient = true 
- } else { - reason = fmt.Sprintf("host has insufficient hardware%s", reason) - if host.Role != "" { - reason = fmt.Sprintf("%s %s", host.Role, reason) - } - } - - return &IsSufficientReply{ - IsSufficient: isSufficient, - Reason: reason, - }, nil + log logrus.FieldLogger } func (v *validator) GetHostValidDisks(host *models.Host) ([]*models.Disk, error) { @@ -99,39 +46,44 @@ func (v *validator) GetHostValidDisks(host *models.Host) ([]*models.Disk, error) if err := json.Unmarshal([]byte(host.Inventory), &inventory); err != nil { return nil, err } - disks := listValidDisks(inventory, gibToBytes(v.MinDiskSizeGib)) + disks := ListValidDisks(&inventory, gbToBytes(v.MinDiskSizeGb)) if len(disks) == 0 { return nil, fmt.Errorf("host %s doesn't have valid disks", host.ID) } return disks, nil } -func (v *validator) GetHostValidInterfaces(host *models.Host) ([]*models.Interface, error) { - var inventory models.Inventory - if err := json.Unmarshal([]byte(host.Inventory), &inventory); err != nil { - return nil, err - } - if len(inventory.Interfaces) == 0 { - return nil, fmt.Errorf("host %s doesn't have interfaces", host.ID) - } - return inventory.Interfaces, nil +func gbToBytes(gb int64) int64 { + return gb * int64(units.GB) } -func gibToBytes(gib int64) int64 { - return gib * int64(units.GiB) +func isNvme(name string) bool { + return strings.HasPrefix(name, "nvme") } -func listValidDisks(inventory models.Inventory, minSizeRequiredInBytes int64) []*models.Disk { +func ListValidDisks(inventory *models.Inventory, minSizeRequiredInBytes int64) []*models.Disk { var disks []*models.Disk - filter, _ := regexp.Compile(diskNameFilterRegex) for _, disk := range inventory.Disks { - if disk.SizeBytes >= minSizeRequiredInBytes && disk.DriveType == "HDD" && !filter.MatchString(disk.Name) { + if disk.SizeBytes >= minSizeRequiredInBytes && funk.ContainsString([]string{"HDD", "SSD"}, disk.DriveType) { disks = append(disks, disk) } } + // Sorting list by size increase sort.Slice(disks, func(i, 
j int) bool { - return disks[i].SizeBytes < disks[j].SizeBytes + isNvme1 := isNvme(disks[i].Name) + isNvme2 := isNvme(disks[j].Name) + if isNvme1 != isNvme2 { + return isNvme2 + } + + // HDD is before SSD + switch v := strings.Compare(disks[i].DriveType, disks[j].DriveType); v { + case 0: + return disks[i].SizeBytes < disks[j].SizeBytes + default: + return v < 0 + } }) return disks } diff --git a/internal/hardware/validator_test.go b/internal/hardware/validator_test.go index 6a98c3622..7c4d10eb3 100644 --- a/internal/hardware/validator_test.go +++ b/internal/hardware/validator_test.go @@ -4,6 +4,10 @@ import ( "encoding/json" "testing" + "github.com/filanov/bm-inventory/internal/common" + + "github.com/sirupsen/logrus" + "github.com/alecthomas/units" "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" @@ -15,88 +19,52 @@ import ( func TestValidator(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Subsystem Suite") + RunSpecs(t, "Hardware Validator tests Suite") } var _ = Describe("hardware_validator", func() { var ( - hwvalidator Validator - host *models.Host - inventory *models.Inventory - validDiskSize = int64(128849018880) - notValidDiskSize = int64(108849018880) + hwvalidator Validator + host1 *models.Host + host2 *models.Host + host3 *models.Host + inventory *models.Inventory + cluster *common.Cluster + validDiskSize = int64(128849018880) + status = models.HostStatusKnown ) BeforeEach(func() { var cfg ValidatorCfg Expect(envconfig.Process("myapp", &cfg)).ShouldNot(HaveOccurred()) - hwvalidator = NewValidator(cfg) - id := strfmt.UUID(uuid.New().String()) - host = &models.Host{ID: &id, ClusterID: strfmt.UUID(uuid.New().String())} + hwvalidator = NewValidator(logrus.New(), cfg) + id1 := strfmt.UUID(uuid.New().String()) + id2 := strfmt.UUID(uuid.New().String()) + id3 := strfmt.UUID(uuid.New().String()) + clusterID := strfmt.UUID(uuid.New().String()) + host1 = &models.Host{ID: &id1, ClusterID: clusterID, Status: &status, 
RequestedHostname: "reqhostname1"} + host2 = &models.Host{ID: &id2, ClusterID: clusterID, Status: &status, RequestedHostname: "reqhostname2"} + host3 = &models.Host{ID: &id3, ClusterID: clusterID, Status: &status, RequestedHostname: "reqhostname3"} inventory = &models.Inventory{ CPU: &models.CPU{Count: 16}, Memory: &models.Memory{PhysicalBytes: int64(32 * units.GiB)}, + Interfaces: []*models.Interface{ + { + IPV4Addresses: []string{ + "1.2.3.4/24", + }, + }, + }, Disks: []*models.Disk{ {DriveType: "ODD", Name: "loop0", SizeBytes: validDiskSize}, {DriveType: "HDD", Name: "sdb", SizeBytes: validDiskSize}}, } - }) - - It("sufficient_hw", func() { - hw, err := json.Marshal(&inventory) - Expect(err).NotTo(HaveOccurred()) - host.Inventory = string(hw) - - roles := []string{"", "master", "worker"} - for _, role := range roles { - host.Role = role - sufficient(hwvalidator.IsSufficient(host)) - } - }) - - It("insufficient_minimal_hw_requirements", func() { - inventory.CPU = &models.CPU{Count: 1} - inventory.Memory = &models.Memory{PhysicalBytes: int64(3 * units.GiB)} - hw, err := json.Marshal(&inventory) - Expect(err).NotTo(HaveOccurred()) - host.Inventory = string(hw) - - roles := []string{"", "master", "worker"} - for _, role := range roles { - host.Role = role - insufficient(hwvalidator.IsSufficient(host)) - } - }) - - It("insufficient_master_but_valid_worker", func() { - inventory.CPU = &models.CPU{Count: 8} - inventory.Memory = &models.Memory{PhysicalBytes: int64(8 * units.GiB)} - hw, err := json.Marshal(&inventory) - Expect(err).NotTo(HaveOccurred()) - host.Inventory = string(hw) - host.Role = "master" - insufficient(hwvalidator.IsSufficient(host)) - host.Role = "worker" - sufficient(hwvalidator.IsSufficient(host)) - }) - - It("insufficient_number_of_valid_disks", func() { - inventory.Disks = []*models.Disk{ - // Not enough size - {DriveType: "HDD", Name: "sdb", SizeBytes: notValidDiskSize}, - // Removable - {DriveType: "FDD", Name: "sda", SizeBytes: validDiskSize}, - 
// Filtered Name - {DriveType: "HDD", Name: "nvme01fs", SizeBytes: validDiskSize}, - } - hw, err := json.Marshal(&inventory) - Expect(err).NotTo(HaveOccurred()) - - host.Inventory = string(hw) - insufficient(hwvalidator.IsSufficient(host)) - - disks, err := hwvalidator.GetHostValidDisks(host) - Expect(err).To(HaveOccurred()) - Expect(disks).To(BeNil()) + cluster = &common.Cluster{Cluster: models.Cluster{ + ID: &clusterID, + MachineNetworkCidr: "1.2.3.0/24", + }} + cluster.Hosts = append(cluster.Hosts, host1) + cluster.Hosts = append(cluster.Hosts, host2) + cluster.Hosts = append(cluster.Hosts, host3) }) It("validate_disk_list_return_order", func() { @@ -104,49 +72,26 @@ var _ = Describe("hardware_validator", func() { inventory.Disks = []*models.Disk{ // Not disk type {DriveType: "ODD", Name: "aaa", SizeBytes: validDiskSize}, - {DriveType: "HDD", Name: "sdb", SizeBytes: validDiskSize + 1}, + {DriveType: "SSD", Name: nvmename, SizeBytes: validDiskSize + 1}, + {DriveType: "SSD", Name: "stam", SizeBytes: validDiskSize}, + {DriveType: "HDD", Name: "sdb", SizeBytes: validDiskSize + 2}, {DriveType: "HDD", Name: "sda", SizeBytes: validDiskSize + 100}, - {DriveType: "HDD", Name: "sdh", SizeBytes: validDiskSize}, - {DriveType: "SDD", Name: nvmename, SizeBytes: validDiskSize}, + {DriveType: "HDD", Name: "sdh", SizeBytes: validDiskSize + 1}, } hw, err := json.Marshal(&inventory) Expect(err).NotTo(HaveOccurred()) - host.Inventory = string(hw) - disks, err := hwvalidator.GetHostValidDisks(host) + host1.Inventory = string(hw) + disks, err := hwvalidator.GetHostValidDisks(host1) Expect(err).NotTo(HaveOccurred()) Expect(disks[0].Name).Should(Equal("sdh")) - Expect(len(disks)).Should(Equal(3)) - Expect(isBlockDeviceNameInlist(disks, nvmename)).Should(Equal(false)) + Expect(len(disks)).Should(Equal(5)) + Expect(isBlockDeviceNameInlist(disks, nvmename)).Should(BeTrue()) + Expect(disks[3].DriveType).To(Equal("SSD")) + Expect(disks[4].DriveType).To(Equal("SSD")) + 
Expect(disks[4].Name).To(HavePrefix("nvme")) }) - - It("invalid_hw_info", func() { - host.Inventory = "not a valid json" - roles := []string{"", "master", "worker"} - for _, role := range roles { - host.Role = role - reply, err := hwvalidator.IsSufficient(host) - Expect(err).To(HaveOccurred()) - Expect(reply).To(BeNil()) - } - disks, err := hwvalidator.GetHostValidDisks(host) - Expect(err).To(HaveOccurred()) - Expect(disks).To(BeNil()) - }) - }) -func sufficient(reply *IsSufficientReply, err error) { - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - ExpectWithOffset(1, reply.IsSufficient).To(BeTrue()) - ExpectWithOffset(1, reply.Reason).Should(Equal("")) -} - -func insufficient(reply *IsSufficientReply, err error) { - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - ExpectWithOffset(1, reply.IsSufficient).To(BeFalse()) - ExpectWithOffset(1, reply.Reason).ShouldNot(Equal("")) -} - func isBlockDeviceNameInlist(disks []*models.Disk, name string) bool { for _, disk := range disks { // Valid disk: type=disk, not removable, not readonly and size bigger than minimum required diff --git a/internal/host/common.go b/internal/host/common.go index 5b49cb1a1..b3df0b467 100644 --- a/internal/host/common.go +++ b/internal/host/common.go @@ -1,21 +1,33 @@ package host import ( + "context" + "fmt" "time" - "github.com/filanov/bm-inventory/internal/hardware" - "github.com/filanov/bm-inventory/models" + "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" + + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/internal/events" + "github.com/filanov/bm-inventory/models" "github.com/jinzhu/gorm" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( - statusInfoDisconnected = "Host keepalive timeout" - statusInfoDisabled = "Host is disabled" - statusInfoDiscovering = "Waiting for host hardware info" - statusInfoInstalling = "Installation in progress" + statusInfoDisconnected = "Host keepalive timeout" + statusInfoDisabled = "Host is 
disabled" + statusInfoDiscovering = "Waiting for host hardware info" + statusInfoInsufficientHardware = "Host does not pass minimum hardware requirements" + statusInfoPendingForInput = "User input required" + statusInfoNotReadyForInstall = "Host not ready for install" + statusInfoInstalling = "Installation in progress" + statusInfoResettingPendingUserAction = "Reboot the host into the installation image to complete resetting the installation" + statusInfoPreparingForInstallation = "Preparing host for installation" + statusInfoPreparingTimedOut = "Cluster is no longer preparing for installation" + statusInfoAbortingDueClusterErrors = "Installation has been aborted due cluster errors" ) type UpdateReply struct { @@ -23,94 +35,90 @@ type UpdateReply struct { IsChanged bool } -type baseState struct { - log logrus.FieldLogger - db *gorm.DB -} +func updateHostProgress(ctx context.Context, log logrus.FieldLogger, db *gorm.DB, eventsHandler events.Handler, clusterId strfmt.UUID, hostId strfmt.UUID, + srcStatus string, newStatus string, statusInfo string, + srcStage models.HostStage, newStage models.HostStage, progressInfo string, extra ...interface{}) (*models.Host, error) { -func updateState(log logrus.FieldLogger, state, stateInfo string, h *models.Host, db *gorm.DB) (*UpdateReply, error) { - return updateStateWithParams(log, state, stateInfo, h, db) -} + extra = append(append(make([]interface{}, 0), "progress_current_stage", newStage, "progress_progress_info", progressInfo, + "progress_stage_updated_at", strfmt.DateTime(time.Now())), extra...) 
-func updateByKeepAlive(log logrus.FieldLogger, h *models.Host, db *gorm.DB) (*UpdateReply, error) { - if time.Since(time.Time(h.UpdatedAt)) > 3*time.Minute { - return updateState(log, HostStatusDisconnected, statusInfoDisconnected, h, db) + if newStage != srcStage { + extra = append(extra, "progress_stage_started_at", strfmt.DateTime(time.Now())) } - return &UpdateReply{ - State: swag.StringValue(h.Status), - IsChanged: false, - }, nil + + return updateHostStatus(ctx, log, db, eventsHandler, clusterId, hostId, srcStatus, newStatus, statusInfo, extra...) } -func updateStateWithParams(log logrus.FieldLogger, status, statusInfo string, h *models.Host, db *gorm.DB, extra ...interface{}) (*UpdateReply, error) { - updates := map[string]interface{}{"status": status, "status_info": statusInfo} - if len(extra)%2 != 0 { - return nil, errors.Errorf("invalid update extra parameters %+v", extra) - } - for i := 0; i < len(extra); i += 2 { - updates[extra[i].(string)] = extra[i+1] +func updateHostStatus(ctx context.Context, log logrus.FieldLogger, db *gorm.DB, eventsHandler events.Handler, clusterId strfmt.UUID, hostId strfmt.UUID, + srcStatus string, newStatus string, statusInfo string, extra ...interface{}) (*models.Host, error) { + var host *models.Host + var err error + + extra = append(append(make([]interface{}, 0), "status", newStatus, "status_info", statusInfo), extra...) + + if newStatus != srcStatus { + extra = append(extra, "status_updated_at", strfmt.DateTime(time.Now())) } - dbReply := db.Model(&models.Host{}).Where("id = ? and cluster_id = ? and status = ?", - h.ID.String(), h.ClusterID.String(), swag.StringValue(h.Status)). 
- Updates(updates) - if dbReply.Error != nil { - return nil, errors.Wrapf(dbReply.Error, "failed to update host %s from cluster %s state from %s to %s", - h.ID.String(), h.ClusterID, swag.StringValue(h.Status), status) + + if host, err = UpdateHost(log, db, clusterId, hostId, srcStatus, extra...); err != nil || + swag.StringValue(host.Status) != newStatus { + return nil, errors.Wrapf(err, "failed to update host %s from cluster %s state from %s to %s", + hostId, clusterId, srcStatus, newStatus) } - if dbReply.RowsAffected == 0 { - return nil, errors.Errorf("failed to update host %s from cluster %s state from %s to %s, nothing have changed", - h.ID.String(), h.ClusterID, swag.StringValue(h.Status), status) + + if newStatus != srcStatus { + eventsHandler.AddEvent(ctx, hostId.String(), common.GetEventSeverityFromHostStatus(newStatus), + fmt.Sprintf("Host %s: updated status from \"%s\" to \"%s\" (%s)", common.GetHostnameForMsg(host), srcStatus, newStatus, statusInfo), + time.Now(), clusterId.String()) + log.Infof("host %s from cluster %s has been updated with the following updates %+v", hostId, clusterId, extra) } - log.Infof("Updated host <%s> status from <%s> to <%s> with fields: %s", - h.ID.String(), swag.StringValue(h.Status), status, updates) - return &UpdateReply{ - State: status, - IsChanged: status != swag.StringValue(h.Status), - }, nil + + return host, nil } -func updateHostStateWithParams(log logrus.FieldLogger, srcStatus, statusInfo string, h *models.Host, db *gorm.DB, - extra ...interface{}) error { +func hostExistsInDB(db *gorm.DB, hostId, clusterId strfmt.UUID, where map[string]interface{}) bool { + where["id"] = hostId.String() + where["cluster_id"] = clusterId.String() + var host models.Host + return db.Select("id").Take(&host, where).Error == nil +} + +func UpdateHost(log logrus.FieldLogger, db *gorm.DB, clusterId strfmt.UUID, hostId strfmt.UUID, + srcStatus string, extra ...interface{}) (*models.Host, error) { + updates := make(map[string]interface{}) 
- updates := map[string]interface{}{"status": swag.StringValue(h.Status), "status_info": statusInfo} if len(extra)%2 != 0 { - return errors.Errorf("invalid update extra parameters %+v", extra) + return nil, errors.Errorf("invalid update extra parameters %+v", extra) } for i := 0; i < len(extra); i += 2 { updates[extra[i].(string)] = extra[i+1] } + + // Query by + // Status is required as well to avoid races between different components. dbReply := db.Model(&models.Host{}).Where("id = ? and cluster_id = ? and status = ?", - h.ID.String(), h.ClusterID.String(), srcStatus). + hostId, clusterId, srcStatus). Updates(updates) - if dbReply.Error != nil { - return errors.Wrapf(dbReply.Error, "failed to update host %s from cluster %s state from %s to %s", - h.ID.String(), h.ClusterID, srcStatus, swag.StringValue(h.Status)) - } - if dbReply.RowsAffected == 0 && swag.StringValue(h.Status) != srcStatus { - return errors.Errorf("failed to update host %s from cluster %s state from %s to %s, nothing have changed", - h.ID.String(), h.ClusterID, srcStatus, swag.StringValue(h.Status)) + + if dbReply.Error != nil || (dbReply.RowsAffected == 0 && !hostExistsInDB(db, hostId, clusterId, updates)) { + return nil, errors.Errorf("failed to update host %s from cluster %s. nothing have changed", hostId, clusterId) } - log.Infof("Updated host <%s> status from <%s> to <%s> with fields: %s", - h.ID.String(), srcStatus, swag.StringValue(h.Status), updates) - return nil -} -func updateHwInfo(log logrus.FieldLogger, hwValidator hardware.Validator, h *models.Host, db *gorm.DB) (*UpdateReply, error) { - status := "" - if h.Status != nil { - status = *h.Status + var host models.Host + + if err := db.First(&host, "id = ? 
and cluster_id = ?", hostId, clusterId).Error; err != nil { + return nil, errors.Wrapf(err, "failed to read from host %s from cluster %s from the database after the update", + hostId, clusterId) } - return updateStateWithParams(log, status, "", h, db, "hardware_info", h.HardwareInfo) + + return &host, nil } -func updateInventory(log logrus.FieldLogger, hwValidator hardware.Validator, h *models.Host, db *gorm.DB) (*UpdateReply, error) { - reply, err := hwValidator.IsSufficient(h) - if err != nil { - return nil, err - } - if !reply.IsSufficient { - return updateStateWithParams(log, HostStatusInsufficient, reply.Reason, h, db, - "inventory", h.Inventory) +func indexOfStage(element models.HostStage, data []models.HostStage) int { + for k, v := range data { + if element == v { + return k + } } - return updateStateWithParams(log, HostStatusKnown, "", h, db, "inventory", h.Inventory) + return -1 // not found. } diff --git a/internal/host/common_test.go b/internal/host/common_test.go new file mode 100644 index 000000000..cac99f125 --- /dev/null +++ b/internal/host/common_test.go @@ -0,0 +1,164 @@ +package host + +import ( + "context" + "fmt" + + "github.com/golang/mock/gomock" + + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/internal/events" + "github.com/filanov/bm-inventory/models" + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/jinzhu/gorm" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var defaultStatus = "status" +var defaultStatusInfo = "statusInfo" +var newStatus = "newStatus" +var newStatusInfo = "newStatusInfo" + +var _ = Describe("update_host_state", func() { + var ( + ctx = context.Background() + ctrl *gomock.Controller + db *gorm.DB + mockEvents *events.MockHandler + host models.Host + lastUpdatedTime strfmt.DateTime + returnedHost *models.Host + err error + dbName string = "host_common_test" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName) + ctrl = gomock.NewController(GinkgoT()) + mockEvents = events.NewMockHandler(ctrl) + id := strfmt.UUID(uuid.New().String()) + clusterId := strfmt.UUID(uuid.New().String()) + host = getTestHost(id, clusterId, defaultStatus) + host.StatusInfo = &defaultStatusInfo + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + lastUpdatedTime = host.StatusUpdatedAt + }) + + Describe("updateHostStatus", func() { + It("change_status", func() { + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"status\" to \"newStatus\" (newStatusInfo)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + returnedHost, err = updateHostStatus(ctx, getTestLog(), db, mockEvents, host.ClusterID, *host.ID, defaultStatus, + newStatus, newStatusInfo) + Expect(err).ShouldNot(HaveOccurred()) + Expect(*returnedHost.Status).Should(Equal(newStatus)) + Expect(*returnedHost.StatusInfo).Should(Equal(newStatusInfo)) + Expect(returnedHost.StatusUpdatedAt.String()).ShouldNot(Equal(lastUpdatedTime.String())) + }) + + Describe("negative", func() { + It("invalid_extras_amount", func() { + returnedHost, err = updateHostStatus(ctx, getTestLog(), db, mockEvents, host.ClusterID, *host.ID, *host.Status, + newStatus, newStatusInfo, "1") + Expect(err).Should(HaveOccurred()) + Expect(returnedHost).Should(BeNil()) + returnedHost, err = updateHostStatus(ctx, getTestLog(), db, mockEvents, host.ClusterID, *host.ID, 
*host.Status, + newStatus, newStatusInfo, "1", "2", "3") + }) + + It("no_matching_rows", func() { + returnedHost, err = updateHostStatus(ctx, getTestLog(), db, mockEvents, host.ClusterID, *host.ID, "otherStatus", + newStatus, newStatusInfo) + }) + + AfterEach(func() { + Expect(err).Should(HaveOccurred()) + Expect(returnedHost).Should(BeNil()) + + hostFromDb := getHost(*host.ID, host.ClusterID, db) + Expect(*hostFromDb.Status).ShouldNot(Equal(newStatus)) + Expect(*hostFromDb.StatusInfo).ShouldNot(Equal(newStatusInfo)) + Expect(hostFromDb.StatusUpdatedAt.String()).Should(Equal(lastUpdatedTime.String())) + }) + }) + + It("db_failure", func() { + db.Close() + _, err = updateHostStatus(ctx, getTestLog(), db, mockEvents, host.ClusterID, *host.ID, *host.Status, + newStatus, newStatusInfo) + Expect(err).Should(HaveOccurred()) + }) + }) + + Describe("updateHostProgress", func() { + Describe("same_status", func() { + It("new_stage", func() { + returnedHost, err = updateHostProgress(ctx, getTestLog(), db, mockEvents, host.ClusterID, *host.ID, *host.Status, defaultStatus, defaultStatusInfo, + host.Progress.CurrentStage, defaultProgressStage, host.Progress.ProgressInfo) + Expect(err).ShouldNot(HaveOccurred()) + + Expect(returnedHost.Progress.CurrentStage).Should(Equal(defaultProgressStage)) + Expect(returnedHost.Progress.ProgressInfo).Should(Equal(host.Progress.ProgressInfo)) + Expect(returnedHost.Progress.StageUpdatedAt.String()).ShouldNot(Equal(lastUpdatedTime.String())) + Expect(returnedHost.Progress.StageStartedAt.String()).ShouldNot(Equal(lastUpdatedTime.String())) + }) + + It("same_stage", func() { + // Still updates because stage_updated_at is being updated + returnedHost, err = updateHostProgress(ctx, getTestLog(), db, mockEvents, host.ClusterID, *host.ID, *host.Status, defaultStatus, defaultStatusInfo, + host.Progress.CurrentStage, host.Progress.CurrentStage, host.Progress.ProgressInfo) + Expect(err).ShouldNot(HaveOccurred()) + + 
Expect(returnedHost.Progress.CurrentStage).Should(Equal(models.HostStage(""))) + Expect(returnedHost.Progress.ProgressInfo).Should(Equal("")) + Expect(returnedHost.Progress.StageUpdatedAt.String()).ShouldNot(Equal(lastUpdatedTime.String())) + Expect(returnedHost.Progress.StageStartedAt.String()).Should(Equal(lastUpdatedTime.String())) + }) + + AfterEach(func() { + By("Same status info", func() { + Expect(*returnedHost.Status).Should(Equal(defaultStatus)) + Expect(*returnedHost.StatusInfo).Should(Equal(defaultStatusInfo)) + Expect(returnedHost.StatusUpdatedAt.String()).Should(Equal(lastUpdatedTime.String())) + }) + }) + }) + + It("new_status_new_stage", func() { + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"status\" to \"newStatus\" (newStatusInfo)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + returnedHost, err = updateHostProgress(ctx, getTestLog(), db, mockEvents, host.ClusterID, *host.ID, *host.Status, newStatus, newStatusInfo, + host.Progress.CurrentStage, defaultProgressStage, "") + Expect(err).ShouldNot(HaveOccurred()) + + Expect(returnedHost.Progress.CurrentStage).Should(Equal(defaultProgressStage)) + Expect(returnedHost.Progress.ProgressInfo).Should(Equal("")) + Expect(returnedHost.Progress.StageUpdatedAt.String()).ShouldNot(Equal(lastUpdatedTime.String())) + Expect(returnedHost.Progress.StageStartedAt.String()).ShouldNot(Equal(lastUpdatedTime.String())) + + By("New status", func() { + Expect(*returnedHost.Status).Should(Equal(newStatus)) + Expect(*returnedHost.StatusInfo).Should(Equal(newStatusInfo)) + Expect(returnedHost.StatusUpdatedAt.String()).ShouldNot(Equal(lastUpdatedTime.String())) + }) + }) + + It("update_info", func() { + for _, i := range []int{5, 10, 15} { + returnedHost, err = updateHostProgress(ctx, getTestLog(), db, mockEvents, host.ClusterID, *host.ID, *host.Status, defaultStatus, defaultStatusInfo, + host.Progress.CurrentStage, 
host.Progress.CurrentStage, fmt.Sprintf("%d%%", i)) + Expect(err).ShouldNot(HaveOccurred()) + Expect(returnedHost.Progress.ProgressInfo).Should(Equal(fmt.Sprintf("%d%%", i))) + Expect(returnedHost.Progress.StageStartedAt.String()).Should(Equal(lastUpdatedTime.String())) + } + }) + }) + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) + +}) diff --git a/internal/host/connectivitycheckcmd.go b/internal/host/connectivitycheckcmd.go index eff53147c..3d64335ab 100644 --- a/internal/host/connectivitycheckcmd.go +++ b/internal/host/connectivitycheckcmd.go @@ -3,7 +3,7 @@ package host import ( "context" - "github.com/filanov/bm-inventory/internal/hardware" + "github.com/filanov/bm-inventory/internal/connectivity" "github.com/sirupsen/logrus" @@ -14,21 +14,21 @@ import ( type connectivityCheckCmd struct { baseCmd - db *gorm.DB - hwValidator hardware.Validator + db *gorm.DB + connectivityValidator connectivity.Validator + connectivityCheckImage string } -func NewConnectivityCheckCmd(log logrus.FieldLogger, db *gorm.DB, hwValidator hardware.Validator) *connectivityCheckCmd { +func NewConnectivityCheckCmd(log logrus.FieldLogger, db *gorm.DB, connectivityValidator connectivity.Validator, connectivityCheckImage string) *connectivityCheckCmd { return &connectivityCheckCmd{ - baseCmd: baseCmd{log: log}, - db: db, - hwValidator: hwValidator, + baseCmd: baseCmd{log: log}, + db: db, + connectivityValidator: connectivityValidator, + connectivityCheckImage: connectivityCheckImage, } } func (c *connectivityCheckCmd) GetStep(ctx context.Context, host *models.Host) (*models.Step, error) { - step := &models.Step{} - step.StepType = models.StepTypeConnectivityCheck var hosts []*models.Host if err := c.db.Find(&hosts, "cluster_id = ?", host.ClusterID).Error; err != nil { @@ -36,12 +36,23 @@ func (c *connectivityCheckCmd) GetStep(ctx context.Context, host *models.Host) ( return nil, err } - hostsData, err := convertHostsToConnectivityCheckParams(host.ID, hosts, c.hwValidator) + 
hostsData, err := convertHostsToConnectivityCheckParams(host.ID, hosts, c.connectivityValidator) if err != nil { c.log.Errorf("failed to convert hosts to connectivity params for host %s cluster %s", host.ID, host.ClusterID) return nil, err } - step.Args = append(step.Args, hostsData) + step := &models.Step{ + StepType: models.StepTypeConnectivityCheck, + Command: "podman", + Args: []string{ + "run", "--privileged", "--net=host", "--rm", "--quiet", + "-v", "/var/log:/var/log", + "-v", "/run/systemd/journal/socket:/run/systemd/journal/socket", + c.connectivityCheckImage, + "connectivity_check", + hostsData, + }, + } return step, nil } diff --git a/internal/host/connectivitycheckcmd_test.go b/internal/host/connectivitycheckcmd_test.go index 6282a9cce..a469bd29e 100644 --- a/internal/host/connectivitycheckcmd_test.go +++ b/internal/host/connectivitycheckcmd_test.go @@ -3,6 +3,8 @@ package host import ( "context" + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" "github.com/google/uuid" @@ -19,10 +21,11 @@ var _ = Describe("connectivitycheckcmd", func() { var id, clusterId strfmt.UUID var stepReply *models.Step var stepErr error + dbName := "connectivitycheckcmd" BeforeEach(func() { - db = prepareDB() - connectivityCheckCmd = NewConnectivityCheckCmd(getTestLog(), db, nil) + db = common.PrepareTestDB(dbName) + connectivityCheckCmd = NewConnectivityCheckCmd(getTestLog(), db, nil, "quay.io/ocpmetal/connectivity_check:latest") id = strfmt.UUID(uuid.New().String()) clusterId = strfmt.UUID(uuid.New().String()) @@ -44,7 +47,7 @@ var _ = Describe("connectivitycheckcmd", func() { }) AfterEach(func() { - db.Close() + common.DeleteTestDB(db, dbName) stepReply = nil stepErr = nil }) diff --git a/internal/host/connectivitycheckconvertor.go b/internal/host/connectivitycheckconvertor.go index 78c271b29..f7459bfd6 100644 --- a/internal/host/connectivitycheckconvertor.go +++ 
b/internal/host/connectivitycheckconvertor.go @@ -4,16 +4,17 @@ import ( "encoding/json" "strings" - "github.com/filanov/bm-inventory/internal/hardware" + "github.com/filanov/bm-inventory/internal/connectivity" + "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" ) -func convertHostsToConnectivityCheckParams(currentHostId *strfmt.UUID, hosts []*models.Host, hwValidator hardware.Validator) (string, error) { +func convertHostsToConnectivityCheckParams(currentHostId *strfmt.UUID, hosts []*models.Host, connectivityValidator connectivity.Validator) (string, error) { var connectivityCheckHosts models.ConnectivityCheckParams for i := range hosts { if hosts[i].ID.String() != currentHostId.String() { - interfaces, err := hwValidator.GetHostValidInterfaces(hosts[i]) + interfaces, err := connectivityValidator.GetHostValidInterfaces(hosts[i]) if err != nil { return "", err } diff --git a/internal/host/connectivitycheckconvertor_test.go b/internal/host/connectivitycheckconvertor_test.go index 4351c938d..2fd2a4bc5 100644 --- a/internal/host/connectivitycheckconvertor_test.go +++ b/internal/host/connectivitycheckconvertor_test.go @@ -3,7 +3,8 @@ package host import ( "strings" - "github.com/filanov/bm-inventory/internal/hardware" + "github.com/filanov/bm-inventory/internal/connectivity" + "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" "github.com/golang/mock/gomock" @@ -15,7 +16,7 @@ import ( var _ = Describe("connectivitycheckconvertor", func() { var ( ctrl *gomock.Controller - mockValidator *hardware.MockValidator + mockValidator *connectivity.MockValidator currentHostId, hostId2, hostId3, clusterId strfmt.UUID hosts []*models.Host interfaces []*models.Interface @@ -24,7 +25,7 @@ var _ = Describe("connectivitycheckconvertor", func() { BeforeEach(func() { ctrl = gomock.NewController(GinkgoT()) - mockValidator = hardware.NewMockValidator(ctrl) + mockValidator = connectivity.NewMockValidator(ctrl) clusterId = 
strfmt.UUID(uuid.New().String()) currentHostId = strfmt.UUID(uuid.New().String()) diff --git a/internal/host/disabled.go b/internal/host/disabled.go deleted file mode 100644 index b76a0736a..000000000 --- a/internal/host/disabled.go +++ /dev/null @@ -1,66 +0,0 @@ -package host - -import ( - "context" - - "github.com/filanov/bm-inventory/models" - logutil "github.com/filanov/bm-inventory/pkg/log" - "github.com/go-openapi/swag" - "github.com/jinzhu/gorm" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func NewDisabledState(log logrus.FieldLogger, db *gorm.DB) *disabledState { - return &disabledState{ - log: log, - db: db, - } -} - -type disabledState baseState - -func (d *disabledState) UpdateHwInfo(ctx context.Context, h *models.Host, hwInfo string) (*UpdateReply, error) { - return nil, errors.Errorf("unable to update hardware info to host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (d *disabledState) UpdateInventory(ctx context.Context, h *models.Host, inventory string) (*UpdateReply, error) { - return nil, errors.Errorf("unable to update inventory to host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (d *disabledState) UpdateRole(ctx context.Context, h *models.Host, role string, db *gorm.DB) (*UpdateReply, error) { - cdb := d.db - if db != nil { - cdb = db - } - return updateStateWithParams(logutil.FromContext(ctx, d.log), HostStatusDisabled, statusInfoDisabled, h, cdb, - "role", role) -} - -func (d *disabledState) RefreshStatus(ctx context.Context, h *models.Host) (*UpdateReply, error) { - // State in the same state - return &UpdateReply{ - State: HostStatusDisabled, - IsChanged: false, - }, nil -} - -func (d *disabledState) Install(ctx context.Context, h *models.Host, db *gorm.DB) (*UpdateReply, error) { - return nil, errors.Errorf("unable to install host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (d *disabledState) EnableHost(ctx context.Context, h *models.Host) 
(*UpdateReply, error) { - return updateStateWithParams(logutil.FromContext(ctx, d.log), HostStatusDiscovering, "", h, d.db, - "hardware_info", "") -} - -func (d *disabledState) DisableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - // State in the same state - return &UpdateReply{ - State: HostStatusDisabled, - IsChanged: false, - }, nil -} diff --git a/internal/host/disabled_test.go b/internal/host/disabled_test.go deleted file mode 100644 index a336c85e2..000000000 --- a/internal/host/disabled_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package host - -import ( - "context" - "time" - - "github.com/filanov/bm-inventory/models" - "github.com/go-openapi/strfmt" - "github.com/google/uuid" - "github.com/jinzhu/gorm" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -var _ = Describe("disabled_state", func() { - ctx := context.Background() - var state API - var db *gorm.DB - currentState := HostStatusDisabled - var host models.Host - var id, clusterId strfmt.UUID - var updateReply *UpdateReply - var updateErr error - var expectedReply *expect - - BeforeEach(func() { - db = prepareDB() - state = &Manager{disabled: NewDisabledState(getTestLog(), db)} - - id = strfmt.UUID(uuid.New().String()) - clusterId = strfmt.UUID(uuid.New().String()) - host = getTestHost(id, clusterId, currentState) - Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) - expectedReply = &expect{expectedState: currentState} - }) - - It("update_hw_info", func() { - updateReply, updateErr = state.UpdateHwInfo(ctx, &host, "some hw info") - expectedReply.expectError = true - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.HardwareInfo).Should(Equal(defaultHwInfo)) - } - }) - - Context("update_role", func() { - It("master", func() { - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", nil) - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("master")) - } - }) - 
It("master_with_tx", func() { - tx := db.Begin() - Expect(tx.Error).ShouldNot(HaveOccurred()) - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", tx) - Expect(tx.Rollback().Error).ShouldNot(HaveOccurred()) - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("")) - } - }) - }) - - Context("refresh_status", func() { - It("keep_alive", func() { - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - It("still_disabled", func() { - host.UpdatedAt = strfmt.DateTime(time.Now().Add(-time.Hour)) - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - }) - - It("install", func() { - updateReply, updateErr = state.Install(ctx, &host, nil) - expectedReply.expectError = true - }) - - It("enable_host", func() { - updateReply, updateErr = state.EnableHost(ctx, &host) - expectedReply.expectedState = HostStatusDiscovering - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.HardwareInfo).Should(Equal("")) - Expect(*h.StatusInfo).Should(Equal("")) - } - }) - - It("disable_host", func() { - updateReply, updateErr = state.DisableHost(ctx, &host) - }) - - AfterEach(func() { - postValidation(expectedReply, currentState, db, id, clusterId, updateReply, updateErr) - // cleanup - db.Close() - expectedReply = nil - updateReply = nil - updateErr = nil - }) - -}) diff --git a/internal/host/disconnected.go b/internal/host/disconnected.go deleted file mode 100644 index 75d7124cb..000000000 --- a/internal/host/disconnected.go +++ /dev/null @@ -1,72 +0,0 @@ -package host - -import ( - "context" - - "github.com/filanov/bm-inventory/internal/hardware" - "github.com/filanov/bm-inventory/models" - logutil "github.com/filanov/bm-inventory/pkg/log" - "github.com/go-openapi/swag" - "github.com/jinzhu/gorm" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func NewDisconnectedState(log logrus.FieldLogger, db *gorm.DB, hwValidator hardware.Validator) *disconnectedState { - return 
&disconnectedState{ - baseState: baseState{ - log: log, - db: db, - }, - hwValidator: hwValidator, - } -} - -type disconnectedState struct { - baseState - hwValidator hardware.Validator -} - -func (d *disconnectedState) UpdateHwInfo(ctx context.Context, h *models.Host, hwInfo string) (*UpdateReply, error) { - h.HardwareInfo = hwInfo - return updateHwInfo(logutil.FromContext(ctx, d.log), d.hwValidator, h, d.db) -} - -func (d *disconnectedState) UpdateInventory(ctx context.Context, h *models.Host, inventory string) (*UpdateReply, error) { - h.Inventory = inventory - return updateInventory(logutil.FromContext(ctx, d.log), d.hwValidator, h, d.db) -} - -func (d *disconnectedState) UpdateRole(ctx context.Context, h *models.Host, role string, db *gorm.DB) (*UpdateReply, error) { - cdb := d.db - if db != nil { - cdb = db - } - return updateStateWithParams(logutil.FromContext(ctx, d.log), HostStatusDisconnected, - swag.StringValue(h.StatusInfo), h, cdb, "role", role) -} - -func (d *disconnectedState) RefreshStatus(ctx context.Context, h *models.Host) (*UpdateReply, error) { - // State in the same state - return &UpdateReply{ - State: HostStatusDisconnected, - IsChanged: false, - }, nil -} - -func (d *disconnectedState) Install(ctx context.Context, h *models.Host, db *gorm.DB) (*UpdateReply, error) { - return nil, errors.Errorf("unable to install host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (d *disconnectedState) EnableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - // State in the same state - return &UpdateReply{ - State: HostStatusDisconnected, - IsChanged: false, - }, nil -} - -func (d *disconnectedState) DisableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - return updateState(logutil.FromContext(ctx, d.log), HostStatusDisabled, statusInfoDisabled, h, d.db) -} diff --git a/internal/host/disconnected_test.go b/internal/host/disconnected_test.go deleted file mode 100644 index b6dd445af..000000000 --- 
a/internal/host/disconnected_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package host - -import ( - "context" - "time" - - "github.com/filanov/bm-inventory/internal/hardware" - "github.com/filanov/bm-inventory/models" - "github.com/go-openapi/strfmt" - "github.com/golang/mock/gomock" - "github.com/google/uuid" - "github.com/jinzhu/gorm" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/pkg/errors" -) - -var _ = Describe("disconnected_state", func() { - var ( - ctx = context.Background() - state API - db *gorm.DB - currentState = HostStatusDisconnected - host models.Host - id, clusterId strfmt.UUID - updateReply *UpdateReply - updateErr error - expectedReply *expect - ctrl *gomock.Controller - mockValidator *hardware.MockValidator - ) - - BeforeEach(func() { - db = prepareDB() - ctrl = gomock.NewController(GinkgoT()) - mockValidator = hardware.NewMockValidator(ctrl) - state = &Manager{disconnected: NewDisconnectedState(getTestLog(), db, mockValidator)} - - id = strfmt.UUID(uuid.New().String()) - clusterId = strfmt.UUID(uuid.New().String()) - host = getTestHost(id, clusterId, currentState) - Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) - expectedReply = &expect{expectedState: currentState} - }) - - Context("update hw info", func() { - It("update", func() { - updateReply, updateErr = state.UpdateHwInfo(ctx, &host, "some hw info") - expectedReply.expectedState = HostStatusDisconnected - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Inventory).Should(Equal("")) - Expect(h.HardwareInfo).Should(Equal("some hw info")) - } - }) - }) - - Context("update inventory", func() { - It("sufficient_hw", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). 
- Return(&hardware.IsSufficientReply{IsSufficient: true}, nil).Times(1) - updateReply, updateErr = state.UpdateInventory(ctx, &host, "some hw info") - expectedReply.expectedState = HostStatusKnown - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Inventory).Should(Equal("some hw info")) - } - }) - It("insufficient_hw", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(&hardware.IsSufficientReply{IsSufficient: false, Reason: "because"}, nil).Times(1) - updateReply, updateErr = state.UpdateInventory(ctx, &host, "some hw info") - expectedReply.expectedState = HostStatusInsufficient - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Inventory).Should(Equal("some hw info")) - Expect(*h.StatusInfo).Should(Equal("because")) - } - }) - It("hw_validation_error", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(nil, errors.New("error")).Times(1) - updateReply, updateErr = state.UpdateInventory(ctx, &host, "some hw info") - expectedReply.expectError = true - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Inventory).Should(Equal("")) - } - }) - }) - - Context("update_role", func() { - It("master", func() { - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", nil) - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("master")) - } - }) - It("master_with_tx", func() { - tx := db.Begin() - Expect(tx.Error).ShouldNot(HaveOccurred()) - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", tx) - Expect(tx.Rollback().Error).ShouldNot(HaveOccurred()) - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("")) - } - }) - }) - - Context("refresh_status", func() { - It("keep_alive", func() { - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - It("keep_alive_timeout", func() { - host.UpdatedAt = 
strfmt.DateTime(time.Now().Add(-time.Hour)) - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - }) - - It("install", func() { - updateReply, updateErr = state.Install(ctx, &host, nil) - expectedReply.expectError = true - }) - - It("enable_host", func() { - updateReply, updateErr = state.EnableHost(ctx, &host) - }) - - It("disable_host", func() { - updateReply, updateErr = state.DisableHost(ctx, &host) - expectedReply.expectedState = HostStatusDisabled - }) - - AfterEach(func() { - ctrl.Finish() - postValidation(expectedReply, currentState, db, id, clusterId, updateReply, updateErr) - // cleanup - db.Close() - expectedReply = nil - updateReply = nil - updateErr = nil - }) -}) diff --git a/internal/host/discovering.go b/internal/host/discovering.go deleted file mode 100644 index ab3d564ec..000000000 --- a/internal/host/discovering.go +++ /dev/null @@ -1,67 +0,0 @@ -package host - -import ( - "context" - - "github.com/filanov/bm-inventory/internal/hardware" - "github.com/filanov/bm-inventory/models" - logutil "github.com/filanov/bm-inventory/pkg/log" - "github.com/go-openapi/swag" - "github.com/jinzhu/gorm" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func NewDiscoveringState(log logrus.FieldLogger, db *gorm.DB, hwValidator hardware.Validator) *discoveringState { - return &discoveringState{ - baseState: baseState{ - log: log, - db: db, - }, - hwValidator: hwValidator, - } -} - -type discoveringState struct { - baseState - hwValidator hardware.Validator -} - -func (d *discoveringState) UpdateHwInfo(ctx context.Context, h *models.Host, hwInfo string) (*UpdateReply, error) { - h.HardwareInfo = hwInfo - return updateHwInfo(logutil.FromContext(ctx, d.log), d.hwValidator, h, d.db) -} - -func (d *discoveringState) UpdateInventory(ctx context.Context, h *models.Host, inventory string) (*UpdateReply, error) { - h.Inventory = inventory - return updateInventory(logutil.FromContext(ctx, d.log), d.hwValidator, h, d.db) -} - -func (d 
*discoveringState) UpdateRole(ctx context.Context, h *models.Host, role string, db *gorm.DB) (*UpdateReply, error) { - cdb := d.db - if db != nil { - cdb = db - } - return updateStateWithParams(logutil.FromContext(ctx, d.log), HostStatusDiscovering, statusInfoDiscovering, h, cdb, "role", role) -} - -func (d *discoveringState) RefreshStatus(ctx context.Context, h *models.Host) (*UpdateReply, error) { - return updateByKeepAlive(logutil.FromContext(ctx, d.log), h, d.db) -} - -func (d *discoveringState) Install(ctx context.Context, h *models.Host, db *gorm.DB) (*UpdateReply, error) { - return nil, errors.Errorf("unable to install host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (d *discoveringState) EnableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - // State in the same state - return &UpdateReply{ - State: HostStatusDiscovering, - IsChanged: false, - }, nil -} - -func (d *discoveringState) DisableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - return updateState(logutil.FromContext(ctx, d.log), HostStatusDisabled, statusInfoDisabled, h, d.db) -} diff --git a/internal/host/discovering_test.go b/internal/host/discovering_test.go deleted file mode 100644 index 1c3c78ec1..000000000 --- a/internal/host/discovering_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package host - -import ( - "context" - "time" - - "github.com/filanov/bm-inventory/internal/hardware" - "github.com/filanov/bm-inventory/models" - "github.com/go-openapi/strfmt" - "github.com/golang/mock/gomock" - "github.com/google/uuid" - "github.com/jinzhu/gorm" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/pkg/errors" -) - -var _ = Describe("discovering_state", func() { - var ( - ctx = context.Background() - state API - db *gorm.DB - currentState = HostStatusDiscovering - host models.Host - id, clusterId strfmt.UUID - updateReply *UpdateReply - updateErr error - expectedReply *expect - ctrl *gomock.Controller - mockValidator *hardware.MockValidator - ) - - BeforeEach(func() { - db = prepareDB() - ctrl = gomock.NewController(GinkgoT()) - mockValidator = hardware.NewMockValidator(ctrl) - state = &Manager{discovering: NewDiscoveringState(getTestLog(), db, mockValidator)} - - id = strfmt.UUID(uuid.New().String()) - clusterId = strfmt.UUID(uuid.New().String()) - host = getTestHost(id, clusterId, currentState) - Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) - expectedReply = &expect{expectedState: currentState} - }) - - Context("update inventory", func() { - It("sufficient_hw", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(&hardware.IsSufficientReply{IsSufficient: true}, nil).Times(1) - updateReply, updateErr = state.UpdateInventory(ctx, &host, "some hw info") - expectedReply.expectedState = HostStatusKnown - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Inventory).Should(Equal("some hw info")) - } - }) - It("insufficient_hw", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(&hardware.IsSufficientReply{IsSufficient: false, Reason: "because"}, nil).Times(1) - updateReply, updateErr = state.UpdateInventory(ctx, &host, "some hw info") - expectedReply.expectedState = HostStatusInsufficient - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Inventory).Should(Equal("some hw info")) - Expect(*h.StatusInfo).Should(Equal("because")) - } - }) - It("hw_validation_error", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). 
- Return(nil, errors.New("error")).Times(1) - updateReply, updateErr = state.UpdateInventory(ctx, &host, "some hw info") - expectedReply.expectError = true - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Inventory).Should(Equal("")) - } - }) - }) - - Context("update_role", func() { - It("master", func() { - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", nil) - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("master")) - } - }) - It("master_with_tx", func() { - tx := db.Begin() - Expect(tx.Error).ShouldNot(HaveOccurred()) - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", tx) - Expect(tx.Rollback().Error).ShouldNot(HaveOccurred()) - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("")) - } - }) - }) - - Context("refresh_status", func() { - It("keep_alive", func() { - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - It("keep_alive_timeout", func() { - host.UpdatedAt = strfmt.DateTime(time.Now().Add(-time.Hour)) - updateReply, updateErr = state.RefreshStatus(ctx, &host) - expectedReply.expectedState = HostStatusDisconnected - }) - }) - - It("install", func() { - updateReply, updateErr = state.Install(ctx, &host, nil) - expectedReply.expectError = true - }) - - It("enable_host", func() { - updateReply, updateErr = state.EnableHost(ctx, &host) - }) - - It("disable_host", func() { - updateReply, updateErr = state.DisableHost(ctx, &host) - expectedReply.expectedState = HostStatusDisabled - }) - - AfterEach(func() { - ctrl.Finish() - postValidation(expectedReply, currentState, db, id, clusterId, updateReply, updateErr) - // cleanup - db.Close() - expectedReply = nil - updateReply = nil - updateErr = nil - }) -}) diff --git a/internal/host/error.go b/internal/host/error.go deleted file mode 100644 index d7ec18483..000000000 --- a/internal/host/error.go +++ /dev/null @@ -1,61 +0,0 @@ -package 
host - -import ( - "context" - - "github.com/filanov/bm-inventory/models" - "github.com/go-openapi/swag" - "github.com/jinzhu/gorm" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func NewErrorState(log logrus.FieldLogger, db *gorm.DB) *errorState { - return &errorState{ - log: log, - db: db, - } -} - -type errorState baseState - -func (e *errorState) UpdateHwInfo(ctx context.Context, h *models.Host, hwInfo string) (*UpdateReply, error) { - return nil, errors.Errorf("unable to update hardware info to host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (i *errorState) UpdateInventory(ctx context.Context, h *models.Host, inventory string) (*UpdateReply, error) { - return nil, errors.Errorf("unable to update inventory to host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (e *errorState) UpdateRole(ctx context.Context, h *models.Host, role string, db *gorm.DB) (*UpdateReply, error) { - return nil, errors.Errorf("unable to update role to host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (e *errorState) RefreshStatus(ctx context.Context, h *models.Host) (*UpdateReply, error) { - // State in the same state - return &UpdateReply{ - State: HostStatusError, - IsChanged: false, - }, nil -} - -func (e *errorState) Install(ctx context.Context, h *models.Host, db *gorm.DB) (*UpdateReply, error) { - return nil, errors.Errorf("unable to install host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (e *errorState) EnableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - // State in the same state - return &UpdateReply{ - State: HostStatusError, - IsChanged: false, - }, nil -} - -func (e *errorState) DisableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - return nil, errors.Errorf("unable to disable host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} diff --git a/internal/host/error_test.go b/internal/host/error_test.go deleted 
file mode 100644 index d4f08d7fc..000000000 --- a/internal/host/error_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package host - -import ( - "context" - "time" - - "github.com/filanov/bm-inventory/models" - "github.com/go-openapi/strfmt" - "github.com/google/uuid" - "github.com/jinzhu/gorm" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -var _ = Describe("error_state", func() { - ctx := context.Background() - var state API - var db *gorm.DB - currentState := HostStatusError - var host models.Host - var id, clusterId strfmt.UUID - var updateReply *UpdateReply - var updateErr error - var expectedReply *expect - - BeforeEach(func() { - db = prepareDB() - state = &Manager{error: NewErrorState(getTestLog(), db)} - - id = strfmt.UUID(uuid.New().String()) - clusterId = strfmt.UUID(uuid.New().String()) - host = getTestHost(id, clusterId, currentState) - Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) - expectedReply = &expect{expectedState: currentState} - }) - - It("update_hw_info", func() { - updateReply, updateErr = state.UpdateHwInfo(ctx, &host, "some hw info") - expectedReply.expectError = true - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.HardwareInfo).Should(Equal(defaultHwInfo)) - } - }) - - It("update_role", func() { - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", nil) - expectedReply.expectError = true - }) - - Context("refresh_status", func() { - It("keep_alive", func() { - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - It("keep_alive_timeout", func() { - host.UpdatedAt = strfmt.DateTime(time.Now().Add(-time.Hour)) - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - }) - - It("install", func() { - updateReply, updateErr = state.Install(ctx, &host, nil) - expectedReply.expectError = true - }) - - It("enable_host", func() { - updateReply, updateErr = state.EnableHost(ctx, &host) - }) - - It("disable_host", func() { - updateReply, updateErr = 
state.DisableHost(ctx, &host) - expectedReply.expectError = true - }) - - AfterEach(func() { - postValidation(expectedReply, currentState, db, id, clusterId, updateReply, updateErr) - - // cleanup - db.Close() - expectedReply = nil - updateReply = nil - updateErr = nil - }) -}) diff --git a/internal/host/freeaddressescmd.go b/internal/host/freeaddressescmd.go new file mode 100644 index 000000000..5be6a4e62 --- /dev/null +++ b/internal/host/freeaddressescmd.go @@ -0,0 +1,81 @@ +package host + +import ( + "context" + "encoding/json" + "fmt" + "net" + + "github.com/sirupsen/logrus" + + "github.com/filanov/bm-inventory/models" +) + +type freeAddressesCmd struct { + baseCmd + freeAddressesImage string +} + +func NewFreeAddressesCmd(log logrus.FieldLogger, freeAddressesImage string) *freeAddressesCmd { + return &freeAddressesCmd{ + baseCmd: baseCmd{log: log}, + freeAddressesImage: freeAddressesImage, + } +} + +func (f *freeAddressesCmd) prepareParam(host *models.Host) (string, error) { + var inventory models.Inventory + err := json.Unmarshal([]byte(host.Inventory), &inventory) + if err != nil { + f.log.WithError(err).Warn("Inventory parse") + return "", err + } + m := make(map[string]struct{}) + for _, intf := range inventory.Interfaces { + for _, ipv4 := range intf.IPV4Addresses { + var cidr *net.IPNet + _, cidr, err = net.ParseCIDR(ipv4) + if err != nil { + f.log.WithError(err).Warn("Cidr parse") + return "", err + } + m[cidr.String()] = struct{}{} + } + } + if len(m) == 0 { + err = fmt.Errorf("No networks found for host %s", host.ID.String()) + f.log.WithError(err).Warn("Missing networks") + return "", err + } + request := models.FreeAddressesRequest{} + for cidr := range m { + request = append(request, cidr) + } + b, err := json.Marshal(&request) + if err != nil { + f.log.WithError(err).Warn("Json marshal") + return "", err + } + return string(b), nil +} + +func (f *freeAddressesCmd) GetStep(ctx context.Context, host *models.Host) (*models.Step, error) { + param, err 
:= f.prepareParam(host) + if err != nil { + return nil, err + } + step := &models.Step{ + StepType: models.StepTypeFreeNetworkAddresses, + Command: "podman", + Args: []string{ + "run", "--privileged", "--net=host", "--rm", "--quiet", + "--name", "free_addresses_scanner", + "-v", "/var/log:/var/log", + "-v", "/run/systemd/journal/socket:/run/systemd/journal/socket", + f.freeAddressesImage, + "free_addresses", + param, + }, + } + return step, nil +} diff --git a/internal/host/freeaddressescmd_test.go b/internal/host/freeaddressescmd_test.go new file mode 100644 index 000000000..e45a180eb --- /dev/null +++ b/internal/host/freeaddressescmd_test.go @@ -0,0 +1,64 @@ +package host + +import ( + "context" + + "github.com/filanov/bm-inventory/internal/common" + + "github.com/filanov/bm-inventory/models" + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/jinzhu/gorm" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("inventory", func() { + ctx := context.Background() + var host models.Host + var db *gorm.DB + var fCmd *freeAddressesCmd + var id, clusterId strfmt.UUID + var stepReply *models.Step + var stepErr error + dbName := "freeaddresses_cmd" + + BeforeEach(func() { + db = common.PrepareTestDB(dbName) + fCmd = NewFreeAddressesCmd(getTestLog(), "quay.io/ocpmetal/free_addresses:latest") + + id = strfmt.UUID(uuid.New().String()) + clusterId = strfmt.UUID(uuid.New().String()) + host = getTestHost(id, clusterId, HostStatusInsufficient) + host.Inventory = defaultInventory() + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + }) + + It("happy flow", func() { + stepReply, stepErr = fCmd.GetStep(ctx, &host) + Expect(stepReply).ToNot(BeNil()) + Expect(stepReply.StepType).To(Equal(models.StepTypeFreeNetworkAddresses)) + Expect(stepErr).ShouldNot(HaveOccurred()) + }) + + It("Illegal inventory", func() { + host.Inventory = "blah" + stepReply, stepErr = fCmd.GetStep(ctx, &host) + Expect(stepReply).To(BeNil()) + 
Expect(stepErr).To(HaveOccurred()) + }) + + It("Missing networks", func() { + host.Inventory = "{}" + stepReply, stepErr = fCmd.GetStep(ctx, &host) + Expect(stepReply).To(BeNil()) + Expect(stepErr).To(HaveOccurred()) + }) + + AfterEach(func() { + // cleanup + common.DeleteTestDB(db, dbName) + stepReply = nil + stepErr = nil + }) +}) diff --git a/internal/host/host.go b/internal/host/host.go index 57238e8a7..ada8ba586 100644 --- a/internal/host/host.go +++ b/internal/host/host.go @@ -3,136 +3,124 @@ package host import ( "context" "fmt" - "strings" + "net/http" + "strconv" + "time" + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/internal/events" "github.com/filanov/bm-inventory/internal/hardware" + "github.com/filanov/bm-inventory/internal/metrics" "github.com/filanov/bm-inventory/models" logutil "github.com/filanov/bm-inventory/pkg/log" "github.com/filanov/stateswitch" + "github.com/go-openapi/swag" "github.com/jinzhu/gorm" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "github.com/thoas/go-funk" ) -//go:generate mockgen -source=host.go -package=host -aux_files=github.com/filanov/bm-inventory/internal/host=instructionmanager.go -destination=mock_host_api.go - -type StateAPI interface { - // Set a new HW information - UpdateHwInfo(ctx context.Context, h *models.Host, hwInfo string) (*UpdateReply, error) - // Set a new inventory information - UpdateInventory(ctx context.Context, h *models.Host, inventory string) (*UpdateReply, error) - // Set host state - UpdateRole(ctx context.Context, h *models.Host, role string, db *gorm.DB) (*UpdateReply, error) - // check keep alive - RefreshStatus(ctx context.Context, h *models.Host) (*UpdateReply, error) - // Install host - db is optional, for transactions - Install(ctx context.Context, h *models.Host, db *gorm.DB) (*UpdateReply, error) - // Enable host to get requests (disabled by default) - EnableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) - // Disable host 
from getting any requests - DisableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) -} - -type SpecificHardwareParams interface { - GetHostValidDisks(h *models.Host) ([]*models.Disk, error) -} - const ( - HostStatusDiscovering = "discovering" - HostStatusKnown = "known" - HostStatusDisconnected = "disconnected" - HostStatusInsufficient = "insufficient" - HostStatusDisabled = "disabled" - HostStatusInstalling = "installing" - HostStatusInstallingInProgress = "installing-in-progress" - HostStatusInstalled = "installed" - HostStatusError = "error" + HostStatusDiscovering = "discovering" + HostStatusKnown = "known" + HostStatusDisconnected = "disconnected" + HostStatusInsufficient = "insufficient" + HostStatusDisabled = "disabled" + HostStatusInstalling = "installing" + HostStatusInstallingInProgress = "installing-in-progress" + HostStatusInstallingPendingUserAction = "installing-pending-user-action" + HostStatusInstalled = "installed" + HostStatusError = "error" + HostStatusResetting = "resetting" + HostStatusPendingForInput = "pending-for-input" ) -const ( - RoleMaster = "master" - RoleBootstrap = "bootstrap" - RoleWorker = "worker" -) +var BootstrapStages = [...]models.HostStage{ + models.HostStageStartingInstallation, models.HostStageInstalling, + models.HostStageWritingImageToDisk, models.HostStageWaitingForControlPlane, + models.HostStageRebooting, models.HostStageConfiguring, models.HostStageDone, +} +var MasterStages = [...]models.HostStage{ + models.HostStageStartingInstallation, models.HostStageInstalling, + models.HostStageWritingImageToDisk, models.HostStageRebooting, + models.HostStageConfiguring, models.HostStageJoined, models.HostStageDone, +} +var WorkerStages = [...]models.HostStage{ + models.HostStageStartingInstallation, models.HostStageInstalling, + models.HostStageWritingImageToDisk, models.HostStageRebooting, + models.HostStageWaitingForIgnition, models.HostStageConfiguring, models.HostStageDone, +} -const ( - progressDone = "Done" 
- progressFailed = "Failed" -) +var manualRebootStages = [...]models.HostStage{ + models.HostStageRebooting, + models.HostStageWaitingForIgnition, + models.HostStageConfiguring, + models.HostStageJoined, + models.HostStageDone, +} +//go:generate mockgen -source=host.go -package=host -aux_files=github.com/filanov/bm-inventory/internal/host=instructionmanager.go -destination=mock_host_api.go type API interface { // Register a new host RegisterHost(ctx context.Context, h *models.Host) error - StateAPI + HandleInstallationFailure(ctx context.Context, h *models.Host) error InstructionApi - SpecificHardwareParams - UpdateInstallProgress(ctx context.Context, h *models.Host, progress string) error - SetBootstrap(ctx context.Context, h *models.Host, isbootstrap bool) error + UpdateInstallProgress(ctx context.Context, h *models.Host, progress *models.HostProgress) error + RefreshStatus(ctx context.Context, h *models.Host, db *gorm.DB) error + SetBootstrap(ctx context.Context, h *models.Host, isbootstrap bool, db *gorm.DB) error UpdateConnectivityReport(ctx context.Context, h *models.Host, connectivityReport string) error + HostMonitoring() + UpdateRole(ctx context.Context, h *models.Host, role models.HostRole, db *gorm.DB) error + UpdateHostname(ctx context.Context, h *models.Host, hostname string, db *gorm.DB) error + CancelInstallation(ctx context.Context, h *models.Host, reason string, db *gorm.DB) *common.ApiErrorResponse + IsRequireUserActionReset(h *models.Host) bool + ResetHost(ctx context.Context, h *models.Host, reason string, db *gorm.DB) *common.ApiErrorResponse + ResetPendingUserAction(ctx context.Context, h *models.Host, db *gorm.DB) error + // Disable host from getting any requests + DisableHost(ctx context.Context, h *models.Host) error + // Enable host to get requests (disabled by default) + EnableHost(ctx context.Context, h *models.Host) error + // Install host - db is optional, for transactions + Install(ctx context.Context, h *models.Host, db *gorm.DB) 
error + // Set a new inventory information + UpdateInventory(ctx context.Context, h *models.Host, inventory string) error + GetStagesByRole(role models.HostRole, isbootstrap bool) []models.HostStage + IsInstallable(h *models.Host) bool + PrepareForInstallation(ctx context.Context, h *models.Host, db *gorm.DB) error } type Manager struct { log logrus.FieldLogger db *gorm.DB - discovering StateAPI - known StateAPI - insufficient StateAPI - disconnected StateAPI - disabled StateAPI - installing StateAPI - installed StateAPI - error StateAPI instructionApi InstructionApi hwValidator hardware.Validator + eventsHandler events.Handler sm stateswitch.StateMachine + rp *refreshPreprocessor + metricApi metrics.API } -func NewManager(log logrus.FieldLogger, db *gorm.DB, hwValidator hardware.Validator, instructionApi InstructionApi) *Manager { +func NewManager(log logrus.FieldLogger, db *gorm.DB, eventsHandler events.Handler, hwValidator hardware.Validator, instructionApi InstructionApi, + hwValidatorCfg *hardware.ValidatorCfg, metricApi metrics.API) *Manager { th := &transitionHandler{ - db: db, - log: log, + db: db, + log: log, + eventsHandler: eventsHandler, } return &Manager{ log: log, db: db, - discovering: NewDiscoveringState(log, db, hwValidator), - known: NewKnownState(log, db, hwValidator), - insufficient: NewInsufficientState(log, db, hwValidator), - disconnected: NewDisconnectedState(log, db, hwValidator), - disabled: NewDisabledState(log, db), - installing: NewInstallingState(log, db), - installed: NewInstalledState(log, db), - error: NewErrorState(log, db), instructionApi: instructionApi, hwValidator: hwValidator, + eventsHandler: eventsHandler, sm: NewHostStateMachine(th), + rp: newRefreshPreprocessor(log, hwValidatorCfg), + metricApi: metricApi, } } -func (m *Manager) getCurrentState(status string) (StateAPI, error) { - switch status { - case "": - case HostStatusDiscovering: - return m.discovering, nil - case HostStatusKnown: - return m.known, nil - case 
HostStatusInsufficient: - return m.insufficient, nil - case HostStatusDisconnected: - return m.disconnected, nil - case HostStatusDisabled: - return m.disabled, nil - case HostStatusInstalling: - return m.installing, nil - case HostStatusInstalled: - return m.installed, nil - case HostStatusError: - return m.error, nil - } - return nil, fmt.Errorf("not supported host status: %s", status) -} - func (m *Manager) RegisterHost(ctx context.Context, h *models.Host) error { var host models.Host err := m.db.First(&host, "id = ? and cluster_id = ?", *h.ID, h.ClusterID).Error @@ -146,101 +134,140 @@ func (m *Manager) RegisterHost(ctx context.Context, h *models.Host) error { } return m.sm.Run(TransitionTypeRegisterHost, newStateHost(pHost), &TransitionArgsRegisterHost{ - ctx: ctx, + ctx: ctx, + discoveryAgentVersion: h.DiscoveryAgentVersion, }) } -func (m *Manager) UpdateHwInfo(ctx context.Context, h *models.Host, hwInfo string) (*UpdateReply, error) { - state, err := m.getCurrentState(swag.StringValue(h.Status)) - if err != nil { - return nil, err +func (m *Manager) HandleInstallationFailure(ctx context.Context, h *models.Host) error { + + lastStatusUpdateTime := h.StatusUpdatedAt + err := m.sm.Run(TransitionTypeHostInstallationFailed, newStateHost(h), &TransitionArgsHostInstallationFailed{ + ctx: ctx, + reason: "installation command failed", + }) + if err == nil { + m.reportInstallationMetrics(ctx, h, &models.HostProgressInfo{CurrentStage: "installation command failed", + StageStartedAt: lastStatusUpdateTime}, models.HostStageFailed) } - return state.UpdateHwInfo(ctx, h, hwInfo) + return err } -func (m *Manager) UpdateInventory(ctx context.Context, h *models.Host, inventory string) (*UpdateReply, error) { - state, err := m.getCurrentState(swag.StringValue(h.Status)) - if err != nil { - return nil, err +func (m *Manager) UpdateInventory(ctx context.Context, h *models.Host, inventory string) error { + hostStatus := swag.StringValue(h.Status) + allowedStatuses := 
[]string{models.HostStatusDiscovering, models.HostStatusKnown, models.HostStatusDisconnected, + models.HostStatusInsufficient, models.HostStatusPendingForInput} + if !funk.ContainsString(allowedStatuses, hostStatus) { + return common.NewApiError(http.StatusConflict, + errors.Errorf("Host is in %s state, host can be updated only in one of %s states", + hostStatus, allowedStatuses)) } - return state.UpdateInventory(ctx, h, inventory) + h.Inventory = inventory + return m.db.Model(h).Update("inventory", inventory).Error } -func (m *Manager) UpdateRole(ctx context.Context, h *models.Host, role string, db *gorm.DB) (*UpdateReply, error) { - state, err := m.getCurrentState(swag.StringValue(h.Status)) +func (m *Manager) RefreshStatus(ctx context.Context, h *models.Host, db *gorm.DB) error { + if db == nil { + db = m.db + } + vc, err := newValidationContext(h, db) if err != nil { - return nil, err + return err } - return state.UpdateRole(ctx, h, role, db) -} - -func (m *Manager) RefreshStatus(ctx context.Context, h *models.Host) (*UpdateReply, error) { - state, err := m.getCurrentState(swag.StringValue(h.Status)) + conditions, validationsResults, err := m.rp.preprocess(vc) if err != nil { - return nil, err + return err } - return state.RefreshStatus(ctx, h) -} - -func (m *Manager) Install(ctx context.Context, h *models.Host, db *gorm.DB) (*UpdateReply, error) { - state, err := m.getCurrentState(swag.StringValue(h.Status)) + err = m.sm.Run(TransitionTypeRefresh, newStateHost(h), &TransitionArgsRefreshHost{ + ctx: ctx, + db: db, + eventHandler: m.eventsHandler, + conditions: conditions, + validationResults: validationsResults, + }) if err != nil { - return nil, err + return common.NewApiError(http.StatusConflict, err) } - return state.Install(ctx, h, db) + return nil } -func (m *Manager) EnableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - state, err := m.getCurrentState(swag.StringValue(h.Status)) - if err != nil { - return nil, err +func (m *Manager) 
Install(ctx context.Context, h *models.Host, db *gorm.DB) error { + cdb := m.db + if db != nil { + cdb = db } - return state.EnableHost(ctx, h) + return m.sm.Run(TransitionTypeInstallHost, newStateHost(h), &TransitionArgsInstallHost{ + ctx: ctx, + db: cdb, + }) } -func (m *Manager) DisableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - state, err := m.getCurrentState(swag.StringValue(h.Status)) - if err != nil { - return nil, err - } - return state.DisableHost(ctx, h) +func (m *Manager) EnableHost(ctx context.Context, h *models.Host) error { + return m.sm.Run(TransitionTypeEnableHost, newStateHost(h), &TransitionArgsEnableHost{ + ctx: ctx, + }) } -func (m *Manager) GetNextSteps(ctx context.Context, host *models.Host) (models.Steps, error) { - return m.instructionApi.GetNextSteps(ctx, host) +func (m *Manager) DisableHost(ctx context.Context, h *models.Host) error { + return m.sm.Run(TransitionTypeDisableHost, newStateHost(h), &TransitionArgsDisableHost{ + ctx: ctx, + }) } -func (m *Manager) GetHostValidDisks(host *models.Host) ([]*models.Disk, error) { - return m.hwValidator.GetHostValidDisks(host) +func (m *Manager) GetNextSteps(ctx context.Context, host *models.Host) (models.Steps, error) { + return m.instructionApi.GetNextSteps(ctx, host) } -func (m *Manager) UpdateInstallProgress(ctx context.Context, h *models.Host, progress string) error { - if swag.StringValue(h.Status) != HostStatusInstalling && swag.StringValue(h.Status) != HostStatusInstallingInProgress { +func (m *Manager) UpdateInstallProgress(ctx context.Context, h *models.Host, progress *models.HostProgress) error { + validStatuses := []string{HostStatusInstalling, HostStatusInstallingInProgress, HostStatusInstallingPendingUserAction} + if !funk.ContainsString(validStatuses, swag.StringValue(h.Status)) { return fmt.Errorf("can't set progress to host in status <%s>", swag.StringValue(h.Status)) } + previousProgress := h.Progress + if h.Progress.CurrentStage != "" && 
progress.CurrentStage != models.HostStageFailed { + // Verify the new stage is higher or equal to the current host stage according to its role stages array + stages := m.GetStagesByRole(h.Role, h.Bootstrap) + currentIndex := indexOfStage(progress.CurrentStage, stages) - // installation done - if progress == progressDone { - _, err := updateStateWithParams(logutil.FromContext(ctx, m.log), - HostStatusInstalled, HostStatusInstalled, h, m.db) - return err + if currentIndex == -1 { + return errors.Errorf("Stages %s isn't available for host role %s bootstrap %s", + progress.CurrentStage, h.Role, strconv.FormatBool(h.Bootstrap)) + } + if currentIndex < indexOfStage(h.Progress.CurrentStage, stages) { + return errors.Errorf("Can't assign lower stage \"%s\" after host has been in stage \"%s\"", + progress.CurrentStage, h.Progress.CurrentStage) + } } - // installation failed - if strings.HasPrefix(progress, progressFailed) { - _, err := updateStateWithParams(logutil.FromContext(ctx, m.log), - HostStatusError, progress, h, m.db) - return err - } + statusInfo := string(progress.CurrentStage) + + var err error + switch progress.CurrentStage { + case models.HostStageDone: + _, err = updateHostProgress(ctx, logutil.FromContext(ctx, m.log), m.db, m.eventsHandler, h.ClusterID, *h.ID, + swag.StringValue(h.Status), HostStatusInstalled, statusInfo, + h.Progress.CurrentStage, progress.CurrentStage, progress.ProgressInfo) + case models.HostStageFailed: + // Keeps the last progress + + if progress.ProgressInfo != "" { + statusInfo += fmt.Sprintf(" - %s", progress.ProgressInfo) + } - _, err := updateStateWithParams(logutil.FromContext(ctx, m.log), - HostStatusInstallingInProgress, progress, h, m.db) + _, err = updateHostStatus(ctx, logutil.FromContext(ctx, m.log), m.db, m.eventsHandler, h.ClusterID, *h.ID, + swag.StringValue(h.Status), HostStatusError, statusInfo) + default: + _, err = updateHostProgress(ctx, logutil.FromContext(ctx, m.log), m.db, m.eventsHandler, h.ClusterID, *h.ID, + 
swag.StringValue(h.Status), HostStatusInstallingInProgress, statusInfo, + h.Progress.CurrentStage, progress.CurrentStage, progress.ProgressInfo) + } + m.reportInstallationMetrics(ctx, h, previousProgress, progress.CurrentStage) return err } -func (m *Manager) SetBootstrap(ctx context.Context, h *models.Host, isbootstrap bool) error { +func (m *Manager) SetBootstrap(ctx context.Context, h *models.Host, isbootstrap bool, db *gorm.DB) error { if h.Bootstrap != isbootstrap { - err := m.db.Model(h).Update("bootstrap", isbootstrap).Error + err := db.Model(h).Update("bootstrap", isbootstrap).Error if err != nil { return errors.Wrapf(err, "failed to set bootstrap to host %s", h.ID.String()) } @@ -256,5 +283,139 @@ func (m *Manager) UpdateConnectivityReport(ctx context.Context, h *models.Host, } } return nil +} + +func (m *Manager) UpdateRole(ctx context.Context, h *models.Host, role models.HostRole, db *gorm.DB) error { + hostStatus := swag.StringValue(h.Status) + allowedStatuses := []string{HostStatusDiscovering, HostStatusKnown, HostStatusDisconnected, HostStatusInsufficient, HostStatusPendingForInput} + if !funk.ContainsString(allowedStatuses, hostStatus) { + return common.NewApiError(http.StatusBadRequest, + errors.Errorf("Host is in %s state, host role can be set only in one of %s states", + hostStatus, allowedStatuses)) + } + + h.Role = role + cdb := m.db + if db != nil { + cdb = db + } + return cdb.Model(h).Update("role", role).Error +} + +func (m *Manager) UpdateHostname(ctx context.Context, h *models.Host, hostname string, db *gorm.DB) error { + hostStatus := swag.StringValue(h.Status) + allowedStatuses := []string{HostStatusDiscovering, HostStatusKnown, HostStatusDisconnected, HostStatusInsufficient, + HostStatusPendingForInput} + if !funk.ContainsString(allowedStatuses, hostStatus) { + return common.NewApiError(http.StatusBadRequest, + errors.Errorf("Host is in %s state, host name can be set only in one of %s states", + hostStatus, allowedStatuses)) + } + + 
h.RequestedHostname = hostname + cdb := m.db + if db != nil { + cdb = db + } + return cdb.Model(h).Update("requested_hostname", hostname).Error +} + +func (m *Manager) CancelInstallation(ctx context.Context, h *models.Host, reason string, db *gorm.DB) *common.ApiErrorResponse { + eventSeverity := models.EventSeverityInfo + eventInfo := fmt.Sprintf("Installation canceled for host %s", common.GetHostnameForMsg(h)) + defer func() { + m.eventsHandler.AddEvent(ctx, h.ID.String(), eventSeverity, eventInfo, time.Now(), h.ClusterID.String()) + }() + + err := m.sm.Run(TransitionTypeCancelInstallation, newStateHost(h), &TransitionArgsCancelInstallation{ + ctx: ctx, + reason: reason, + db: db, + }) + if err != nil { + eventSeverity = models.EventSeverityError + eventInfo = fmt.Sprintf("Failed to cancel installation of host %s: %s", common.GetHostnameForMsg(h), err.Error()) + return common.NewApiError(http.StatusConflict, err) + } + return nil +} +func (m *Manager) IsRequireUserActionReset(h *models.Host) bool { + if swag.StringValue(h.Status) != models.HostStatusResetting { + return false + } + if !funk.Contains(manualRebootStages, h.Progress.CurrentStage) { + return false + } + return true +} + +func (m *Manager) ResetHost(ctx context.Context, h *models.Host, reason string, db *gorm.DB) *common.ApiErrorResponse { + eventSeverity := models.EventSeverityInfo + eventInfo := fmt.Sprintf("Installation reset for host %s", common.GetHostnameForMsg(h)) + defer func() { + m.eventsHandler.AddEvent(ctx, h.ID.String(), eventSeverity, eventInfo, time.Now(), h.ClusterID.String()) + }() + + err := m.sm.Run(TransitionTypeResetHost, newStateHost(h), &TransitionArgsResetHost{ + ctx: ctx, + reason: reason, + db: db, + }) + if err != nil { + eventSeverity = models.EventSeverityError + eventInfo = fmt.Sprintf("Failed to reset installation of host %s. 
Error: %s", common.GetHostnameForMsg(h), err.Error()) + return common.NewApiError(http.StatusConflict, err) + } + return nil +} + +func (m *Manager) ResetPendingUserAction(ctx context.Context, h *models.Host, db *gorm.DB) error { + err := m.sm.Run(TransitionTypeResettingPendingUserAction, newStateHost(h), &TransitionResettingPendingUserAction{ + ctx: ctx, + db: db, + }) + if err != nil { + return err + } + return nil +} + +func (m *Manager) GetStagesByRole(role models.HostRole, isbootstrap bool) []models.HostStage { + if isbootstrap || role == models.HostRoleBootstrap { + return BootstrapStages[:] + } + + switch role { + case models.HostRoleMaster: + return MasterStages[:] + case models.HostRoleWorker: + return WorkerStages[:] + default: + return []models.HostStage{} + } +} + +func (m *Manager) IsInstallable(h *models.Host) bool { + return swag.StringValue(h.Status) == models.HostStatusKnown +} + +func (m *Manager) PrepareForInstallation(ctx context.Context, h *models.Host, db *gorm.DB) error { + return m.sm.Run(TransitionTypePrepareForInstallation, newStateHost(h), &TransitionArgsPrepareForInstallation{ + ctx: ctx, + db: db, + }) +} + +func (m *Manager) reportInstallationMetrics(ctx context.Context, h *models.Host, previousProgress *models.HostProgressInfo, CurrentStage models.HostStage) { + log := logutil.FromContext(ctx, m.log) + //get openshift version from cluster + var cluster common.Cluster + + err := m.db.First(&cluster, "id = ?", h.ClusterID).Error + if err != nil { + log.WithError(err).Errorf("not reporting installation metrics - failed to find cluster %s", h.ClusterID) + } else { + m.metricApi.ReportHostInstallationMetrics(log, cluster.OpenshiftVersion, h, previousProgress, CurrentStage) + } } diff --git a/internal/host/host_test.go b/internal/host/host_test.go index 3f4dbad29..a0654def3 100644 --- a/internal/host/host_test.go +++ b/internal/host/host_test.go @@ -2,76 +2,432 @@ package host import ( "context" + "encoding/json" "fmt" "io/ioutil" - 
"testing" + "net/http" + "strconv" + "time" - "github.com/filanov/bm-inventory/internal/hardware" + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/internal/events" + "github.com/filanov/bm-inventory/internal/metrics" "github.com/filanov/bm-inventory/models" + "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" "github.com/golang/mock/gomock" "github.com/google/uuid" "github.com/jinzhu/gorm" - _ "github.com/jinzhu/gorm/dialects/sqlite" + _ "github.com/jinzhu/gorm/dialects/postgres" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/sirupsen/logrus" ) -var defaultHwInfo = "default hw info" // invalid hw info used only for tests +var defaultHwInfo = "default hw info" // invalid hw info used only for tests +var defaultInventoryS = "default inventory" // invalid inventory info used only for tests +var defaultProgressStage = models.HostStage("default progress stage") // invalid progress stage used only for tests -var _ = Describe("statemachine", func() { +var _ = Describe("update_role", func() { var ( ctx = context.Background() db *gorm.DB - ctrl *gomock.Controller - mockValidator *hardware.MockValidator state API host models.Host - stateReply *UpdateReply - stateErr error + id, clusterID strfmt.UUID + dbName = "update_role" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + state = NewManager(getTestLog(), db, nil, nil, nil, createValidatorCfg(), nil) + id = strfmt.UUID(uuid.New().String()) + clusterID = strfmt.UUID(uuid.New().String()) + }) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) + + Context("update role by src state", func() { + success := func(srcState string) { + host = getTestHost(id, clusterID, srcState) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, nil)).ShouldNot(HaveOccurred()) + h := getHost(id, clusterID, db) + Expect(h.Role).To(Equal(models.HostRoleMaster)) + } + + failure 
:= func(srcState string) { + host = getTestHost(id, clusterID, srcState) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, nil)).To(HaveOccurred()) + h := getHost(id, clusterID, db) + Expect(h.Role).To(Equal(models.HostRoleWorker)) + } + + tests := []struct { + name string + srcState string + testFunc func(srcState string) + }{ + { + name: "discovering", + srcState: HostStatusDiscovering, + testFunc: success, + }, + { + name: "known", + srcState: HostStatusKnown, + testFunc: success, + }, + { + name: "disconnected", + srcState: HostStatusDisconnected, + testFunc: success, + }, + { + name: "insufficient", + srcState: HostStatusInsufficient, + testFunc: success, + }, + { + name: "disabled", + srcState: HostStatusDisabled, + testFunc: failure, + }, + { + name: "error", + srcState: HostStatusError, + testFunc: failure, + }, + { + name: "installing", + srcState: HostStatusInstalling, + testFunc: failure, + }, + { + name: "installed", + srcState: HostStatusInstalled, + testFunc: failure, + }, + { + name: "installing-in-progress", + srcState: HostStatusInstallingInProgress, + testFunc: failure, + }, + } + + for i := range tests { + t := tests[i] + It(t.name, func() { + t.testFunc(t.srcState) + }) + } + }) + + It("update role with transaction", func() { + host = getTestHost(id, clusterID, HostStatusKnown) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + By("rollback transaction", func() { + tx := db.Begin() + Expect(tx.Error).ShouldNot(HaveOccurred()) + Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, tx)).NotTo(HaveOccurred()) + Expect(tx.Rollback().Error).ShouldNot(HaveOccurred()) + h := getHost(id, clusterID, db) + Expect(h.Role).Should(Equal(models.HostRoleWorker)) + }) + By("commit transaction", func() { + tx := db.Begin() + Expect(tx.Error).ShouldNot(HaveOccurred()) + Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, tx)).NotTo(HaveOccurred()) + 
Expect(tx.Commit().Error).ShouldNot(HaveOccurred()) + h := getHost(id, clusterID, db) + Expect(h.Role).Should(Equal(models.HostRoleMaster)) + }) + }) + + It("update role master to worker", func() { + host = getTestHost(id, clusterID, HostStatusKnown) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, nil)).NotTo(HaveOccurred()) + h := getHost(id, clusterID, db) + Expect(h.Role).To(Equal(models.HostRoleMaster)) + Expect(state.UpdateRole(ctx, &host, models.HostRoleWorker, nil)).NotTo(HaveOccurred()) + h = getHost(id, clusterID, db) + Expect(h.Role).To(Equal(models.HostRoleWorker)) + }) +}) + +var _ = Describe("update_progress", func() { + var ( + ctx = context.Background() + db *gorm.DB + state API + host models.Host + ctrl *gomock.Controller + mockEvents *events.MockHandler + mockMetric *metrics.MockAPI + dbName = "host_update_progress" ) + setDefaultReportHostInstallationMetrics := func(mockMetricApi *metrics.MockAPI) { + mockMetricApi.EXPECT().ReportHostInstallationMetrics(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + } + BeforeEach(func() { - db = prepareDB() + db = common.PrepareTestDB(dbName, &events.Event{}) ctrl = gomock.NewController(GinkgoT()) - mockValidator = hardware.NewMockValidator(ctrl) - state = NewManager(getTestLog(), db, mockValidator, nil) + mockEvents = events.NewMockHandler(ctrl) + mockMetric = metrics.NewMockAPI(ctrl) + state = NewManager(getTestLog(), db, mockEvents, nil, nil, createValidatorCfg(), mockMetric) id := strfmt.UUID(uuid.New().String()) clusterId := strfmt.UUID(uuid.New().String()) - host = getTestHost(id, clusterId, "unknown invalid state") + host = getTestHost(id, clusterId, "") + }) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) }) - Context("unknown_host_state", func() { + Context("installing host", func() { + var ( + progress models.HostProgress + hostFromDB *models.Host + ) + + BeforeEach(func() { + ctrl = 
gomock.NewController(GinkgoT()) + mockMetric = metrics.NewMockAPI(ctrl) + setDefaultReportHostInstallationMetrics(mockMetric) + host.Status = swag.String(HostStatusInstalling) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + mockMetric.EXPECT().ReportHostInstallationMetrics(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + }) + + Context("positive stages", func() { + It("some_progress", func() { + progress.CurrentStage = defaultProgressStage + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"installing\" to \"installing-in-progress\" (default progress stage)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + Expect(state.UpdateInstallProgress(ctx, &host, &progress)).ShouldNot(HaveOccurred()) + hostFromDB = getHost(*host.ID, host.ClusterID, db) + Expect(*hostFromDB.Status).Should(Equal(HostStatusInstallingInProgress)) + }) + + It("writing to disk", func() { + progress.CurrentStage = models.HostStageWritingImageToDisk + progress.ProgressInfo = "20%" + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"installing\" to \"installing-in-progress\" (Writing image to disk)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + Expect(state.UpdateInstallProgress(ctx, &host, &progress)).ShouldNot(HaveOccurred()) + hostFromDB = getHost(*host.ID, host.ClusterID, db) + + Expect(*hostFromDB.Status).Should(Equal(HostStatusInstallingInProgress)) + }) + + It("done", func() { + progress.CurrentStage = models.HostStageDone + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"installing\" to \"installed\" (Done)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + Expect(state.UpdateInstallProgress(ctx, &host, &progress)).ShouldNot(HaveOccurred()) + hostFromDB = 
getHost(*host.ID, host.ClusterID, db) + + Expect(*hostFromDB.Status).Should(Equal(HostStatusInstalled)) + }) + + AfterEach(func() { + Expect(*hostFromDB.StatusInfo).Should(Equal(string(progress.CurrentStage))) + Expect(hostFromDB.Progress.CurrentStage).Should(Equal(progress.CurrentStage)) + Expect(hostFromDB.Progress.ProgressInfo).Should(Equal(progress.ProgressInfo)) + }) + }) + + Context("Negative stages", func() { + It("progress_failed", func() { + progress.CurrentStage = models.HostStageFailed + progress.ProgressInfo = "reason" + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), models.EventSeverityError, + fmt.Sprintf("Host %s: updated status from \"installing\" to \"error\" (Failed - reason)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + Expect(state.UpdateInstallProgress(ctx, &host, &progress)).ShouldNot(HaveOccurred()) + hostFromDB = getHost(*host.ID, host.ClusterID, db) + + Expect(*hostFromDB.Status).Should(Equal(HostStatusError)) + Expect(*hostFromDB.StatusInfo).Should(Equal(fmt.Sprintf("%s - %s", progress.CurrentStage, progress.ProgressInfo))) + }) + + It("progress_failed_empty_reason", func() { + progress.CurrentStage = models.HostStageFailed + progress.ProgressInfo = "" + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), models.EventSeverityError, + fmt.Sprintf("Host %s: updated status from \"installing\" to \"error\" "+ + "(Failed)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + Expect(state.UpdateInstallProgress(ctx, &host, &progress)).ShouldNot(HaveOccurred()) + hostFromDB = getHost(*host.ID, host.ClusterID, db) + Expect(*hostFromDB.Status).Should(Equal(HostStatusError)) + Expect(*hostFromDB.StatusInfo).Should(Equal(string(progress.CurrentStage))) + }) + + It("progress_failed_after_a_stage", func() { + By("Some stage", func() { + progress.CurrentStage = models.HostStageWritingImageToDisk + progress.ProgressInfo = "20%" + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), 
models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"installing\" to \"installing-in-progress\" "+ + "(Writing image to disk)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + Expect(state.UpdateInstallProgress(ctx, &host, &progress)).ShouldNot(HaveOccurred()) + hostFromDB = getHost(*host.ID, host.ClusterID, db) + Expect(*hostFromDB.Status).Should(Equal(HostStatusInstallingInProgress)) + Expect(*hostFromDB.StatusInfo).Should(Equal(string(progress.CurrentStage))) + + Expect(hostFromDB.Progress.CurrentStage).Should(Equal(progress.CurrentStage)) + Expect(hostFromDB.Progress.ProgressInfo).Should(Equal(progress.ProgressInfo)) + }) + + By("Failed", func() { + newProgress := models.HostProgress{ + CurrentStage: models.HostStageFailed, + ProgressInfo: "reason", + } + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), models.EventSeverityError, + fmt.Sprintf("Host %s: updated status from \"installing-in-progress\" to \"error\" "+ + "(Failed - reason)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + Expect(state.UpdateInstallProgress(ctx, hostFromDB, &newProgress)).ShouldNot(HaveOccurred()) + hostFromDB = getHost(*host.ID, host.ClusterID, db) + Expect(*hostFromDB.Status).Should(Equal(HostStatusError)) + Expect(*hostFromDB.StatusInfo).Should(Equal(fmt.Sprintf("%s - %s", newProgress.CurrentStage, newProgress.ProgressInfo))) + + Expect(hostFromDB.Progress.CurrentStage).Should(Equal(progress.CurrentStage)) + Expect(hostFromDB.Progress.ProgressInfo).Should(Equal(progress.ProgressInfo)) + }) + }) - It("enable_host", func() { - stateReply, stateErr = state.EnableHost(ctx, &host) + It("lower_stage", func() { + verifyDb := func() { + hostFromDB = getHost(*host.ID, host.ClusterID, db) + Expect(*hostFromDB.Status).Should(Equal(HostStatusInstallingInProgress)) + Expect(*hostFromDB.StatusInfo).Should(Equal(string(progress.CurrentStage))) + + Expect(hostFromDB.Progress.CurrentStage).Should(Equal(progress.CurrentStage)) + 
Expect(hostFromDB.Progress.ProgressInfo).Should(Equal(progress.ProgressInfo)) + } + + By("Some stage", func() { + progress.CurrentStage = models.HostStageWritingImageToDisk + progress.ProgressInfo = "20%" + mockMetric.EXPECT().ReportHostInstallationMetrics(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"installing\" to \"installing-in-progress\" "+ + "(Writing image to disk)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + Expect(state.UpdateInstallProgress(ctx, &host, &progress)).ShouldNot(HaveOccurred()) + verifyDb() + }) + + By("Lower stage", func() { + newProgress := models.HostProgress{ + CurrentStage: models.HostStageInstalling, + } + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"installing\" to \"installing-in-progress\" "+ + "(Writing image to disk)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + Expect(state.UpdateInstallProgress(ctx, hostFromDB, &newProgress)).Should(HaveOccurred()) + verifyDb() + }) + }) }) + }) + + It("invalid stage", func() { + Expect(state.UpdateInstallProgress(ctx, &host, + &models.HostProgress{CurrentStage: defaultProgressStage})).Should(HaveOccurred()) + }) +}) + +var _ = Describe("monitor_disconnection", func() { + var ( + ctx = context.Background() + db *gorm.DB + state API + host models.Host + ctrl *gomock.Controller + mockEvents *events.MockHandler + dbName = "monitor_disconnection" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + ctrl = gomock.NewController(GinkgoT()) + mockEvents = events.NewMockHandler(ctrl) + state = NewManager(getTestLog(), db, mockEvents, nil, nil, createValidatorCfg(), nil) + clusterID := strfmt.UUID(uuid.New().String()) + host = getTestHost(strfmt.UUID(uuid.New().String()), clusterID, 
HostStatusDiscovering) + cluster := getTestCluster(clusterID, "1.1.0.0/16") + Expect(db.Save(&cluster).Error).ToNot(HaveOccurred()) + host.Inventory = workerInventory() + err := state.RegisterHost(ctx, &host) + Expect(err).ShouldNot(HaveOccurred()) + db.First(&host, "id = ? and cluster_id = ?", host.ID, host.ClusterID) + }) - It("disable_host", func() { - stateReply, stateErr = state.DisableHost(ctx, &host) + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) + + Context("host_disconnecting", func() { + It("known_host_disconnects", func() { + host.CheckedInAt = strfmt.DateTime(time.Now().Add(-4 * time.Minute)) + host.Status = swag.String(HostStatusKnown) + db.Save(&host) }) - It("update role", func() { - stateReply, stateErr = state.UpdateRole(ctx, &host, "master", nil) + It("discovering_host_disconnects", func() { + host.CheckedInAt = strfmt.DateTime(time.Now().Add(-4 * time.Minute)) + host.Status = swag.String(HostStatusDiscovering) + db.Save(&host) }) - It("install", func() { - stateReply, stateErr = state.Install(ctx, &host, nil) + It("known_host_insufficient", func() { + host.CheckedInAt = strfmt.DateTime(time.Now().Add(-4 * time.Minute)) + host.Status = swag.String(HostStatusInsufficient) + db.Save(&host) }) - It("update_hw_info", func() { - stateReply, stateErr = state.UpdateHwInfo(ctx, &host, "some hw info") + AfterEach(func() { + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), models.EventSeverityWarning, + fmt.Sprintf("Host %s: updated status from \"%s\" to \"disconnected\" (Host keepalive timeout)", + host.ID.String(), *host.Status), + gomock.Any(), host.ClusterID.String()) + state.HostMonitoring() + db.First(&host, "id = ? 
and cluster_id = ?", host.ID, host.ClusterID) + Expect(*host.Status).Should(Equal(HostStatusDisconnected)) }) + }) - It("update_hw_info", func() { - stateReply, stateErr = state.RefreshStatus(ctx, &host) + Context("host_reconnecting", func() { + It("host_connects", func() { + host.CheckedInAt = strfmt.DateTime(time.Now()) + host.Inventory = "" + host.Status = swag.String(HostStatusDisconnected) + db.Save(&host) }) AfterEach(func() { - Expect(stateReply).To(BeNil()) - Expect(stateErr).Should(HaveOccurred()) + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"disconnected\" to \"discovering\" (Waiting for host hardware info)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + state.HostMonitoring() + db.First(&host, "id = ? and cluster_id = ?", host.ID, host.ClusterID) + Expect(*host.Status).Should(Equal(HostStatusDiscovering)) }) }) @@ -81,58 +437,138 @@ var _ = Describe("statemachine", func() { }) }) -var _ = Describe("update_progress", func() { +var _ = Describe("cancel_installation", func() { var ( - ctx = context.Background() - db *gorm.DB - state API - host models.Host + ctx = context.Background() + db *gorm.DB + state API + h models.Host + eventsHandler events.Handler + dbName = "cancel_installation" ) BeforeEach(func() { - db = prepareDB() - state = NewManager(getTestLog(), db, nil, nil) + db = common.PrepareTestDB(dbName, &events.Event{}) + eventsHandler = events.New(db, logrus.New()) + state = NewManager(getTestLog(), db, eventsHandler, nil, nil, nil, nil) id := strfmt.UUID(uuid.New().String()) clusterId := strfmt.UUID(uuid.New().String()) - host = getTestHost(id, clusterId, "") + h = getTestHost(id, clusterId, HostStatusDiscovering) }) - Context("installaing host", func() { - BeforeEach(func() { - host.Status = swag.String(HostStatusInstalling) - Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) - }) - It("some_progress", func() { - 
Expect(state.UpdateInstallProgress(ctx, &host, "some progress")).ShouldNot(HaveOccurred()) - h := getHost(*host.ID, host.ClusterID, db) - Expect(*h.Status).Should(Equal(HostStatusInstallingInProgress)) - Expect(*h.StatusInfo).Should(Equal("some progress")) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) + + Context("cancel_installation", func() { + It("cancel_installation", func() { + h.Status = swag.String(HostStatusInstalling) + Expect(db.Create(&h).Error).ShouldNot(HaveOccurred()) + Expect(state.CancelInstallation(ctx, &h, "some reason", db)).ShouldNot(HaveOccurred()) + events, err := eventsHandler.GetEvents(h.ID.String()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(events)).ShouldNot(Equal(0)) + cancelEvent := events[len(events)-1] + Expect(*cancelEvent.Severity).Should(Equal(models.EventSeverityInfo)) + eventMessage := fmt.Sprintf("Installation canceled for host %s", common.GetHostnameForMsg(&h)) + Expect(*cancelEvent.Message).Should(Equal(eventMessage)) }) - It("done", func() { - Expect(state.UpdateInstallProgress(ctx, &host, progressDone)).ShouldNot(HaveOccurred()) - h := getHost(*host.ID, host.ClusterID, db) - Expect(*h.Status).Should(Equal(HostStatusInstalled)) - Expect(*h.StatusInfo).Should(Equal(HostStatusInstalled)) + It("cancel_failed_installation", func() { + h.Status = swag.String(HostStatusError) + Expect(db.Create(&h).Error).ShouldNot(HaveOccurred()) + Expect(state.CancelInstallation(ctx, &h, "some reason", db)).ShouldNot(HaveOccurred()) + events, err := eventsHandler.GetEvents(h.ID.String()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(events)).ShouldNot(Equal(0)) + cancelEvent := events[len(events)-1] + Expect(*cancelEvent.Severity).Should(Equal(models.EventSeverityInfo)) + eventMessage := fmt.Sprintf("Installation canceled for host %s", common.GetHostnameForMsg(&h)) + Expect(*cancelEvent.Message).Should(Equal(eventMessage)) }) - It("progress_failed", func() { - failedProgress := fmt.Sprintf("%s because of 
something", progressFailed) - Expect(state.UpdateInstallProgress(ctx, &host, failedProgress)).ShouldNot(HaveOccurred()) - h := getHost(*host.ID, host.ClusterID, db) + AfterEach(func() { + db.First(&h, "id = ? and cluster_id = ?", h.ID, h.ClusterID) Expect(*h.Status).Should(Equal(HostStatusError)) - Expect(*h.StatusInfo).Should(Equal(failedProgress)) }) }) - It("invalid state", func() { - Expect(state.UpdateInstallProgress(ctx, &host, "don't care")).Should(HaveOccurred()) + Context("invalid_cancel_installation", func() { + It("nothing_to_cancel", func() { + Expect(state.CancelInstallation(ctx, &h, "some reason", db)).Should(HaveOccurred()) + events, err := eventsHandler.GetEvents(h.ID.String()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(events)).ShouldNot(Equal(0)) + cancelEvent := events[len(events)-1] + Expect(*cancelEvent.Severity).Should(Equal(models.EventSeverityError)) + }) }) }) -func TestSubsystem(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "host state machine tests") -} +var _ = Describe("reset_host", func() { + var ( + ctx = context.Background() + db *gorm.DB + state API + h models.Host + eventsHandler events.Handler + dbName = "reset_host" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + eventsHandler = events.New(db, logrus.New()) + state = NewManager(getTestLog(), db, eventsHandler, nil, nil, nil, nil) + }) + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) + + Context("reset_installation", func() { + It("reset_installation", func() { + id := strfmt.UUID(uuid.New().String()) + clusterId := strfmt.UUID(uuid.New().String()) + h = getTestHost(id, clusterId, HostStatusError) + Expect(db.Create(&h).Error).ShouldNot(HaveOccurred()) + Expect(state.ResetHost(ctx, &h, "some reason", db)).ShouldNot(HaveOccurred()) + db.First(&h, "id = ? 
and cluster_id = ?", h.ID, h.ClusterID) + Expect(*h.Status).Should(Equal(HostStatusResetting)) + events, err := eventsHandler.GetEvents(h.ID.String()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(events)).ShouldNot(Equal(0)) + resetEvent := events[len(events)-1] + Expect(*resetEvent.Severity).Should(Equal(models.EventSeverityInfo)) + eventMessage := fmt.Sprintf("Installation reset for host %s", common.GetHostnameForMsg(&h)) + Expect(*resetEvent.Message).Should(Equal(eventMessage)) + }) + + It("register resetting host", func() { + id := strfmt.UUID(uuid.New().String()) + clusterId := strfmt.UUID(uuid.New().String()) + h = getTestHost(id, clusterId, HostStatusResetting) + Expect(db.Create(&h).Error).ShouldNot(HaveOccurred()) + Expect(state.RegisterHost(ctx, &h)).ShouldNot(HaveOccurred()) + db.First(&h, "id = ? and cluster_id = ?", h.ID, h.ClusterID) + Expect(*h.Status).Should(Equal(HostStatusDiscovering)) + }) + }) + + Context("invalid_reset_installation", func() { + It("nothing_to_reset", func() { + id := strfmt.UUID(uuid.New().String()) + clusterId := strfmt.UUID(uuid.New().String()) + h = getTestHost(id, clusterId, HostStatusDiscovering) + reply := state.ResetHost(ctx, &h, "some reason", db) + Expect(int(reply.StatusCode())).Should(Equal(http.StatusConflict)) + events, err := eventsHandler.GetEvents(h.ID.String()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(events)).ShouldNot(Equal(0)) + resetEvent := events[len(events)-1] + Expect(*resetEvent.Severity).Should(Equal(models.EventSeverityError)) + }) + }) + +}) func getHost(hostId, clusterId strfmt.UUID, db *gorm.DB) *models.Host { var host models.Host @@ -140,57 +576,472 @@ func getHost(hostId, clusterId strfmt.UUID, db *gorm.DB) *models.Host { return &host } -func prepareDB() *gorm.DB { - db, err := gorm.Open("sqlite3", ":memory:") - Expect(err).ShouldNot(HaveOccurred()) - //db = db.Debug() - db.AutoMigrate(&models.Host{}, &models.Cluster{}) - return db +func getTestLog() logrus.FieldLogger { + l 
:= logrus.New() + l.SetOutput(ioutil.Discard) + return l } -type expect struct { - expectError bool - expectedState string - postCheck func() +func getTestHost(hostID, clusterID strfmt.UUID, state string) models.Host { + return models.Host{ + ID: &hostID, + ClusterID: clusterID, + Status: swag.String(state), + Inventory: defaultInventory(), + Role: models.HostRoleWorker, + CheckedInAt: strfmt.DateTime(time.Now()), + } } -func postValidation(expectedReply *expect, firstState string, db *gorm.DB, id, clusterId strfmt.UUID, - updateReply *UpdateReply, updateErr error) { - if expectedReply != nil { - h := getHost(id, clusterId, db) - if expectedReply.expectError { - Expect(updateReply).To(BeNil()) - Expect(updateErr).Should(HaveOccurred()) - Expect(swag.StringValue(h.Status)).Should(Equal(firstState)) - } else { - Expect(updateErr).ShouldNot(HaveOccurred()) - Expect(updateReply).NotTo(BeNil()) - Expect(updateReply.State).Should(Equal(expectedReply.expectedState)) - if updateReply.State == firstState { - Expect(updateReply.IsChanged).Should(BeFalse()) - } else { - Expect(updateReply.IsChanged).Should(BeTrue()) - } - Expect(swag.StringValue(h.Status)).Should(Equal(expectedReply.expectedState)) - } +func getTestCluster(clusterID strfmt.UUID, machineNetworkCidr string) common.Cluster { + return common.Cluster{ + Cluster: models.Cluster{ + ID: &clusterID, + MachineNetworkCidr: machineNetworkCidr, + }, + } +} - if expectedReply.postCheck != nil { - expectedReply.postCheck() - } +func defaultInventory() string { + inventory := models.Inventory{ + Interfaces: []*models.Interface{ + { + Name: "eth0", + IPV4Addresses: []string{ + "1.2.3.4/24", + }, + }, + }, } + b, err := json.Marshal(&inventory) + Expect(err).To(Not(HaveOccurred())) + return string(b) } -func getTestLog() logrus.FieldLogger { - l := logrus.New() - l.SetOutput(ioutil.Discard) - return l +func insufficientHWInventory() string { + inventory := models.Inventory{ + CPU: &models.CPU{Count: 2}, + Disks: 
[]*models.Disk{ + { + SizeBytes: 130, + DriveType: "HDD", + }, + }, + Interfaces: []*models.Interface{ + { + Name: "eth0", + IPV4Addresses: []string{ + "1.2.3.4/24", + }, + }, + }, + Memory: &models.Memory{PhysicalBytes: 130}, + } + b, err := json.Marshal(&inventory) + Expect(err).To(Not(HaveOccurred())) + return string(b) } -func getTestHost(hostID, clusterID strfmt.UUID, state string) models.Host { - return models.Host{ - ID: &hostID, - ClusterID: clusterID, - Status: swag.String(state), - HardwareInfo: defaultHwInfo, +func workerInventory() string { + inventory := models.Inventory{ + CPU: &models.CPU{Count: 2}, + Disks: []*models.Disk{ + { + SizeBytes: 128849018880, + DriveType: "HDD", + }, + }, + Interfaces: []*models.Interface{ + { + Name: "eth0", + IPV4Addresses: []string{ + "1.2.3.4/24", + }, + }, + }, + Memory: &models.Memory{PhysicalBytes: gibToBytes(8)}, + } + b, err := json.Marshal(&inventory) + Expect(err).To(Not(HaveOccurred())) + return string(b) +} + +func masterInventory() string { + return masterInventoryWithHostname("master-hostname") +} + +func masterInventoryWithHostname(hostname string) string { + inventory := models.Inventory{ + CPU: &models.CPU{Count: 8}, + Disks: []*models.Disk{ + { + SizeBytes: 128849018880, + DriveType: "HDD", + }, + }, + Interfaces: []*models.Interface{ + { + Name: "eth0", + IPV4Addresses: []string{ + "1.2.3.4/24", + }, + }, + }, + Memory: &models.Memory{PhysicalBytes: gibToBytes(16)}, + Hostname: hostname, } + b, err := json.Marshal(&inventory) + Expect(err).To(Not(HaveOccurred())) + return string(b) } + +var _ = Describe("UpdateInventory", func() { + var ( + ctx = context.Background() + hapi API + db *gorm.DB + hostId, clusterId strfmt.UUID + host models.Host + dbName = "update_inventory" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + hapi = NewManager(getTestLog(), db, nil, nil, nil, createValidatorCfg(), nil) + hostId = strfmt.UUID(uuid.New().String()) + clusterId = 
strfmt.UUID(uuid.New().String()) + }) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) + + Context("enable host", func() { + newInventory := "new inventory stuff" + success := func(reply error) { + Expect(reply).To(BeNil()) + h := getHost(hostId, clusterId, db) + Expect(h.Inventory).To(Equal(newInventory)) + } + + failure := func(reply error) { + Expect(reply).To(HaveOccurred()) + h := getHost(hostId, clusterId, db) + Expect(h.Inventory).To(Equal(defaultInventoryS)) + } + + tests := []struct { + name string + srcState string + validation func(error) + }{ + { + name: models.HostStatusKnown, + srcState: models.HostStatusKnown, + validation: success, + }, + { + name: models.HostStatusDisabled, + srcState: models.HostStatusDisabled, + validation: failure, + }, + { + name: models.HostStatusDisconnected, + srcState: models.HostStatusDisconnected, + validation: success, + }, + { + name: models.HostStatusDiscovering, + srcState: models.HostStatusDiscovering, + validation: success, + }, + { + name: models.HostStatusError, + srcState: models.HostStatusError, + validation: failure, + }, + { + name: models.HostStatusInstalled, + srcState: models.HostStatusInstalled, + validation: failure, + }, + { + name: models.HostStatusInstalling, + srcState: models.HostStatusInstalling, + validation: failure, + }, + { + name: models.HostStatusInstallingInProgress, + srcState: models.HostStatusInstallingInProgress, + validation: failure, + }, + { + name: models.HostStatusResettingPendingUserAction, + srcState: models.HostStatusResettingPendingUserAction, + validation: failure, + }, + { + name: models.HostStatusInsufficient, + srcState: models.HostStatusInsufficient, + validation: success, + }, + { + name: models.HostStatusResetting, + srcState: models.HostStatusResetting, + validation: failure, + }, + { + name: models.HostStatusPendingForInput, + srcState: models.HostStatusPendingForInput, + validation: success, + }, + } + + for i := range tests { + t := tests[i] + It(t.name, 
func() { + host = getTestHost(hostId, clusterId, t.srcState) + host.Inventory = defaultInventoryS + + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + t.validation(hapi.UpdateInventory(ctx, &host, newInventory)) + }) + } + }) +}) + +var _ = Describe("Update hostname", func() { + var ( + ctx = context.Background() + hapi API + db *gorm.DB + hostId, clusterId strfmt.UUID + host models.Host + dbName = "update_inventory" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + hapi = NewManager(getTestLog(), db, nil, nil, nil, createValidatorCfg(), nil) + hostId = strfmt.UUID(uuid.New().String()) + clusterId = strfmt.UUID(uuid.New().String()) + }) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) + + Context("set hostname", func() { + success := func(reply error) { + Expect(reply).To(BeNil()) + h := getHost(hostId, clusterId, db) + Expect(h.RequestedHostname).To(Equal("my-hostname")) + } + + failure := func(reply error) { + Expect(reply).To(HaveOccurred()) + h := getHost(hostId, clusterId, db) + Expect(h.RequestedHostname).To(Equal("")) + } + + tests := []struct { + name string + srcState string + validation func(error) + }{ + { + name: models.HostStatusKnown, + srcState: models.HostStatusKnown, + validation: success, + }, + { + name: models.HostStatusDisabled, + srcState: models.HostStatusDisabled, + validation: failure, + }, + { + name: models.HostStatusDisconnected, + srcState: models.HostStatusDisconnected, + validation: success, + }, + { + name: models.HostStatusDiscovering, + srcState: models.HostStatusDiscovering, + validation: success, + }, + { + name: models.HostStatusError, + srcState: models.HostStatusError, + validation: failure, + }, + { + name: models.HostStatusInstalled, + srcState: models.HostStatusInstalled, + validation: failure, + }, + { + name: models.HostStatusInstalling, + srcState: models.HostStatusInstalling, + validation: failure, + }, + { + name: models.HostStatusInstallingInProgress, + srcState: 
models.HostStatusInstallingInProgress, + validation: failure, + }, + { + name: models.HostStatusResettingPendingUserAction, + srcState: models.HostStatusResettingPendingUserAction, + validation: failure, + }, + { + name: models.HostStatusInsufficient, + srcState: models.HostStatusInsufficient, + validation: success, + }, + { + name: models.HostStatusResetting, + srcState: models.HostStatusResetting, + validation: failure, + }, + { + name: models.HostStatusPendingForInput, + srcState: models.HostStatusPendingForInput, + validation: success, + }, + } + + for i := range tests { + t := tests[i] + It(t.name, func() { + host = getTestHost(hostId, clusterId, t.srcState) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + t.validation(hapi.UpdateHostname(ctx, &host, "my-hostname", db)) + }) + } + }) +}) + +var _ = Describe("SetBootstrap", func() { + var ( + ctx = context.Background() + hapi API + db *gorm.DB + ctrl *gomock.Controller + mockEvents *events.MockHandler + hostId, clusterId strfmt.UUID + host models.Host + dbName = "SetBootstrap" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + ctrl = gomock.NewController(GinkgoT()) + mockEvents = events.NewMockHandler(ctrl) + hapi = NewManager(getTestLog(), db, mockEvents, nil, nil, createValidatorCfg(), nil) + hostId = strfmt.UUID(uuid.New().String()) + clusterId = strfmt.UUID(uuid.New().String()) + + host = getTestHost(hostId, clusterId, HostStatusResetting) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + + h := getHost(*host.ID, host.ClusterID, db) + Expect(h.Bootstrap).Should(Equal(false)) + }) + + tests := []struct { + IsBootstrap bool + }{ + { + IsBootstrap: true, + }, + { + IsBootstrap: false, + }, + } + + for i := range tests { + t := tests[i] + It(fmt.Sprintf("Boostrap %s", strconv.FormatBool(t.IsBootstrap)), func() { + Expect(hapi.SetBootstrap(ctx, &host, t.IsBootstrap, db)).ShouldNot(HaveOccurred()) + + h := getHost(*host.ID, host.ClusterID, db) + 
Expect(h.Bootstrap).Should(Equal(t.IsBootstrap)) + }) + } + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) +}) + +var _ = Describe("PrepareForInstallation", func() { + var ( + ctx = context.Background() + hapi API + db *gorm.DB + ctrl *gomock.Controller + mockEvents *events.MockHandler + hostId, clusterId strfmt.UUID + host models.Host + dbName = "prepare_for_installation" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + ctrl = gomock.NewController(GinkgoT()) + mockEvents = events.NewMockHandler(ctrl) + hapi = NewManager(getTestLog(), db, mockEvents, nil, nil, createValidatorCfg(), nil) + hostId = strfmt.UUID(uuid.New().String()) + clusterId = strfmt.UUID(uuid.New().String()) + }) + + It("success", func() { + host = getTestHost(hostId, clusterId, models.HostStatusKnown) + mockEvents.EXPECT().AddEvent(gomock.Any(), hostId.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"known\" to \"preparing-for-installation\" (Preparing host for installation)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + Expect(hapi.PrepareForInstallation(ctx, &host, db)).NotTo(HaveOccurred()) + h := getHost(hostId, clusterId, db) + Expect(swag.StringValue(h.Status)).To(Equal(models.HostStatusPreparingForInstallation)) + Expect(swag.StringValue(h.StatusInfo)).To(Equal(statusInfoPreparingForInstallation)) + }) + + It("failure - no role set", func() { + host = getTestHost(hostId, clusterId, models.HostStatusKnown) + host.Role = "" + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + Expect(hapi.PrepareForInstallation(ctx, &host, db)).To(HaveOccurred()) + h := getHost(hostId, clusterId, db) + Expect(swag.StringValue(h.Status)).To(Equal(models.HostStatusKnown)) + }) + + Context("forbidden", func() { + + forbiddenStates := []string{ + models.HostStatusDisabled, + models.HostStatusDisconnected, + models.HostStatusError, + 
models.HostStatusInstalling, + models.HostStatusInstallingInProgress, + models.HostStatusDiscovering, + models.HostStatusPreparingForInstallation, + models.HostStatusResetting, + } + + for _, state := range forbiddenStates { + It(fmt.Sprintf("forbidden state %s", state), func() { + host = getTestHost(hostId, clusterId, state) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + Expect(hapi.PrepareForInstallation(ctx, &host, db)).To(HaveOccurred()) + h := getHost(hostId, clusterId, db) + Expect(swag.StringValue(h.Status)).To(Equal(state)) + }) + } + }) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) +}) diff --git a/internal/host/hosts_suite_test.go b/internal/host/hosts_suite_test.go new file mode 100644 index 000000000..0082e9108 --- /dev/null +++ b/internal/host/hosts_suite_test.go @@ -0,0 +1,17 @@ +package host_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/filanov/bm-inventory/internal/common" +) + +func TestHost(t *testing.T) { + RegisterFailHandler(Fail) + common.InitializeDBTest() + defer common.TerminateDBTest() + RunSpecs(t, "host state machine tests") +} diff --git a/internal/host/hwinfocmd.go b/internal/host/hwinfocmd.go deleted file mode 100644 index 04d4cddff..000000000 --- a/internal/host/hwinfocmd.go +++ /dev/null @@ -1,26 +0,0 @@ -package host - -import ( - "context" - "strings" - - "github.com/sirupsen/logrus" - - "github.com/filanov/bm-inventory/models" -) - -type hwInfoCmd baseCmd - -func NewHwInfoCmd(log logrus.FieldLogger) *hwInfoCmd { - return &hwInfoCmd{ - log: log, - } -} - -func (h *hwInfoCmd) GetStep(ctx context.Context, host *models.Host) (*models.Step, error) { - step := &models.Step{} - step.StepType = models.StepTypeHardwareInfo - step.Command = "podman" - step.Args = strings.Split("run,--rm,--privileged,--quiet,--net=host,-v,/var/log:/var/log,quay.io/oamizur/hardware_info,/usr/bin/hardware_info", ",") - return step, nil -} diff --git 
a/internal/host/installcmd.go b/internal/host/installcmd.go index 6c083b1c8..219318836 100644 --- a/internal/host/installcmd.go +++ b/internal/host/installcmd.go @@ -5,11 +5,13 @@ import ( "context" "fmt" "html/template" + "strings" "github.com/sirupsen/logrus" "github.com/jinzhu/gorm" + "github.com/filanov/bm-inventory/internal/common" "github.com/filanov/bm-inventory/internal/hardware" "github.com/filanov/bm-inventory/models" ) @@ -32,11 +34,11 @@ func NewInstallCmd(log logrus.FieldLogger, db *gorm.DB, hwValidator hardware.Val func (i *installCmd) GetStep(ctx context.Context, host *models.Host) (*models.Step, error) { step := &models.Step{} - step.StepType = models.StepTypeExecute + step.StepType = models.StepTypeInstall step.Command = "bash" //get openshift version - var cluster models.Cluster + var cluster common.Cluster if err := i.db.First(&cluster, "id = ?", host.ClusterID).Error; err != nil { i.log.Errorf("failed to get cluster %s", host.ClusterID) return nil, err @@ -44,38 +46,54 @@ func (i *installCmd) GetStep(ctx context.Context, host *models.Host) (*models.St var role = host.Role if host.Bootstrap { - role = RoleBootstrap + role = models.HostRoleBootstrap } - const cmdArgsTmpl = "sudo podman run -v /dev:/dev:rw -v /opt:/opt:rw --privileged --pid=host --net=host " + - "--name assisted-installer {{.INSTALLER}} --role {{.ROLE}} --cluster-id {{.CLUSTER_ID}} --host {{.HOST}} " + - "--port {{.PORT}} --boot-device {{.BOOT_DEVICE}} --host-id {{.HOST_ID}} --openshift-version {{.OPENSHIFT_VERSION}}" - t, err := template.New("cmd").Parse(cmdArgsTmpl) - if err != nil { - return nil, err - } + cmdArgsTmpl := "podman run -v /dev:/dev:rw -v /opt:/opt:rw -v /run/systemd/journal/socket:/run/systemd/journal/socket --privileged --pid=host --net=host " + + "-v /var/log:/var/log:rw --env PULL_SECRET_TOKEN --name assisted-installer {{.INSTALLER}} --role {{.ROLE}} --cluster-id {{.CLUSTER_ID}} --host {{.HOST}} " + + "--port {{.PORT}} --boot-device {{.BOOT_DEVICE}} --host-id 
{{.HOST_ID}} --openshift-version {{.OPENSHIFT_VERSION}} " + + "--controller-image {{.CONTROLLER_IMAGE}}" data := map[string]string{ - "HOST": i.instructionConfig.InventoryURL, - "PORT": i.instructionConfig.InventoryPort, + "HOST": strings.TrimSpace(i.instructionConfig.InventoryURL), + "PORT": strings.TrimSpace(i.instructionConfig.InventoryPort), "CLUSTER_ID": string(host.ClusterID), "HOST_ID": string(*host.ID), - "ROLE": role, + "ROLE": string(role), "INSTALLER": i.instructionConfig.InstallerImage, + "CONTROLLER_IMAGE": i.instructionConfig.ControllerImage, "BOOT_DEVICE": "", "OPENSHIFT_VERSION": cluster.OpenshiftVersion, } + + hostname, _ := common.GetCurrentHostName(host) + if hostname != "" { + cmdArgsTmpl = cmdArgsTmpl + " --host-name {{.HOST_NAME}}" + data["HOST_NAME"] = hostname + } + bootdevice, err := getBootDevice(i.log, i.hwValidator, *host) if err != nil { return nil, err } data["BOOT_DEVICE"] = bootdevice + + t, err := template.New("cmd").Parse(cmdArgsTmpl) + if err != nil { + return nil, err + } + buf := &bytes.Buffer{} if err := t.Execute(buf, data); err != nil { return nil, err } step.Args = []string{"-c", buf.String()} + if err := i.db.Model(&models.Host{}).Where("id = ?", host.ID.String()). 
+ Update("installer_version", i.instructionConfig.InstallerImage).Error; err != nil { + return nil, err + } + return step, nil } diff --git a/internal/host/installcmd_test.go b/internal/host/installcmd_test.go index 27137ad91..bfc19e701 100644 --- a/internal/host/installcmd_test.go +++ b/internal/host/installcmd_test.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + "github.com/filanov/bm-inventory/internal/common" "github.com/filanov/bm-inventory/internal/hardware" "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" @@ -17,11 +18,18 @@ import ( "github.com/pkg/errors" ) +var defaultInstructionConfig = InstructionConfig{ + InventoryURL: "10.35.59.36", + InventoryPort: "30485", + InstallerImage: "quay.io/ocpmetal/assisted-installer:latest", + ControllerImage: "quay.io/ocpmetal/assisted-installer-controller:latest", +} + var _ = Describe("installcmd", func() { var ( ctx = context.Background() host models.Host - cluster models.Cluster + cluster common.Cluster db *gorm.DB installCmd *installCmd clusterId strfmt.UUID @@ -31,20 +39,17 @@ var _ = Describe("installcmd", func() { mockValidator *hardware.MockValidator instructionConfig InstructionConfig disks []*models.Disk + dbName = "install_cmd" ) BeforeEach(func() { - db = prepareDB() + db = common.PrepareTestDB(dbName) ctrl = gomock.NewController(GinkgoT()) mockValidator = hardware.NewMockValidator(ctrl) - instructionConfig = InstructionConfig{ - InventoryURL: "10.35.59.36", - InventoryPort: "30485", - InstallerImage: "quay.io/ocpmetal/assisted-installer:stable", - } + instructionConfig = defaultInstructionConfig installCmd = NewInstallCmd(getTestLog(), db, mockValidator, instructionConfig) cluster = createClusterInDb(db) clusterId = *cluster.ID - host = createHostInDb(db, clusterId, RoleMaster, false) + host = createHostInDb(db, clusterId, models.HostRoleMaster, false, "") validDiskSize := int64(128849018880) disks = []*models.Disk{ {DriveType: "HDD", Name: "sdb", SizeBytes: validDiskSize}, @@ -70,61 
+75,64 @@ var _ = Describe("installcmd", func() { It("get_step_one_master_success", func() { mockValidator.EXPECT().GetHostValidDisks(gomock.Any()).Return(disks, nil).Times(1) stepReply, stepErr = installCmd.GetStep(ctx, &host) - postvalidation(false, false, stepReply, stepErr, RoleMaster) - validateInstallCommand(stepReply, RoleMaster, string(clusterId), string(*host.ID)) + postvalidation(false, false, stepReply, stepErr, models.HostRoleMaster) + validateInstallCommand(stepReply, models.HostRoleMaster, string(clusterId), string(*host.ID), "") + Expect(getHost(*host.ID, clusterId, db).InstallerVersion). + To(Equal(defaultInstructionConfig.InstallerImage)) }) It("get_step_three_master_success", func() { - host2 := createHostInDb(db, clusterId, RoleMaster, false) - host3 := createHostInDb(db, clusterId, RoleMaster, true) + host2 := createHostInDb(db, clusterId, models.HostRoleMaster, false, "") + host3 := createHostInDb(db, clusterId, models.HostRoleMaster, true, "some_hostname") mockValidator.EXPECT().GetHostValidDisks(gomock.Any()).Return(disks, nil).Times(3) stepReply, stepErr = installCmd.GetStep(ctx, &host) - postvalidation(false, false, stepReply, stepErr, RoleMaster) - validateInstallCommand(stepReply, RoleMaster, string(clusterId), string(*host.ID)) + postvalidation(false, false, stepReply, stepErr, models.HostRoleMaster) + validateInstallCommand(stepReply, models.HostRoleMaster, string(clusterId), string(*host.ID), "") stepReply, stepErr = installCmd.GetStep(ctx, &host2) - postvalidation(false, false, stepReply, stepErr, RoleMaster) - validateInstallCommand(stepReply, RoleMaster, string(clusterId), string(*host2.ID)) + postvalidation(false, false, stepReply, stepErr, models.HostRoleMaster) + validateInstallCommand(stepReply, models.HostRoleMaster, string(clusterId), string(*host2.ID), "") stepReply, stepErr = installCmd.GetStep(ctx, &host3) - postvalidation(false, false, stepReply, stepErr, RoleBootstrap) - validateInstallCommand(stepReply, RoleBootstrap, 
string(clusterId), string(*host3.ID)) + postvalidation(false, false, stepReply, stepErr, models.HostRoleBootstrap) + validateInstallCommand(stepReply, models.HostRoleBootstrap, string(clusterId), string(*host3.ID), "some_hostname") }) AfterEach(func() { // cleanup - db.Close() + common.DeleteTestDB(db, dbName) ctrl.Finish() stepReply = nil stepErr = nil }) }) -func createClusterInDb(db *gorm.DB) models.Cluster { +func createClusterInDb(db *gorm.DB) common.Cluster { clusterId := strfmt.UUID(uuid.New().String()) - cluster := models.Cluster{ + cluster := common.Cluster{Cluster: models.Cluster{ ID: &clusterId, OpenshiftVersion: "4.5", - } + }} Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) return cluster } -func createHostInDb(db *gorm.DB, clusterId strfmt.UUID, role string, bootstrap bool) models.Host { +func createHostInDb(db *gorm.DB, clusterId strfmt.UUID, role models.HostRole, bootstrap bool, hostname string) models.Host { id := strfmt.UUID(uuid.New().String()) host := models.Host{ - ID: &id, - ClusterID: clusterId, - Status: swag.String(HostStatusDiscovering), - Role: role, - Bootstrap: bootstrap, - HardwareInfo: defaultHwInfo, + ID: &id, + ClusterID: clusterId, + Status: swag.String(HostStatusDiscovering), + Role: role, + Bootstrap: bootstrap, + Inventory: defaultInventory(), + RequestedHostname: hostname, } Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) return host } -func postvalidation(isstepreplynil bool, issteperrnil bool, expectedstepreply *models.Step, expectedsteperr error, expectedrole string) { +func postvalidation(isstepreplynil bool, issteperrnil bool, expectedstepreply *models.Step, expectedsteperr error, expectedrole models.HostRole) { if issteperrnil { ExpectWithOffset(1, expectedsteperr).Should(HaveOccurred()) } else { @@ -133,15 +141,34 @@ func postvalidation(isstepreplynil bool, issteperrnil bool, expectedstepreply *m if isstepreplynil { ExpectWithOffset(1, expectedstepreply).Should(BeNil()) } else { - ExpectWithOffset(1, 
expectedstepreply.StepType).To(Equal(models.StepTypeExecute)) - ExpectWithOffset(1, strings.Contains(expectedstepreply.Args[1], expectedrole)).To(Equal(true)) + ExpectWithOffset(1, expectedstepreply.StepType).To(Equal(models.StepTypeInstall)) + ExpectWithOffset(1, strings.Contains(expectedstepreply.Args[1], string(expectedrole))).To(Equal(true)) } } -func validateInstallCommand(reply *models.Step, role string, clusterId string, hostId string) { - installCommand := "sudo podman run -v /dev:/dev:rw -v /opt:/opt:rw --privileged --pid=host --net=host " + - "--name assisted-installer quay.io/ocpmetal/assisted-installer:stable --role %s " + - "--cluster-id %s --host 10.35.59.36 --port 30485 " + - "--boot-device /dev/sdb --host-id %s --openshift-version 4.5" - ExpectWithOffset(1, reply.Args[1]).Should(Equal(fmt.Sprintf(installCommand, role, clusterId, hostId))) +func validateInstallCommand(reply *models.Step, role models.HostRole, clusterId string, hostId string, hostname string) { + if hostname != "" { + installCommand := "podman run -v /dev:/dev:rw -v /opt:/opt:rw -v /run/systemd/journal/socket:/run/systemd/journal/socket " + + "--privileged --pid=host " + + "--net=host -v /var/log:/var/log:rw --env PULL_SECRET_TOKEN " + + "--name assisted-installer quay.io/ocpmetal/assisted-installer:latest --role %s " + + "--cluster-id %s --host %s --port %s " + + "--boot-device /dev/sdb --host-id %s --openshift-version 4.5 " + + "--controller-image %s --host-name %s" + ExpectWithOffset(1, reply.Args[1]).Should(Equal(fmt.Sprintf(installCommand, role, clusterId, + defaultInstructionConfig.InventoryURL, defaultInstructionConfig.InventoryPort, hostId, + defaultInstructionConfig.ControllerImage, hostname))) + } else { + installCommand := "podman run -v /dev:/dev:rw -v /opt:/opt:rw -v /run/systemd/journal/socket:/run/systemd/journal/socket " + + "--privileged --pid=host " + + "--net=host -v /var/log:/var/log:rw --env PULL_SECRET_TOKEN " + + "--name assisted-installer 
quay.io/ocpmetal/assisted-installer:latest --role %s " + + "--cluster-id %s --host %s --port %s " + + "--boot-device /dev/sdb --host-id %s --openshift-version 4.5 " + + "--controller-image %s" + ExpectWithOffset(1, reply.Args[1]).Should(Equal(fmt.Sprintf(installCommand, role, clusterId, + defaultInstructionConfig.InventoryURL, defaultInstructionConfig.InventoryPort, hostId, + defaultInstructionConfig.ControllerImage))) + } + ExpectWithOffset(1, reply.StepType).To(Equal(models.StepTypeInstall)) } diff --git a/internal/host/installed.go b/internal/host/installed.go deleted file mode 100644 index 119e8b716..000000000 --- a/internal/host/installed.go +++ /dev/null @@ -1,61 +0,0 @@ -package host - -import ( - "context" - - "github.com/filanov/bm-inventory/models" - "github.com/go-openapi/swag" - "github.com/jinzhu/gorm" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func NewInstalledState(log logrus.FieldLogger, db *gorm.DB) *installedState { - return &installedState{ - log: log, - db: db, - } -} - -type installedState baseState - -func (i *installedState) UpdateHwInfo(ctx context.Context, h *models.Host, hwInfo string) (*UpdateReply, error) { - return nil, errors.Errorf("unable to update hardware info to host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (i *installedState) UpdateInventory(ctx context.Context, h *models.Host, inventory string) (*UpdateReply, error) { - return nil, errors.Errorf("unable to update inventory to host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (i *installedState) UpdateRole(ctx context.Context, h *models.Host, role string, db *gorm.DB) (*UpdateReply, error) { - return nil, errors.Errorf("unable to set role host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (i *installedState) RefreshStatus(ctx context.Context, h *models.Host) (*UpdateReply, error) { - // State in the same state - return &UpdateReply{ - State: HostStatusInstalled, - IsChanged: false, - }, 
nil -} - -func (i *installedState) Install(ctx context.Context, h *models.Host, db *gorm.DB) (*UpdateReply, error) { - return nil, errors.Errorf("unable to install host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (i *installedState) EnableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - // State in the same state - return &UpdateReply{ - State: HostStatusInstalled, - IsChanged: false, - }, nil -} - -func (i *installedState) DisableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - return nil, errors.Errorf("unable to disable host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} diff --git a/internal/host/installed_test.go b/internal/host/installed_test.go deleted file mode 100644 index a37bea0e5..000000000 --- a/internal/host/installed_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package host - -import ( - "context" - "time" - - "github.com/filanov/bm-inventory/models" - "github.com/go-openapi/strfmt" - "github.com/google/uuid" - "github.com/jinzhu/gorm" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("installed_state", func() { - ctx := context.Background() - var state API - var db *gorm.DB - currentState := HostStatusInstalled - var host models.Host - var id, clusterId strfmt.UUID - var updateReply *UpdateReply - var updateErr error - var expectedReply *expect - - BeforeEach(func() { - db = prepareDB() - state = &Manager{installed: NewInstalledState(getTestLog(), db)} - - id = strfmt.UUID(uuid.New().String()) - clusterId = strfmt.UUID(uuid.New().String()) - host = getTestHost(id, clusterId, currentState) - Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) - expectedReply = &expect{expectedState: currentState} - }) - - It("update_hw_info", func() { - updateReply, updateErr = state.UpdateHwInfo(ctx, &host, "some hw info") - expectedReply.expectError = true - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.HardwareInfo).Should(Equal(defaultHwInfo)) - } - }) - - It("update_role", func() { - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", nil) - expectedReply.expectError = true - }) - - Context("refresh_status", func() { - It("keep_alive", func() { - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - It("keep_alive_timeout", func() { - host.UpdatedAt = strfmt.DateTime(time.Now().Add(-time.Hour)) - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - }) - - It("install", func() { - updateReply, updateErr = state.Install(ctx, &host, nil) - expectedReply.expectError = true - }) - - It("enable_host", func() { - updateReply, updateErr = state.EnableHost(ctx, &host) - }) - - It("disable_host", func() { - updateReply, updateErr = state.DisableHost(ctx, &host) - expectedReply.expectError = true - }) - - AfterEach(func() { - postValidation(expectedReply, currentState, db, id, clusterId, updateReply, updateErr) - // cleanup - db.Close() - expectedReply = nil - updateReply = nil - updateErr = nil - }) -}) diff --git a/internal/host/installing.go 
b/internal/host/installing.go deleted file mode 100644 index 790b5f1cc..000000000 --- a/internal/host/installing.go +++ /dev/null @@ -1,62 +0,0 @@ -package host - -import ( - "context" - - "github.com/filanov/bm-inventory/models" - "github.com/go-openapi/swag" - "github.com/jinzhu/gorm" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func NewInstallingState(log logrus.FieldLogger, db *gorm.DB) *installingState { - return &installingState{ - log: log, - db: db, - } -} - -type installingState baseState - -func (i *installingState) UpdateHwInfo(ctx context.Context, h *models.Host, hwInfo string) (*UpdateReply, error) { - return nil, errors.Errorf("unable to update hardware info to host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (i *installingState) UpdateInventory(ctx context.Context, h *models.Host, inventory string) (*UpdateReply, error) { - return nil, errors.Errorf("unable to update inventory to host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (i *installingState) UpdateRole(ctx context.Context, h *models.Host, role string, db *gorm.DB) (*UpdateReply, error) { - return nil, errors.Errorf("unable to update role to host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (i *installingState) RefreshStatus(ctx context.Context, h *models.Host) (*UpdateReply, error) { - // State in the same state - return &UpdateReply{ - State: HostStatusInstalling, - IsChanged: false, - }, nil -} - -func (i *installingState) Install(ctx context.Context, h *models.Host, db *gorm.DB) (*UpdateReply, error) { - // TODO: maybe need to jump to the next sub installation state - return nil, errors.Errorf("unable to install host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (i *installingState) EnableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - // State in the same state - return &UpdateReply{ - State: HostStatusInstalling, - IsChanged: false, - }, nil -} - -func (i 
*installingState) DisableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - return nil, errors.Errorf("unable to disable host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} diff --git a/internal/host/installing_test.go b/internal/host/installing_test.go deleted file mode 100644 index 617dfa3d4..000000000 --- a/internal/host/installing_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package host - -import ( - "context" - "time" - - "github.com/filanov/bm-inventory/models" - "github.com/go-openapi/strfmt" - "github.com/google/uuid" - "github.com/jinzhu/gorm" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -var _ = Describe("installing_state", func() { - ctx := context.Background() - var state API - var db *gorm.DB - currentState := HostStatusInstalling - var host models.Host - var id, clusterId strfmt.UUID - var updateReply *UpdateReply - var updateErr error - var expectedReply *expect - - BeforeEach(func() { - db = prepareDB() - state = &Manager{installing: NewInstallingState(getTestLog(), db)} - - id = strfmt.UUID(uuid.New().String()) - clusterId = strfmt.UUID(uuid.New().String()) - host = getTestHost(id, clusterId, currentState) - Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) - expectedReply = &expect{expectedState: currentState} - }) - - It("update_hw_info", func() { - updateReply, updateErr = state.UpdateHwInfo(ctx, &host, "some hw info") - expectedReply.expectError = true - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.HardwareInfo).Should(Equal(defaultHwInfo)) - } - }) - - It("update_role", func() { - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", nil) - expectedReply.expectError = true - }) - - Context("refresh_status", func() { - It("keep_alive", func() { - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - It("keep_alive_timeout", func() { - host.UpdatedAt = strfmt.DateTime(time.Now().Add(-time.Hour)) - updateReply, updateErr = 
state.RefreshStatus(ctx, &host) - }) - }) - - It("install", func() { - updateReply, updateErr = state.Install(ctx, &host, nil) - expectedReply.expectError = true - }) - - It("enable_host", func() { - updateReply, updateErr = state.EnableHost(ctx, &host) - }) - - It("disable_host", func() { - updateReply, updateErr = state.DisableHost(ctx, &host) - expectedReply.expectError = true - }) - - AfterEach(func() { - postValidation(expectedReply, currentState, db, id, clusterId, updateReply, updateErr) - // cleanup - db.Close() - expectedReply = nil - updateReply = nil - updateErr = nil - }) -}) diff --git a/internal/host/instructionmanager.go b/internal/host/instructionmanager.go index b94ca86a7..a3b9c6cb1 100644 --- a/internal/host/instructionmanager.go +++ b/internal/host/instructionmanager.go @@ -4,6 +4,8 @@ import ( "context" "fmt" + "github.com/filanov/bm-inventory/internal/connectivity" + "github.com/jinzhu/gorm" "github.com/filanov/bm-inventory/internal/hardware" @@ -20,7 +22,17 @@ type InstructionApi interface { GetNextSteps(ctx context.Context, host *models.Host) (models.Steps, error) } -type stateToStepsMap map[string][]CommandGetter +const ( + defaultNextInstructionInSec = int64(60) + defaultBackedOffInstructionInSec = int64(120) +) + +type StepsStruct struct { + Commands []CommandGetter + NextStepInSec int64 +} + +type stateToStepsMap map[string]StepsStruct type InstructionManager struct { log logrus.FieldLogger @@ -28,29 +40,38 @@ type InstructionManager struct { stateToSteps stateToStepsMap } type InstructionConfig struct { - InventoryURL string `envconfig:"INVENTORY_URL" default:"10.35.59.36"` - InventoryPort string `envconfig:"INVENTORY_PORT" default:"30485"` - InstallerImage string `envconfig:"INSTALLER_IMAGE" default:"quay.io/ocpmetal/assisted-installer:stable"` + InventoryURL string `envconfig:"INVENTORY_URL" default:"10.35.59.36"` + InventoryPort string `envconfig:"INVENTORY_PORT" default:"30485"` + InstallerImage string `envconfig:"INSTALLER_IMAGE" 
default:"quay.io/ocpmetal/assisted-installer:latest"` + ControllerImage string `envconfig:"CONTROLLER_IMAGE" default:"quay.io/ocpmetal/assisted-installer-controller:latest"` + ConnectivityCheckImage string `envconfig:"CONNECTIVITY_CHECK_IMAGE" default:"quay.io/ocpmetal/connectivity_check:latest"` + InventoryImage string `envconfig:"INVENTORY_IMAGE" default:"quay.io/ocpmetal/inventory:latest"` + FreeAddressesImage string `envconfig:"FREE_ADDRESSES_IMAGE" default:"quay.io/ocpmetal/free_addresses:latest"` } -func NewInstructionManager(log logrus.FieldLogger, db *gorm.DB, hwValidator hardware.Validator, instructionConfig InstructionConfig) *InstructionManager { - connectivityCmd := NewConnectivityCheckCmd(log, db, hwValidator) +func NewInstructionManager(log logrus.FieldLogger, db *gorm.DB, hwValidator hardware.Validator, instructionConfig InstructionConfig, connectivityValidator connectivity.Validator) *InstructionManager { + connectivityCmd := NewConnectivityCheckCmd(log, db, connectivityValidator, instructionConfig.ConnectivityCheckImage) installCmd := NewInstallCmd(log, db, hwValidator, instructionConfig) - hwCmd := NewHwInfoCmd(log) - inventoryCmd := NewInventoryCmd(log) + inventoryCmd := NewInventoryCmd(log, instructionConfig.InventoryImage) + freeAddressesCmd := NewFreeAddressesCmd(log, instructionConfig.FreeAddressesImage) + resetCmd := NewResetInstallationCmd(log) + stopCmd := NewStopInstallationCmd(log) return &InstructionManager{ log: log, db: db, stateToSteps: stateToStepsMap{ - HostStatusKnown: {connectivityCmd}, - HostStatusInsufficient: {connectivityCmd}, - HostStatusDisconnected: {hwCmd, inventoryCmd, connectivityCmd}, - HostStatusDiscovering: {hwCmd, inventoryCmd, connectivityCmd}, - HostStatusInstalling: {installCmd}, + HostStatusKnown: {[]CommandGetter{connectivityCmd, freeAddressesCmd}, defaultNextInstructionInSec}, + HostStatusInsufficient: {[]CommandGetter{inventoryCmd, connectivityCmd, freeAddressesCmd}, defaultNextInstructionInSec}, + 
HostStatusDisconnected: {[]CommandGetter{inventoryCmd, connectivityCmd}, defaultBackedOffInstructionInSec}, + HostStatusDiscovering: {[]CommandGetter{inventoryCmd, connectivityCmd}, defaultNextInstructionInSec}, + HostStatusPendingForInput: {[]CommandGetter{inventoryCmd, connectivityCmd, freeAddressesCmd}, defaultNextInstructionInSec}, + HostStatusInstalling: {[]CommandGetter{installCmd}, defaultBackedOffInstructionInSec}, + HostStatusDisabled: {[]CommandGetter{}, defaultBackedOffInstructionInSec}, + HostStatusResetting: {[]CommandGetter{resetCmd}, defaultBackedOffInstructionInSec}, + HostStatusError: {[]CommandGetter{stopCmd}, defaultBackedOffInstructionInSec}, }, } - } func (i *InstructionManager) GetNextSteps(ctx context.Context, host *models.Host) (models.Steps, error) { @@ -59,21 +80,25 @@ func (i *InstructionManager) GetNextSteps(ctx context.Context, host *models.Host ClusterID := host.ClusterID HostID := host.ID HostStatus := swag.StringValue(host.Status) - log.Infof("GetNextSteps cluster: ,<%s> host: <%s>, host status: <%s>", ClusterID, HostID, HostStatus) returnSteps := models.Steps{} if cmdsMap, ok := i.stateToSteps[HostStatus]; ok { //need to add the step id - for _, cmd := range cmdsMap { + returnSteps.NextInstructionSeconds = cmdsMap.NextStepInSec + for _, cmd := range cmdsMap.Commands { step, err := cmd.GetStep(ctx, host) if err != nil { return returnSteps, err } - step.StepID = createStepID(step.StepType) - returnSteps = append(returnSteps, step) + if step.StepID == "" { + step.StepID = createStepID(step.StepType) + } + returnSteps.Instructions = append(returnSteps.Instructions, step) } + } else { + returnSteps.NextInstructionSeconds = defaultNextInstructionInSec } logSteps(returnSteps, ClusterID, HostID, log) return returnSteps, nil @@ -84,10 +109,10 @@ func createStepID(stepType models.StepType) string { } func logSteps(steps models.Steps, clusterId strfmt.UUID, hostId *strfmt.UUID, log logrus.FieldLogger) { - if len(steps) == 0 { + if 
len(steps.Instructions) == 0 { log.Infof("No steps required for cluster <%s> host <%s>", clusterId, hostId) } - for _, step := range steps { + for _, step := range steps.Instructions { log.Infof("Submitting step <%s> id <%s> to cluster <%s> host <%s> Command: <%s> Arguments: <%+v>", step.StepType, step.StepID, clusterId, hostId, step.Command, step.Args) } diff --git a/internal/host/instructionmanager_test.go b/internal/host/instructionmanager_test.go index 6b06c04e3..7b11c636f 100644 --- a/internal/host/instructionmanager_test.go +++ b/internal/host/instructionmanager_test.go @@ -3,6 +3,8 @@ package host import ( "context" + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/internal/events" "github.com/filanov/bm-inventory/internal/hardware" "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" @@ -19,77 +21,88 @@ var _ = Describe("instructionmanager", func() { ctx = context.Background() host models.Host db *gorm.DB + mockEvents *events.MockHandler stepsReply models.Steps hostId, clusterId strfmt.UUID stepsErr error instMng *InstructionManager ctrl *gomock.Controller - mockValidator *hardware.MockValidator + hwValidator *hardware.MockValidator instructionConfig InstructionConfig + dbName = "instructionmanager" ) BeforeEach(func() { - db = prepareDB() + db = common.PrepareTestDB(dbName) ctrl = gomock.NewController(GinkgoT()) - mockValidator = hardware.NewMockValidator(ctrl) - instMng = NewInstructionManager(getTestLog(), db, mockValidator, instructionConfig) + mockEvents = events.NewMockHandler(ctrl) + hwValidator = hardware.NewMockValidator(ctrl) + instMng = NewInstructionManager(getTestLog(), db, hwValidator, instructionConfig, nil) hostId = strfmt.UUID(uuid.New().String()) clusterId = strfmt.UUID(uuid.New().String()) - cluster := models.Cluster{ID: &clusterId} + cluster := common.Cluster{Cluster: models.Cluster{ID: &clusterId}} Expect(db.Create(&cluster).Error).ShouldNot(HaveOccurred()) host = 
getTestHost(hostId, clusterId, "unknown invalid state") - host.Role = RoleMaster + host.Role = models.HostRoleMaster Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) }) Context("get_next_steps", func() { It("invalid_host_state", func() { stepsReply, stepsErr = instMng.GetNextSteps(ctx, &host) - Expect(stepsReply).To(HaveLen(0)) + Expect(stepsReply.Instructions).To(HaveLen(0)) Expect(stepsErr).Should(BeNil()) }) It("discovering", func() { - checkStepsByState(HostStatusDiscovering, &host, db, instMng, mockValidator, ctx, - []models.StepType{models.StepTypeHardwareInfo, models.StepTypeInventory, models.StepTypeConnectivityCheck}) + checkStepsByState(HostStatusDiscovering, &host, db, mockEvents, instMng, hwValidator, ctx, + []models.StepType{models.StepTypeInventory, models.StepTypeConnectivityCheck}) }) It("known", func() { - checkStepsByState(HostStatusKnown, &host, db, instMng, mockValidator, ctx, - []models.StepType{models.StepTypeConnectivityCheck}) + checkStepsByState(HostStatusKnown, &host, db, mockEvents, instMng, hwValidator, ctx, + []models.StepType{models.StepTypeConnectivityCheck, models.StepTypeFreeNetworkAddresses}) }) It("disconnected", func() { - checkStepsByState(HostStatusDisconnected, &host, db, instMng, mockValidator, ctx, - []models.StepType{models.StepTypeHardwareInfo, models.StepTypeInventory, models.StepTypeConnectivityCheck}) + checkStepsByState(HostStatusDisconnected, &host, db, mockEvents, instMng, hwValidator, ctx, + []models.StepType{models.StepTypeInventory, models.StepTypeConnectivityCheck}) }) It("insufficient", func() { - checkStepsByState(HostStatusInsufficient, &host, db, instMng, mockValidator, ctx, - []models.StepType{models.StepTypeConnectivityCheck}) + checkStepsByState(HostStatusInsufficient, &host, db, mockEvents, instMng, hwValidator, ctx, + []models.StepType{models.StepTypeInventory, models.StepTypeConnectivityCheck, models.StepTypeFreeNetworkAddresses}) + }) + It("pending-for-input", func() { + 
checkStepsByState(HostStatusPendingForInput, &host, db, mockEvents, instMng, hwValidator, ctx, + []models.StepType{models.StepTypeInventory, models.StepTypeConnectivityCheck, models.StepTypeFreeNetworkAddresses}) }) It("error", func() { - checkStepsByState(HostStatusError, &host, db, instMng, mockValidator, ctx, - []models.StepType{}) + checkStepsByState(HostStatusError, &host, db, mockEvents, instMng, hwValidator, ctx, + []models.StepType{models.StepTypeExecute}) }) It("installing", func() { - checkStepsByState(HostStatusInstalling, &host, db, instMng, mockValidator, ctx, - []models.StepType{models.StepTypeExecute}) + checkStepsByState(HostStatusInstalling, &host, db, mockEvents, instMng, hwValidator, ctx, + []models.StepType{models.StepTypeInstall}) + }) + It("reset", func() { + checkStepsByState(HostStatusResetting, &host, db, mockEvents, instMng, hwValidator, ctx, + []models.StepType{models.StepTypeResetInstallation}) }) - }) AfterEach(func() { // cleanup - db.Close() + common.DeleteTestDB(db, dbName) ctrl.Finish() - stepsReply = nil + stepsReply = models.Steps{} stepsErr = nil }) }) -func checkStepsByState(state string, host *models.Host, db *gorm.DB, instMng *InstructionManager, mockValidator *hardware.MockValidator, ctx context.Context, +func checkStepsByState(state string, host *models.Host, db *gorm.DB, mockEvents *events.MockHandler, instMng *InstructionManager, mockValidator *hardware.MockValidator, ctx context.Context, expectedStepTypes []models.StepType) { - updateReply, updateErr := updateState(getTestLog(), state, "", host, db) + mockEvents.EXPECT().AddEvent(gomock.Any(), host.ID.String(), common.GetEventSeverityFromHostStatus(state), gomock.Any(), gomock.Any(), host.ClusterID.String()) + updateReply, updateErr := updateHostStatus(ctx, getTestLog(), db, mockEvents, host.ClusterID, *host.ID, *host.Status, state, "") ExpectWithOffset(1, updateErr).ShouldNot(HaveOccurred()) - ExpectWithOffset(1, updateReply.IsChanged).Should(BeTrue()) + 
ExpectWithOffset(1, updateReply).ShouldNot(BeNil()) h := getHost(*host.ID, host.ClusterID, db) ExpectWithOffset(1, swag.StringValue(h.Status)).Should(Equal(state)) validDiskSize := int64(128849018880) @@ -100,8 +113,14 @@ func checkStepsByState(state string, host *models.Host, db *gorm.DB, instMng *In } mockValidator.EXPECT().GetHostValidDisks(gomock.Any()).Return(disks, nil).AnyTimes() stepsReply, stepsErr := instMng.GetNextSteps(ctx, h) - ExpectWithOffset(1, stepsReply).To(HaveLen(len(expectedStepTypes))) - for i, step := range stepsReply { + ExpectWithOffset(1, stepsReply.Instructions).To(HaveLen(len(expectedStepTypes))) + if stateValues, ok := instMng.stateToSteps[state]; ok { + Expect(stepsReply.NextInstructionSeconds).Should(Equal(stateValues.NextStepInSec)) + } else { + Expect(stepsReply.NextInstructionSeconds).Should(Equal(defaultNextInstructionInSec)) + } + + for i, step := range stepsReply.Instructions { ExpectWithOffset(1, step.StepType).Should(Equal(expectedStepTypes[i])) } ExpectWithOffset(1, stepsErr).ShouldNot(HaveOccurred()) diff --git a/internal/host/insufficient.go b/internal/host/insufficient.go deleted file mode 100644 index accc15f9d..000000000 --- a/internal/host/insufficient.go +++ /dev/null @@ -1,76 +0,0 @@ -package host - -import ( - "context" - - "github.com/filanov/bm-inventory/internal/hardware" - "github.com/filanov/bm-inventory/models" - logutil "github.com/filanov/bm-inventory/pkg/log" - "github.com/go-openapi/swag" - "github.com/jinzhu/gorm" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func NewInsufficientState(log logrus.FieldLogger, db *gorm.DB, hwValidator hardware.Validator) *insufficientState { - return &insufficientState{ - baseState: baseState{ - log: log, - db: db, - }, - hwValidator: hwValidator, - } -} - -type insufficientState struct { - baseState - hwValidator hardware.Validator -} - -func (i *insufficientState) UpdateHwInfo(ctx context.Context, h *models.Host, hwInfo string) (*UpdateReply, error) { - 
h.HardwareInfo = hwInfo - return updateHwInfo(logutil.FromContext(ctx, i.log), i.hwValidator, h, i.db) -} - -func (d *insufficientState) UpdateInventory(ctx context.Context, h *models.Host, inventory string) (*UpdateReply, error) { - h.Inventory = inventory - return updateInventory(logutil.FromContext(ctx, d.log), d.hwValidator, h, d.db) -} - -func (i *insufficientState) UpdateRole(ctx context.Context, h *models.Host, role string, db *gorm.DB) (*UpdateReply, error) { - log := logutil.FromContext(ctx, i.log) - cdb := i.db - if db != nil { - cdb = db - } - reply, err := i.hwValidator.IsSufficient(h) - if err != nil { - return nil, err - } - if !reply.IsSufficient { - return updateStateWithParams(log, HostStatusInsufficient, reply.Reason, h, cdb, - "role", role) - } - return updateStateWithParams(log, HostStatusKnown, "", h, cdb, "role", role) -} - -func (i *insufficientState) RefreshStatus(ctx context.Context, h *models.Host) (*UpdateReply, error) { - return updateByKeepAlive(logutil.FromContext(ctx, i.log), h, i.db) -} - -func (i *insufficientState) Install(ctx context.Context, h *models.Host, db *gorm.DB) (*UpdateReply, error) { - return nil, errors.Errorf("unable to install host <%s> in <%s> status", - h.ID, swag.StringValue(h.Status)) -} - -func (i *insufficientState) EnableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - // State in the same state - return &UpdateReply{ - State: HostStatusInsufficient, - IsChanged: false, - }, nil -} - -func (i *insufficientState) DisableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - return updateState(logutil.FromContext(ctx, i.log), HostStatusDisabled, statusInfoDisabled, h, i.db) -} diff --git a/internal/host/insufficient_test.go b/internal/host/insufficient_test.go deleted file mode 100644 index a711bde6c..000000000 --- a/internal/host/insufficient_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package host - -import ( - "context" - "time" - - 
"github.com/filanov/bm-inventory/internal/hardware" - "github.com/filanov/bm-inventory/models" - "github.com/go-openapi/strfmt" - "github.com/golang/mock/gomock" - "github.com/google/uuid" - "github.com/jinzhu/gorm" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/pkg/errors" -) - -var _ = Describe("insufficient_state", func() { - var ( - ctx = context.Background() - state API - db *gorm.DB - currentState = HostStatusInsufficient - host models.Host - id, clusterId strfmt.UUID - updateReply *UpdateReply - updateErr error - expectedReply *expect - ctrl *gomock.Controller - mockValidator *hardware.MockValidator - ) - - BeforeEach(func() { - db = prepareDB() - ctrl = gomock.NewController(GinkgoT()) - mockValidator = hardware.NewMockValidator(ctrl) - state = &Manager{insufficient: NewInsufficientState(getTestLog(), db, mockValidator)} - - id = strfmt.UUID(uuid.New().String()) - clusterId = strfmt.UUID(uuid.New().String()) - host = getTestHost(id, clusterId, currentState) - Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) - expectedReply = &expect{expectedState: currentState} - }) - - Context("update hw info", func() { - It("update", func() { - updateReply, updateErr = state.UpdateHwInfo(ctx, &host, "some hw info") - expectedReply.expectedState = HostStatusInsufficient - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Inventory).Should(Equal("")) - Expect(h.HardwareInfo).Should(Equal("some hw info")) - } - }) - }) - - Context("update_inventory", func() { - It("sufficient_hw", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). 
- Return(&hardware.IsSufficientReply{IsSufficient: true}, nil).Times(1) - updateReply, updateErr = state.UpdateInventory(ctx, &host, "some hw info") - expectedReply.expectedState = HostStatusKnown - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.HardwareInfo).Should(Equal(defaultHwInfo)) - Expect(h.Inventory).Should(Equal("some hw info")) - } - }) - It("insufficient_hw", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(&hardware.IsSufficientReply{IsSufficient: false, Reason: "because"}, nil).Times(1) - updateReply, updateErr = state.UpdateInventory(ctx, &host, "some hw info") - expectedReply.expectedState = HostStatusInsufficient - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.HardwareInfo).Should(Equal(defaultHwInfo)) - Expect(h.Inventory).Should(Equal("some hw info")) - Expect(*h.StatusInfo).Should(Equal("because")) - } - }) - It("hw_validation_error", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(nil, errors.New("error")).Times(1) - updateReply, updateErr = state.UpdateInventory(ctx, &host, "some hw info") - expectedReply.expectError = true - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.HardwareInfo).Should(Equal(defaultHwInfo)) - } - }) - }) - - Context("update_role", func() { - It("sufficient_hw", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(&hardware.IsSufficientReply{IsSufficient: true}, nil).Times(1) - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", nil) - expectedReply.expectedState = HostStatusKnown - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("master")) - } - }) - It("insufficient_hw", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). 
- Return(&hardware.IsSufficientReply{IsSufficient: false, Reason: "because"}, nil).Times(1) - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", nil) - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("master")) - Expect(*h.StatusInfo).Should(Equal("because")) - } - }) - It("hw_validation_error", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(nil, errors.New("error")).Times(1) - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", nil) - expectedReply.expectError = true - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("")) - } - }) - It("master_with_tx", func() { - tx := db.Begin() - Expect(tx.Error).ShouldNot(HaveOccurred()) - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(&hardware.IsSufficientReply{IsSufficient: false}, nil).Times(1) - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", tx) - Expect(tx.Rollback().Error).ShouldNot(HaveOccurred()) - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("")) - } - }) - }) - - Context("refresh_status", func() { - It("keep_alive", func() { - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - It("keep_alive_timeout", func() { - host.UpdatedAt = strfmt.DateTime(time.Now().Add(-time.Hour)) - expectedReply.expectedState = HostStatusDisconnected - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - }) - - It("install", func() { - updateReply, updateErr = state.Install(ctx, &host, nil) - expectedReply.expectError = true - }) - - It("enable_host", func() { - updateReply, updateErr = state.EnableHost(ctx, &host) - }) - - It("disable_host", func() { - updateReply, updateErr = state.DisableHost(ctx, &host) - expectedReply.expectedState = HostStatusDisabled - }) - - AfterEach(func() { - ctrl.Finish() - postValidation(expectedReply, currentState, db, id, clusterId, updateReply, updateErr) 
- - // cleanup - db.Close() - expectedReply = nil - updateReply = nil - updateErr = nil - }) -}) diff --git a/internal/host/inventorycmd.go b/internal/host/inventorycmd.go index f3ff4578d..f41db32da 100644 --- a/internal/host/inventorycmd.go +++ b/internal/host/inventorycmd.go @@ -8,16 +8,31 @@ import ( "github.com/filanov/bm-inventory/models" ) -type inventoryCmd baseCmd +type inventoryCmd struct { + baseCmd + inventoryImage string +} -func NewInventoryCmd(log logrus.FieldLogger) *inventoryCmd { +func NewInventoryCmd(log logrus.FieldLogger, inventoryImage string) *inventoryCmd { return &inventoryCmd{ - log: log, + baseCmd: baseCmd{log: log}, + inventoryImage: inventoryImage, } } func (h *inventoryCmd) GetStep(ctx context.Context, host *models.Host) (*models.Step, error) { - step := &models.Step{} - step.StepType = models.StepTypeInventory + step := &models.Step{ + StepType: models.StepTypeInventory, + Command: "podman", + Args: []string{ + "run", "--privileged", "--net=host", "--rm", "--quiet", + "-v", "/var/log:/var/log", + "-v", "/run/udev:/run/udev", + "-v", "/dev/disk:/dev/disk", + "-v", "/run/systemd/journal/socket:/run/systemd/journal/socket", + h.inventoryImage, + "inventory", + }, + } return step, nil } diff --git a/internal/host/inventorycmd_test.go b/internal/host/inventorycmd_test.go index 40bb925ba..a7dfcffde 100644 --- a/internal/host/inventorycmd_test.go +++ b/internal/host/inventorycmd_test.go @@ -3,6 +3,8 @@ package host import ( "context" + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" "github.com/google/uuid" @@ -19,10 +21,11 @@ var _ = Describe("inventory", func() { var id, clusterId strfmt.UUID var stepReply *models.Step var stepErr error + dbName := "inventorycmd" BeforeEach(func() { - db = prepareDB() - invCmd = NewInventoryCmd(getTestLog()) + db = common.PrepareTestDB(dbName) + invCmd = NewInventoryCmd(getTestLog(), "quay.io/ocpmetal/inventory:latest") id = 
strfmt.UUID(uuid.New().String()) clusterId = strfmt.UUID(uuid.New().String()) @@ -38,7 +41,7 @@ var _ = Describe("inventory", func() { AfterEach(func() { // cleanup - db.Close() + common.DeleteTestDB(db, dbName) stepReply = nil stepErr = nil }) diff --git a/internal/host/known.go b/internal/host/known.go deleted file mode 100644 index e51768548..000000000 --- a/internal/host/known.go +++ /dev/null @@ -1,81 +0,0 @@ -package host - -import ( - "context" - - "github.com/filanov/bm-inventory/internal/hardware" - "github.com/filanov/bm-inventory/models" - logutil "github.com/filanov/bm-inventory/pkg/log" - "github.com/jinzhu/gorm" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func NewKnownState(log logrus.FieldLogger, db *gorm.DB, hwValidator hardware.Validator) *knownState { - return &knownState{ - baseState: baseState{ - log: log, - db: db, - }, - hwValidator: hwValidator, - } -} - -type knownState struct { - baseState - hwValidator hardware.Validator -} - -func (k *knownState) UpdateHwInfo(ctx context.Context, h *models.Host, hwInfo string) (*UpdateReply, error) { - h.HardwareInfo = hwInfo - return updateHwInfo(logutil.FromContext(ctx, k.log), k.hwValidator, h, k.db) -} - -func (k *knownState) UpdateInventory(ctx context.Context, h *models.Host, inventory string) (*UpdateReply, error) { - h.Inventory = inventory - return updateInventory(logutil.FromContext(ctx, k.log), k.hwValidator, h, k.db) -} - -func (k *knownState) UpdateRole(ctx context.Context, h *models.Host, role string, db *gorm.DB) (*UpdateReply, error) { - log := logutil.FromContext(ctx, k.log) - cdb := k.db - if db != nil { - cdb = db - } - h.Role = role - reply, err := k.hwValidator.IsSufficient(h) - if err != nil { - return nil, err - } - if !reply.IsSufficient { - return updateStateWithParams(log, HostStatusInsufficient, reply.Reason, h, cdb, "role", role) - } - return updateStateWithParams(log, HostStatusKnown, "", h, cdb, "role", role) -} - -func (k *knownState) RefreshStatus(ctx 
context.Context, h *models.Host) (*UpdateReply, error) { - return updateByKeepAlive(logutil.FromContext(ctx, k.log), h, k.db) -} - -func (k *knownState) Install(ctx context.Context, h *models.Host, db *gorm.DB) (*UpdateReply, error) { - if h.Role == "" { - return nil, errors.Errorf("unable to install host <%s> without a role", h.ID) - } - cdb := k.db - if db != nil { - cdb = db - } - return updateState(logutil.FromContext(ctx, k.log), HostStatusInstalling, statusInfoInstalling, h, cdb) -} - -func (k *knownState) EnableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - // State in the same state - return &UpdateReply{ - State: HostStatusKnown, - IsChanged: false, - }, nil -} - -func (k *knownState) DisableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { - return updateState(logutil.FromContext(ctx, k.log), HostStatusDisabled, statusInfoDisabled, h, k.db) -} diff --git a/internal/host/known_test.go b/internal/host/known_test.go deleted file mode 100644 index cf1b7a8a8..000000000 --- a/internal/host/known_test.go +++ /dev/null @@ -1,191 +0,0 @@ -package host - -import ( - "context" - "time" - - "github.com/filanov/bm-inventory/internal/hardware" - "github.com/filanov/bm-inventory/models" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/golang/mock/gomock" - "github.com/google/uuid" - "github.com/jinzhu/gorm" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/pkg/errors" -) - -var _ = Describe("known_state", func() { - var ( - ctx = context.Background() - state API - db *gorm.DB - currentState = HostStatusKnown - host models.Host - id, clusterId strfmt.UUID - updateReply *UpdateReply - updateErr error - expectedReply *expect - ctrl *gomock.Controller - mockValidator *hardware.MockValidator - ) - - BeforeEach(func() { - db = prepareDB() - ctrl = gomock.NewController(GinkgoT()) - mockValidator = hardware.NewMockValidator(ctrl) - state = &Manager{known: NewKnownState(getTestLog(), db, mockValidator)} - - id = strfmt.UUID(uuid.New().String()) - clusterId = strfmt.UUID(uuid.New().String()) - host = getTestHost(id, clusterId, currentState) - Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) - expectedReply = &expect{expectedState: currentState} - }) - - Context("update hw info", func() { - It("update", func() { - updateReply, updateErr = state.UpdateHwInfo(ctx, &host, "some hw info") - expectedReply.expectedState = HostStatusKnown - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Inventory).Should(Equal("")) - Expect(h.HardwareInfo).Should(Equal("some hw info")) - } - }) - }) - - Context("update_inventory", func() { - It("sufficient_hw", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(&hardware.IsSufficientReply{IsSufficient: true}, nil).Times(1) - updateReply, updateErr = state.UpdateInventory(ctx, &host, "some hw info") - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.HardwareInfo).Should(Equal(defaultHwInfo)) - Expect(h.Inventory).Should(Equal("some hw info")) - } - }) - It("insufficient_hw", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). 
- Return(&hardware.IsSufficientReply{IsSufficient: false, Reason: "because"}, nil).Times(1) - updateReply, updateErr = state.UpdateInventory(ctx, &host, "some hw info") - expectedReply.expectedState = HostStatusInsufficient - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.HardwareInfo).Should(Equal(defaultHwInfo)) - Expect(h.Inventory).Should(Equal("some hw info")) - Expect(*h.StatusInfo).Should(Equal("because")) - } - }) - It("hw_validation_error", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(nil, errors.New("error")).Times(1) - updateReply, updateErr = state.UpdateInventory(ctx, &host, "some hw info") - expectedReply.expectError = true - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Inventory).Should(Equal("")) - Expect(h.HardwareInfo).Should(Equal(defaultHwInfo)) - } - }) - }) - - Context("update_role", func() { - It("sufficient_hw", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(&hardware.IsSufficientReply{IsSufficient: true}, nil).Times(1) - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", nil) - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("master")) - } - }) - It("insufficient_hw", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(&hardware.IsSufficientReply{IsSufficient: false, Reason: "because"}, nil).Times(1) - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", nil) - expectedReply.expectedState = HostStatusInsufficient - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("master")) - Expect(*h.StatusInfo).Should(Equal("because")) - } - }) - It("hw_validation_error", func() { - mockValidator.EXPECT().IsSufficient(gomock.Any()). 
- Return(nil, errors.New("error")).Times(1) - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", nil) - expectedReply.expectError = true - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("")) - } - }) - It("master_with_tx", func() { - tx := db.Begin() - Expect(tx.Error).ShouldNot(HaveOccurred()) - mockValidator.EXPECT().IsSufficient(gomock.Any()). - Return(&hardware.IsSufficientReply{IsSufficient: true}, nil).Times(1) - updateReply, updateErr = state.UpdateRole(ctx, &host, "master", tx) - Expect(tx.Rollback().Error).ShouldNot(HaveOccurred()) - expectedReply.postCheck = func() { - h := getHost(id, clusterId, db) - Expect(h.Role).Should(Equal("")) - } - }) - }) - - Context("refresh_status", func() { - It("keep_alive", func() { - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - It("keep_alive_timeout", func() { - host.UpdatedAt = strfmt.DateTime(time.Now().Add(-time.Hour)) - expectedReply.expectedState = HostStatusDisconnected - updateReply, updateErr = state.RefreshStatus(ctx, &host) - }) - }) - - Context("install", func() { - It("no_role", func() { - updateReply, updateErr = state.Install(ctx, &host, nil) - expectedReply.expectError = true - }) - It("with_role", func() { - host.Role = "master" - updateReply, updateErr = state.Install(ctx, &host, nil) - expectedReply.expectedState = HostStatusInstalling - }) - It("with_role_and_transaction", func() { - tx := db.Begin() - Expect(tx.Error).ShouldNot(HaveOccurred()) - host.Role = "master" - updateReply, updateErr = state.Install(ctx, &host, tx) - expectedReply = nil - Expect(tx.Rollback().Error).ShouldNot(HaveOccurred()) - h := getHost(id, clusterId, db) - Expect(swag.StringValue(h.Status)).Should(Equal(currentState)) - }) - }) - - It("enable_host", func() { - updateReply, updateErr = state.EnableHost(ctx, &host) - }) - - It("disable_host", func() { - updateReply, updateErr = state.DisableHost(ctx, &host) - expectedReply.expectedState = 
HostStatusDisabled - }) - - AfterEach(func() { - ctrl.Finish() - postValidation(expectedReply, currentState, db, id, clusterId, updateReply, updateErr) - // cleanup - db.Close() - expectedReply = nil - updateReply = nil - updateErr = nil - }) -}) diff --git a/internal/host/mock_host_api.go b/internal/host/mock_host_api.go index 7ee4a401e..36635ea7b 100644 --- a/internal/host/mock_host_api.go +++ b/internal/host/mock_host_api.go @@ -8,387 +8,338 @@ import ( context "context" reflect "reflect" + common "github.com/filanov/bm-inventory/internal/common" models "github.com/filanov/bm-inventory/models" gomock "github.com/golang/mock/gomock" gorm "github.com/jinzhu/gorm" ) -// MockStateAPI is a mock of StateAPI interface. -type MockStateAPI struct { +// MockAPI is a mock of API interface +type MockAPI struct { ctrl *gomock.Controller - recorder *MockStateAPIMockRecorder + recorder *MockAPIMockRecorder } -// MockStateAPIMockRecorder is the mock recorder for MockStateAPI. -type MockStateAPIMockRecorder struct { - mock *MockStateAPI +// MockAPIMockRecorder is the mock recorder for MockAPI +type MockAPIMockRecorder struct { + mock *MockAPI } -// NewMockStateAPI creates a new mock instance. -func NewMockStateAPI(ctrl *gomock.Controller) *MockStateAPI { - mock := &MockStateAPI{ctrl: ctrl} - mock.recorder = &MockStateAPIMockRecorder{mock} +// NewMockAPI creates a new mock instance +func NewMockAPI(ctrl *gomock.Controller) *MockAPI { + mock := &MockAPI{ctrl: ctrl} + mock.recorder = &MockAPIMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockStateAPI) EXPECT() *MockStateAPIMockRecorder { +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockAPI) EXPECT() *MockAPIMockRecorder { return m.recorder } -// UpdateHwInfo mocks base method. 
-func (m *MockStateAPI) UpdateHwInfo(ctx context.Context, h *models.Host, hwInfo string) (*UpdateReply, error) { +// RegisterHost mocks base method +func (m *MockAPI) RegisterHost(ctx context.Context, h *models.Host) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateHwInfo", ctx, h, hwInfo) - ret0, _ := ret[0].(*UpdateReply) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "RegisterHost", ctx, h) + ret0, _ := ret[0].(error) + return ret0 } -// UpdateHwInfo indicates an expected call of UpdateHwInfo. -func (mr *MockStateAPIMockRecorder) UpdateHwInfo(ctx, h, hwInfo interface{}) *gomock.Call { +// RegisterHost indicates an expected call of RegisterHost +func (mr *MockAPIMockRecorder) RegisterHost(ctx, h interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateHwInfo", reflect.TypeOf((*MockStateAPI)(nil).UpdateHwInfo), ctx, h, hwInfo) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterHost", reflect.TypeOf((*MockAPI)(nil).RegisterHost), ctx, h) } -// UpdateInventory mocks base method. -func (m *MockStateAPI) UpdateInventory(ctx context.Context, h *models.Host, inventory string) (*UpdateReply, error) { +// HandleInstallationFailure mocks base method +func (m *MockAPI) HandleInstallationFailure(ctx context.Context, h *models.Host) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateInventory", ctx, h, inventory) - ret0, _ := ret[0].(*UpdateReply) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "HandleInstallationFailure", ctx, h) + ret0, _ := ret[0].(error) + return ret0 } -// UpdateInventory indicates an expected call of UpdateInventory. 
-func (mr *MockStateAPIMockRecorder) UpdateInventory(ctx, h, inventory interface{}) *gomock.Call { +// HandleInstallationFailure indicates an expected call of HandleInstallationFailure +func (mr *MockAPIMockRecorder) HandleInstallationFailure(ctx, h interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateInventory", reflect.TypeOf((*MockStateAPI)(nil).UpdateInventory), ctx, h, inventory) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleInstallationFailure", reflect.TypeOf((*MockAPI)(nil).HandleInstallationFailure), ctx, h) } -// UpdateRole mocks base method. -func (m *MockStateAPI) UpdateRole(ctx context.Context, h *models.Host, role string, db *gorm.DB) (*UpdateReply, error) { +// GetNextSteps mocks base method +func (m *MockAPI) GetNextSteps(ctx context.Context, host *models.Host) (models.Steps, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateRole", ctx, h, role, db) - ret0, _ := ret[0].(*UpdateReply) + ret := m.ctrl.Call(m, "GetNextSteps", ctx, host) + ret0, _ := ret[0].(models.Steps) ret1, _ := ret[1].(error) return ret0, ret1 } -// UpdateRole indicates an expected call of UpdateRole. -func (mr *MockStateAPIMockRecorder) UpdateRole(ctx, h, role, db interface{}) *gomock.Call { +// GetNextSteps indicates an expected call of GetNextSteps +func (mr *MockAPIMockRecorder) GetNextSteps(ctx, host interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRole", reflect.TypeOf((*MockStateAPI)(nil).UpdateRole), ctx, h, role, db) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextSteps", reflect.TypeOf((*MockAPI)(nil).GetNextSteps), ctx, host) } -// RefreshStatus mocks base method. 
-func (m *MockStateAPI) RefreshStatus(ctx context.Context, h *models.Host) (*UpdateReply, error) { +// UpdateInstallProgress mocks base method +func (m *MockAPI) UpdateInstallProgress(ctx context.Context, h *models.Host, progress *models.HostProgress) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RefreshStatus", ctx, h) - ret0, _ := ret[0].(*UpdateReply) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "UpdateInstallProgress", ctx, h, progress) + ret0, _ := ret[0].(error) + return ret0 } -// RefreshStatus indicates an expected call of RefreshStatus. -func (mr *MockStateAPIMockRecorder) RefreshStatus(ctx, h interface{}) *gomock.Call { +// UpdateInstallProgress indicates an expected call of UpdateInstallProgress +func (mr *MockAPIMockRecorder) UpdateInstallProgress(ctx, h, progress interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RefreshStatus", reflect.TypeOf((*MockStateAPI)(nil).RefreshStatus), ctx, h) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateInstallProgress", reflect.TypeOf((*MockAPI)(nil).UpdateInstallProgress), ctx, h, progress) } -// Install mocks base method. -func (m *MockStateAPI) Install(ctx context.Context, h *models.Host, db *gorm.DB) (*UpdateReply, error) { +// RefreshStatus mocks base method +func (m *MockAPI) RefreshStatus(ctx context.Context, h *models.Host, db *gorm.DB) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Install", ctx, h, db) - ret0, _ := ret[0].(*UpdateReply) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "RefreshStatus", ctx, h, db) + ret0, _ := ret[0].(error) + return ret0 } -// Install indicates an expected call of Install. 
-func (mr *MockStateAPIMockRecorder) Install(ctx, h, db interface{}) *gomock.Call { +// RefreshStatus indicates an expected call of RefreshStatus +func (mr *MockAPIMockRecorder) RefreshStatus(ctx, h, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Install", reflect.TypeOf((*MockStateAPI)(nil).Install), ctx, h, db) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RefreshStatus", reflect.TypeOf((*MockAPI)(nil).RefreshStatus), ctx, h, db) } -// EnableHost mocks base method. -func (m *MockStateAPI) EnableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { +// SetBootstrap mocks base method +func (m *MockAPI) SetBootstrap(ctx context.Context, h *models.Host, isbootstrap bool, db *gorm.DB) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EnableHost", ctx, h) - ret0, _ := ret[0].(*UpdateReply) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "SetBootstrap", ctx, h, isbootstrap, db) + ret0, _ := ret[0].(error) + return ret0 } -// EnableHost indicates an expected call of EnableHost. -func (mr *MockStateAPIMockRecorder) EnableHost(ctx, h interface{}) *gomock.Call { +// SetBootstrap indicates an expected call of SetBootstrap +func (mr *MockAPIMockRecorder) SetBootstrap(ctx, h, isbootstrap, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableHost", reflect.TypeOf((*MockStateAPI)(nil).EnableHost), ctx, h) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBootstrap", reflect.TypeOf((*MockAPI)(nil).SetBootstrap), ctx, h, isbootstrap, db) } -// DisableHost mocks base method. 
-func (m *MockStateAPI) DisableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { +// UpdateConnectivityReport mocks base method +func (m *MockAPI) UpdateConnectivityReport(ctx context.Context, h *models.Host, connectivityReport string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DisableHost", ctx, h) - ret0, _ := ret[0].(*UpdateReply) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "UpdateConnectivityReport", ctx, h, connectivityReport) + ret0, _ := ret[0].(error) + return ret0 } -// DisableHost indicates an expected call of DisableHost. -func (mr *MockStateAPIMockRecorder) DisableHost(ctx, h interface{}) *gomock.Call { +// UpdateConnectivityReport indicates an expected call of UpdateConnectivityReport +func (mr *MockAPIMockRecorder) UpdateConnectivityReport(ctx, h, connectivityReport interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableHost", reflect.TypeOf((*MockStateAPI)(nil).DisableHost), ctx, h) -} - -// MockSpecificHardwareParams is a mock of SpecificHardwareParams interface. -type MockSpecificHardwareParams struct { - ctrl *gomock.Controller - recorder *MockSpecificHardwareParamsMockRecorder -} - -// MockSpecificHardwareParamsMockRecorder is the mock recorder for MockSpecificHardwareParams. -type MockSpecificHardwareParamsMockRecorder struct { - mock *MockSpecificHardwareParams -} - -// NewMockSpecificHardwareParams creates a new mock instance. -func NewMockSpecificHardwareParams(ctrl *gomock.Controller) *MockSpecificHardwareParams { - mock := &MockSpecificHardwareParams{ctrl: ctrl} - mock.recorder = &MockSpecificHardwareParamsMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. 
-func (m *MockSpecificHardwareParams) EXPECT() *MockSpecificHardwareParamsMockRecorder { - return m.recorder + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateConnectivityReport", reflect.TypeOf((*MockAPI)(nil).UpdateConnectivityReport), ctx, h, connectivityReport) } -// GetHostValidDisks mocks base method. -func (m *MockSpecificHardwareParams) GetHostValidDisks(h *models.Host) ([]*models.Disk, error) { +// HostMonitoring mocks base method +func (m *MockAPI) HostMonitoring() { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHostValidDisks", h) - ret0, _ := ret[0].([]*models.Disk) - ret1, _ := ret[1].(error) - return ret0, ret1 + m.ctrl.Call(m, "HostMonitoring") } -// GetHostValidDisks indicates an expected call of GetHostValidDisks. -func (mr *MockSpecificHardwareParamsMockRecorder) GetHostValidDisks(h interface{}) *gomock.Call { +// HostMonitoring indicates an expected call of HostMonitoring +func (mr *MockAPIMockRecorder) HostMonitoring() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHostValidDisks", reflect.TypeOf((*MockSpecificHardwareParams)(nil).GetHostValidDisks), h) -} - -// MockAPI is a mock of API interface. -type MockAPI struct { - ctrl *gomock.Controller - recorder *MockAPIMockRecorder -} - -// MockAPIMockRecorder is the mock recorder for MockAPI. -type MockAPIMockRecorder struct { - mock *MockAPI -} - -// NewMockAPI creates a new mock instance. -func NewMockAPI(ctrl *gomock.Controller) *MockAPI { - mock := &MockAPI{ctrl: ctrl} - mock.recorder = &MockAPIMockRecorder{mock} - return mock + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HostMonitoring", reflect.TypeOf((*MockAPI)(nil).HostMonitoring)) } -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockAPI) EXPECT() *MockAPIMockRecorder { - return m.recorder -} - -// RegisterHost mocks base method. 
-func (m *MockAPI) RegisterHost(ctx context.Context, h *models.Host) error { +// UpdateRole mocks base method +func (m *MockAPI) UpdateRole(ctx context.Context, h *models.Host, role models.HostRole, db *gorm.DB) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterHost", ctx, h) + ret := m.ctrl.Call(m, "UpdateRole", ctx, h, role, db) ret0, _ := ret[0].(error) return ret0 } -// RegisterHost indicates an expected call of RegisterHost. -func (mr *MockAPIMockRecorder) RegisterHost(ctx, h interface{}) *gomock.Call { +// UpdateRole indicates an expected call of UpdateRole +func (mr *MockAPIMockRecorder) UpdateRole(ctx, h, role, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterHost", reflect.TypeOf((*MockAPI)(nil).RegisterHost), ctx, h) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRole", reflect.TypeOf((*MockAPI)(nil).UpdateRole), ctx, h, role, db) } -// UpdateHwInfo mocks base method. -func (m *MockAPI) UpdateHwInfo(ctx context.Context, h *models.Host, hwInfo string) (*UpdateReply, error) { +// UpdateHostname mocks base method +func (m *MockAPI) UpdateHostname(ctx context.Context, h *models.Host, hostname string, db *gorm.DB) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateHwInfo", ctx, h, hwInfo) - ret0, _ := ret[0].(*UpdateReply) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "UpdateHostname", ctx, h, hostname, db) + ret0, _ := ret[0].(error) + return ret0 } -// UpdateHwInfo indicates an expected call of UpdateHwInfo. 
-func (mr *MockAPIMockRecorder) UpdateHwInfo(ctx, h, hwInfo interface{}) *gomock.Call { +// UpdateHostname indicates an expected call of UpdateHostname +func (mr *MockAPIMockRecorder) UpdateHostname(ctx, h, hostname, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateHwInfo", reflect.TypeOf((*MockAPI)(nil).UpdateHwInfo), ctx, h, hwInfo) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateHostname", reflect.TypeOf((*MockAPI)(nil).UpdateHostname), ctx, h, hostname, db) } -// UpdateInventory mocks base method. -func (m *MockAPI) UpdateInventory(ctx context.Context, h *models.Host, inventory string) (*UpdateReply, error) { +// CancelInstallation mocks base method +func (m *MockAPI) CancelInstallation(ctx context.Context, h *models.Host, reason string, db *gorm.DB) *common.ApiErrorResponse { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateInventory", ctx, h, inventory) - ret0, _ := ret[0].(*UpdateReply) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "CancelInstallation", ctx, h, reason, db) + ret0, _ := ret[0].(*common.ApiErrorResponse) + return ret0 } -// UpdateInventory indicates an expected call of UpdateInventory. -func (mr *MockAPIMockRecorder) UpdateInventory(ctx, h, inventory interface{}) *gomock.Call { +// CancelInstallation indicates an expected call of CancelInstallation +func (mr *MockAPIMockRecorder) CancelInstallation(ctx, h, reason, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateInventory", reflect.TypeOf((*MockAPI)(nil).UpdateInventory), ctx, h, inventory) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelInstallation", reflect.TypeOf((*MockAPI)(nil).CancelInstallation), ctx, h, reason, db) } -// UpdateRole mocks base method. 
-func (m *MockAPI) UpdateRole(ctx context.Context, h *models.Host, role string, db *gorm.DB) (*UpdateReply, error) { +// IsRequireUserActionReset mocks base method +func (m *MockAPI) IsRequireUserActionReset(h *models.Host) bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateRole", ctx, h, role, db) - ret0, _ := ret[0].(*UpdateReply) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "IsRequireUserActionReset", h) + ret0, _ := ret[0].(bool) + return ret0 } -// UpdateRole indicates an expected call of UpdateRole. -func (mr *MockAPIMockRecorder) UpdateRole(ctx, h, role, db interface{}) *gomock.Call { +// IsRequireUserActionReset indicates an expected call of IsRequireUserActionReset +func (mr *MockAPIMockRecorder) IsRequireUserActionReset(h interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRole", reflect.TypeOf((*MockAPI)(nil).UpdateRole), ctx, h, role, db) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsRequireUserActionReset", reflect.TypeOf((*MockAPI)(nil).IsRequireUserActionReset), h) } -// RefreshStatus mocks base method. -func (m *MockAPI) RefreshStatus(ctx context.Context, h *models.Host) (*UpdateReply, error) { +// ResetHost mocks base method +func (m *MockAPI) ResetHost(ctx context.Context, h *models.Host, reason string, db *gorm.DB) *common.ApiErrorResponse { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RefreshStatus", ctx, h) - ret0, _ := ret[0].(*UpdateReply) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "ResetHost", ctx, h, reason, db) + ret0, _ := ret[0].(*common.ApiErrorResponse) + return ret0 } -// RefreshStatus indicates an expected call of RefreshStatus. 
-func (mr *MockAPIMockRecorder) RefreshStatus(ctx, h interface{}) *gomock.Call { +// ResetHost indicates an expected call of ResetHost +func (mr *MockAPIMockRecorder) ResetHost(ctx, h, reason, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RefreshStatus", reflect.TypeOf((*MockAPI)(nil).RefreshStatus), ctx, h) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetHost", reflect.TypeOf((*MockAPI)(nil).ResetHost), ctx, h, reason, db) } -// Install mocks base method. -func (m *MockAPI) Install(ctx context.Context, h *models.Host, db *gorm.DB) (*UpdateReply, error) { +// ResetPendingUserAction mocks base method +func (m *MockAPI) ResetPendingUserAction(ctx context.Context, h *models.Host, db *gorm.DB) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Install", ctx, h, db) - ret0, _ := ret[0].(*UpdateReply) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "ResetPendingUserAction", ctx, h, db) + ret0, _ := ret[0].(error) + return ret0 } -// Install indicates an expected call of Install. -func (mr *MockAPIMockRecorder) Install(ctx, h, db interface{}) *gomock.Call { +// ResetPendingUserAction indicates an expected call of ResetPendingUserAction +func (mr *MockAPIMockRecorder) ResetPendingUserAction(ctx, h, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Install", reflect.TypeOf((*MockAPI)(nil).Install), ctx, h, db) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetPendingUserAction", reflect.TypeOf((*MockAPI)(nil).ResetPendingUserAction), ctx, h, db) } -// EnableHost mocks base method. 
-func (m *MockAPI) EnableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { +// GetHostname mocks base method +func (m *MockAPI) GetHostname(h *models.Host) string { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EnableHost", ctx, h) - ret0, _ := ret[0].(*UpdateReply) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "GetHostname", h) + ret0, _ := ret[0].(string) + return ret0 } -// EnableHost indicates an expected call of EnableHost. -func (mr *MockAPIMockRecorder) EnableHost(ctx, h interface{}) *gomock.Call { +// GetHostname indicates an expected call of GetHostname +func (mr *MockAPIMockRecorder) GetHostname(h interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableHost", reflect.TypeOf((*MockAPI)(nil).EnableHost), ctx, h) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHostname", reflect.TypeOf((*MockAPI)(nil).GetHostname), h) } -// DisableHost mocks base method. -func (m *MockAPI) DisableHost(ctx context.Context, h *models.Host) (*UpdateReply, error) { +// DisableHost mocks base method +func (m *MockAPI) DisableHost(ctx context.Context, h *models.Host) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DisableHost", ctx, h) - ret0, _ := ret[0].(*UpdateReply) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret0, _ := ret[0].(error) + return ret0 } -// DisableHost indicates an expected call of DisableHost. +// DisableHost indicates an expected call of DisableHost func (mr *MockAPIMockRecorder) DisableHost(ctx, h interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableHost", reflect.TypeOf((*MockAPI)(nil).DisableHost), ctx, h) } -// GetNextSteps mocks base method. 
-func (m *MockAPI) GetNextSteps(ctx context.Context, host *models.Host) (models.Steps, error) { +// EnableHost mocks base method +func (m *MockAPI) EnableHost(ctx context.Context, h *models.Host) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNextSteps", ctx, host) - ret0, _ := ret[0].(models.Steps) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "EnableHost", ctx, h) + ret0, _ := ret[0].(error) + return ret0 } -// GetNextSteps indicates an expected call of GetNextSteps. -func (mr *MockAPIMockRecorder) GetNextSteps(ctx, host interface{}) *gomock.Call { +// EnableHost indicates an expected call of EnableHost +func (mr *MockAPIMockRecorder) EnableHost(ctx, h interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextSteps", reflect.TypeOf((*MockAPI)(nil).GetNextSteps), ctx, host) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableHost", reflect.TypeOf((*MockAPI)(nil).EnableHost), ctx, h) } -// GetHostValidDisks mocks base method. -func (m *MockAPI) GetHostValidDisks(h *models.Host) ([]*models.Disk, error) { +// Install mocks base method +func (m *MockAPI) Install(ctx context.Context, h *models.Host, db *gorm.DB) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHostValidDisks", h) - ret0, _ := ret[0].([]*models.Disk) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "Install", ctx, h, db) + ret0, _ := ret[0].(error) + return ret0 } -// GetHostValidDisks indicates an expected call of GetHostValidDisks. 
-func (mr *MockAPIMockRecorder) GetHostValidDisks(h interface{}) *gomock.Call { +// Install indicates an expected call of Install +func (mr *MockAPIMockRecorder) Install(ctx, h, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHostValidDisks", reflect.TypeOf((*MockAPI)(nil).GetHostValidDisks), h) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Install", reflect.TypeOf((*MockAPI)(nil).Install), ctx, h, db) } -// UpdateInstallProgress mocks base method. -func (m *MockAPI) UpdateInstallProgress(ctx context.Context, h *models.Host, progress string) error { +// UpdateInventory mocks base method +func (m *MockAPI) UpdateInventory(ctx context.Context, h *models.Host, inventory string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateInstallProgress", ctx, h, progress) + ret := m.ctrl.Call(m, "UpdateInventory", ctx, h, inventory) ret0, _ := ret[0].(error) return ret0 } -// UpdateInstallProgress indicates an expected call of UpdateInstallProgress. -func (mr *MockAPIMockRecorder) UpdateInstallProgress(ctx, h, progress interface{}) *gomock.Call { +// UpdateInventory indicates an expected call of UpdateInventory +func (mr *MockAPIMockRecorder) UpdateInventory(ctx, h, inventory interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateInstallProgress", reflect.TypeOf((*MockAPI)(nil).UpdateInstallProgress), ctx, h, progress) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateInventory", reflect.TypeOf((*MockAPI)(nil).UpdateInventory), ctx, h, inventory) } -// SetBootstrap mocks base method. 
-func (m *MockAPI) SetBootstrap(ctx context.Context, h *models.Host, isbootstrap bool) error { +// GetStagesByRole mocks base method +func (m *MockAPI) GetStagesByRole(role models.HostRole, isbootstrap bool) []models.HostStage { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetBootstrap", ctx, h, isbootstrap) - ret0, _ := ret[0].(error) + ret := m.ctrl.Call(m, "GetStagesByRole", role, isbootstrap) + ret0, _ := ret[0].([]models.HostStage) return ret0 } -// SetBootstrap indicates an expected call of SetBootstrap. -func (mr *MockAPIMockRecorder) SetBootstrap(ctx, h, isbootstrap interface{}) *gomock.Call { +// GetStagesByRole indicates an expected call of GetStagesByRole +func (mr *MockAPIMockRecorder) GetStagesByRole(role, isbootstrap interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBootstrap", reflect.TypeOf((*MockAPI)(nil).SetBootstrap), ctx, h, isbootstrap) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStagesByRole", reflect.TypeOf((*MockAPI)(nil).GetStagesByRole), role, isbootstrap) } -// UpdateConnectivityReport mocks base method. 
-func (m *MockAPI) UpdateConnectivityReport(ctx context.Context, h *models.Host, connectivityReport string) error { +// IsInstallable mocks base method +func (m *MockAPI) IsInstallable(h *models.Host) bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateConnectivityReport", ctx, h, connectivityReport) + ret := m.ctrl.Call(m, "IsInstallable", h) + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsInstallable indicates an expected call of IsInstallable +func (mr *MockAPIMockRecorder) IsInstallable(h interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsInstallable", reflect.TypeOf((*MockAPI)(nil).IsInstallable), h) +} + +// PrepareForInstallation mocks base method +func (m *MockAPI) PrepareForInstallation(ctx context.Context, h *models.Host, db *gorm.DB) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PrepareForInstallation", ctx, h, db) ret0, _ := ret[0].(error) return ret0 } -// UpdateConnectivityReport indicates an expected call of UpdateConnectivityReport. 
-func (mr *MockAPIMockRecorder) UpdateConnectivityReport(ctx, h, connectivityReport interface{}) *gomock.Call { +// PrepareForInstallation indicates an expected call of PrepareForInstallation +func (mr *MockAPIMockRecorder) PrepareForInstallation(ctx, h, db interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateConnectivityReport", reflect.TypeOf((*MockAPI)(nil).UpdateConnectivityReport), ctx, h, connectivityReport) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareForInstallation", reflect.TypeOf((*MockAPI)(nil).PrepareForInstallation), ctx, h, db) } diff --git a/internal/host/mock_instruction_api.go b/internal/host/mock_instruction_api.go index 580f61c2f..8b331e372 100644 --- a/internal/host/mock_instruction_api.go +++ b/internal/host/mock_instruction_api.go @@ -12,30 +12,30 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockInstructionApi is a mock of InstructionApi interface. +// MockInstructionApi is a mock of InstructionApi interface type MockInstructionApi struct { ctrl *gomock.Controller recorder *MockInstructionApiMockRecorder } -// MockInstructionApiMockRecorder is the mock recorder for MockInstructionApi. +// MockInstructionApiMockRecorder is the mock recorder for MockInstructionApi type MockInstructionApiMockRecorder struct { mock *MockInstructionApi } -// NewMockInstructionApi creates a new mock instance. +// NewMockInstructionApi creates a new mock instance func NewMockInstructionApi(ctrl *gomock.Controller) *MockInstructionApi { mock := &MockInstructionApi{ctrl: ctrl} mock.recorder = &MockInstructionApiMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use. +// EXPECT returns an object that allows the caller to indicate expected use func (m *MockInstructionApi) EXPECT() *MockInstructionApiMockRecorder { return m.recorder } -// GetNextSteps mocks base method. 
+// GetNextSteps mocks base method func (m *MockInstructionApi) GetNextSteps(ctx context.Context, host *models.Host) (models.Steps, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetNextSteps", ctx, host) @@ -44,7 +44,7 @@ func (m *MockInstructionApi) GetNextSteps(ctx context.Context, host *models.Host return ret0, ret1 } -// GetNextSteps indicates an expected call of GetNextSteps. +// GetNextSteps indicates an expected call of GetNextSteps func (mr *MockInstructionApiMockRecorder) GetNextSteps(ctx, host interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextSteps", reflect.TypeOf((*MockInstructionApi)(nil).GetNextSteps), ctx, host) diff --git a/internal/host/monitor.go b/internal/host/monitor.go new file mode 100644 index 000000000..110bcc76e --- /dev/null +++ b/internal/host/monitor.go @@ -0,0 +1,39 @@ +package host + +import ( + "context" + + "github.com/filanov/bm-inventory/models" + "github.com/filanov/bm-inventory/pkg/requestid" +) + +func (m *Manager) HostMonitoring() { + var ( + hosts []*models.Host + requestID = requestid.NewID() + ctx = requestid.ToContext(context.Background(), requestID) + log = requestid.RequestIDLogger(m.log, requestID) + ) + + monitorStates := []string{ + models.HostStatusDiscovering, + models.HostStatusKnown, + models.HostStatusDisconnected, + models.HostStatusInsufficient, + models.HostStatusPendingForInput, + models.HostStatusPreparingForInstallation, + models.HostStatusInstalling, + models.HostStatusInstallingInProgress, + models.HostStatusInstalled, + } + + if err := m.db.Where("status IN (?)", monitorStates).Find(&hosts).Error; err != nil { + log.WithError(err).Errorf("failed to get hosts") + return + } + for _, host := range hosts { + if err := m.RefreshStatus(ctx, host, m.db); err != nil { + log.WithError(err).Errorf("failed to refresh host %s state", *host.ID) + } + } +} diff --git a/internal/host/refresh_status_preprocessor.go 
b/internal/host/refresh_status_preprocessor.go new file mode 100644 index 000000000..2595c699d --- /dev/null +++ b/internal/host/refresh_status_preprocessor.go @@ -0,0 +1,115 @@ +package host + +import ( + "github.com/filanov/bm-inventory/internal/hardware" + "github.com/sirupsen/logrus" +) + +type validationResult struct { + ID validationID `json:"id"` + Status validationStatus `json:"status"` + Message string `json:"message"` +} + +type refreshPreprocessor struct { + log logrus.FieldLogger + validations []validation +} + +func newRefreshPreprocessor(log logrus.FieldLogger, hwValidatorCfg *hardware.ValidatorCfg) *refreshPreprocessor { + return &refreshPreprocessor{ + log: log, + validations: newValidations(log, hwValidatorCfg), + } +} + +func (r *refreshPreprocessor) preprocess(c *validationContext) (map[validationID]bool, map[string][]validationResult, error) { + stateMachineInput := make(map[validationID]bool) + validationsOutput := make(map[string][]validationResult) + for _, v := range r.validations { + st := v.condition(c) + stateMachineInput[v.id] = st == ValidationSuccess + message := v.formatter(c, st) + category, err := v.id.category() + if err != nil { + logrus.WithError(err).Warn("id.category()") + return nil, nil, err + } + validationsOutput[category] = append(validationsOutput[category], validationResult{ + ID: v.id, + Status: st, + Message: message, + }) + } + return stateMachineInput, validationsOutput, nil +} + +func newValidations(log logrus.FieldLogger, hwValidatorCfg *hardware.ValidatorCfg) []validation { + v := validator{ + log: log, + hwValidatorCfg: hwValidatorCfg, + } + ret := []validation{ + { + id: IsConnected, + condition: v.isConnected, + formatter: v.printConnected, + }, + { + id: HasInventory, + condition: v.hasInventory, + formatter: v.printHasInventory, + }, + { + id: HasMinCPUCores, + condition: v.hasMinCpuCores, + formatter: v.printHasMinCpuCores, + }, + { + id: HasMinMemory, + condition: v.hasMinMemory, + formatter: 
v.printHasMinMemory, + }, + { + id: HasMinValidDisks, + condition: v.hasMinValidDisks, + formatter: v.printHasMinValidDisks, + }, + { + id: IsMachineCidrDefined, + condition: v.isMachineCidrDefined, + formatter: v.printIsMachineCidrDefined, + }, + { + id: IsRoleDefined, + condition: v.isRoleDefined, + formatter: v.printIsRoleDefined, + }, + { + id: HasCPUCoresForRole, + condition: v.hasCpuCoresForRole, + formatter: v.printHasCpuCoresForRole, + }, + { + id: HasMemoryForRole, + condition: v.hasMemoryForRole, + formatter: v.printHasMemoryForRole, + }, + { + id: IsHostnameUnique, + condition: v.isHostnameUnique, + formatter: v.printHostnameUnique, + }, + { + id: BelongsToMachineCidr, + condition: v.belongsToMachineCidr, + formatter: v.printBelongsToMachineCidr, + }, + { + id: IsHostnameValid, + condition: v.isHostnameValid, + formatter: v.printHostnameValid, + }, + } + return ret +} diff --git a/internal/host/resetinstallationcmd.go b/internal/host/resetinstallationcmd.go new file mode 100644 index 000000000..756f81676 --- /dev/null +++ b/internal/host/resetinstallationcmd.go @@ -0,0 +1,42 @@ +package host + +import ( + "bytes" + "context" + "html/template" + + "github.com/sirupsen/logrus" + + "github.com/filanov/bm-inventory/models" +) + +type resetInstallationCmd struct { + baseCmd +} + +func NewResetInstallationCmd(log logrus.FieldLogger) *resetInstallationCmd { + return &resetInstallationCmd{ + baseCmd: baseCmd{log: log}, + } +} + +func (h *resetInstallationCmd) GetStep(ctx context.Context, host *models.Host) (*models.Step, error) { + var cmdStr string + if host.Bootstrap { + cmdStr += "systemctl stop bootkube.service; rm -rf /etc/kubernetes/manifests/* /etc/kubernetes/static-pod-resources/* /opt/openshift/*.done; " + } + cmdStr += "/usr/bin/podman rm --all -f; systemctl restart agent; " + t, err := template.New("cmd").Parse(cmdStr) + if err != nil { + return nil, err + } + buf := &bytes.Buffer{} + if err := t.Execute(buf, nil); err != nil { + return nil, err + } + 
step := &models.Step{} + step.StepType = models.StepTypeResetInstallation + step.Command = "bash" + step.Args = []string{"-c", buf.String()} + return step, nil +} diff --git a/internal/host/resetinstallationcmd_test.go b/internal/host/resetinstallationcmd_test.go new file mode 100644 index 000000000..34f9e8e4d --- /dev/null +++ b/internal/host/resetinstallationcmd_test.go @@ -0,0 +1,48 @@ +package host + +import ( + "context" + + "github.com/filanov/bm-inventory/internal/common" + + "github.com/filanov/bm-inventory/models" + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/jinzhu/gorm" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("reset", func() { + ctx := context.Background() + var host models.Host + var db *gorm.DB + var rstCmd *resetInstallationCmd + var id, clusterId strfmt.UUID + var stepReply *models.Step + var stepErr error + dbName := "reset_cmd" + + BeforeEach(func() { + db = common.PrepareTestDB(dbName) + rstCmd = NewResetInstallationCmd(getTestLog()) + + id = strfmt.UUID(uuid.New().String()) + clusterId = strfmt.UUID(uuid.New().String()) + host = getTestHost(id, clusterId, HostStatusResetting) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + }) + + It("get_step", func() { + stepReply, stepErr = rstCmd.GetStep(ctx, &host) + Expect(stepReply.StepType).To(Equal(models.StepTypeResetInstallation)) + Expect(stepErr).ShouldNot(HaveOccurred()) + }) + + AfterEach(func() { + // cleanup + common.DeleteTestDB(db, dbName) + stepReply = nil + stepErr = nil + }) +}) diff --git a/internal/host/statemachine.go b/internal/host/statemachine.go index d55324572..efb149f71 100644 --- a/internal/host/statemachine.go +++ b/internal/host/statemachine.go @@ -1,14 +1,27 @@ package host -import "github.com/filanov/stateswitch" +import ( + "github.com/filanov/bm-inventory/models" + "github.com/filanov/stateswitch" +) const ( - TransitionTypeRegisterHost = "RegisterHost" + TransitionTypeRegisterHost = 
"RegisterHost" + TransitionTypeHostInstallationFailed = "HostInstallationFailed" + TransitionTypeCancelInstallation = "CancelInstallation" + TransitionTypeResetHost = "ResetHost" + TransitionTypeInstallHost = "InstallHost" + TransitionTypeDisableHost = "DisableHost" + TransitionTypeEnableHost = "EnableHost" + TransitionTypeResettingPendingUserAction = "ResettingPendingUserAction" + TransitionTypePrepareForInstallation = "Prepare for installation" + TransitionTypeRefresh = "RefreshHost" ) func NewHostStateMachine(th *transitionHandler) stateswitch.StateMachine { sm := stateswitch.NewStateMachine() + // Register host sm.AddTransition(stateswitch.TransitionRule{ TransitionType: TransitionTypeRegisterHost, SourceStates: []stateswitch.State{ @@ -17,17 +30,278 @@ func NewHostStateMachine(th *transitionHandler) stateswitch.StateMachine { HostStatusKnown, HostStatusDisconnected, HostStatusInsufficient, + HostStatusResetting, + stateswitch.State(models.HostStatusResettingPendingUserAction), }, DestinationState: HostStatusDiscovering, PostTransition: th.PostRegisterHost, }) + // Register host after reboot sm.AddTransition(stateswitch.TransitionRule{ TransitionType: TransitionTypeRegisterHost, - SourceStates: []stateswitch.State{HostStatusInstalling, HostStatusInstallingInProgress}, + Condition: th.IsHostInReboot, + SourceStates: []stateswitch.State{HostStatusInstallingInProgress}, + DestinationState: HostStatusInstallingPendingUserAction, + PostTransition: th.PostRegisterDuringReboot, + }) + + // Register host during installation + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeRegisterHost, + SourceStates: []stateswitch.State{ + stateswitch.State(models.HostStatusPreparingForInstallation), + stateswitch.State(models.HostStatusInstalling), + stateswitch.State(models.HostStatusInstallingInProgress), + }, DestinationState: HostStatusError, PostTransition: th.PostRegisterDuringInstallation, }) + // Installation failure + 
sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeHostInstallationFailed, + SourceStates: []stateswitch.State{HostStatusInstalling, HostStatusInstallingInProgress}, + DestinationState: HostStatusError, + PostTransition: th.PostHostInstallationFailed, + }) + + // Cancel installation - disabled host (do nothing) + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeCancelInstallation, + SourceStates: []stateswitch.State{ + stateswitch.State(models.HostStatusDisabled), + }, + DestinationState: stateswitch.State(models.HostStatusDisabled), + }) + + // Cancel installation + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeCancelInstallation, + SourceStates: []stateswitch.State{ + stateswitch.State(models.HostStatusPreparingForInstallation), + stateswitch.State(models.HostStatusInstalling), + stateswitch.State(models.HostStatusInstallingInProgress), + stateswitch.State(models.HostStatusInstalled), + stateswitch.State(models.HostStatusError), + }, + DestinationState: HostStatusError, + PostTransition: th.PostCancelInstallation, + }) + + // Reset disabled host (do nothing) + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeResetHost, + SourceStates: []stateswitch.State{ + stateswitch.State(models.HostStatusDisabled), + }, + DestinationState: stateswitch.State(models.HostStatusDisabled), + }) + + // Reset host + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeResetHost, + SourceStates: []stateswitch.State{ + stateswitch.State(models.HostStatusInstalling), + stateswitch.State(models.HostStatusPreparingForInstallation), + stateswitch.State(models.HostStatusInstallingInProgress), + stateswitch.State(models.HostStatusInstalled), + stateswitch.State(models.HostStatusError), + }, + DestinationState: stateswitch.State(models.HostStatusResetting), + PostTransition: th.PostResetHost, + }) + + // Install host + 
sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeInstallHost, + SourceStates: []stateswitch.State{stateswitch.State(models.HostStatusPreparingForInstallation)}, + DestinationState: stateswitch.State(models.HostStatusInstalling), + PostTransition: th.PostInstallHost, + }) + + // Install disabled host will not do anything + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeInstallHost, + SourceStates: []stateswitch.State{HostStatusDisabled}, + DestinationState: HostStatusDisabled, + }) + + // Disable host + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeDisableHost, + SourceStates: []stateswitch.State{ + stateswitch.State(models.HostStatusDisconnected), + stateswitch.State(models.HostStatusDiscovering), + stateswitch.State(models.HostStatusInsufficient), + stateswitch.State(models.HostStatusKnown), + stateswitch.State(models.HostStatusPendingForInput), + }, + DestinationState: HostStatusDisabled, + PostTransition: th.PostDisableHost, + }) + + // Enable host + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeEnableHost, + SourceStates: []stateswitch.State{ + HostStatusDisabled, + }, + DestinationState: HostStatusDiscovering, + PostTransition: th.PostEnableHost, + }) + + // Resetting pending user action + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeResettingPendingUserAction, + SourceStates: []stateswitch.State{ + HostStatusResetting, + }, + DestinationState: stateswitch.State(models.HostStatusResettingPendingUserAction), + PostTransition: th.PostResettingPendingUserAction, + }) + + // Prepare for installation + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypePrepareForInstallation, + Condition: th.IsValidRoleForInstallation, + SourceStates: []stateswitch.State{stateswitch.State(models.HostStatusKnown)}, + DestinationState: stateswitch.State(models.HostStatusPreparingForInstallation), + 
PostTransition: th.PostPrepareForInstallation, + }) + + // Refresh host + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeRefresh, + SourceStates: []stateswitch.State{HostStatusDiscovering, HostStatusInsufficient, HostStatusKnown, + HostStatusPendingForInput, HostStatusDisconnected}, + Condition: stateswitch.Not(If(IsConnected)), + DestinationState: HostStatusDisconnected, + PostTransition: th.PostRefreshHost(statusInfoDisconnected), + }) + + // Abort host if cluster has errors + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeRefresh, + SourceStates: []stateswitch.State{ + stateswitch.State(models.HostStatusInstalling), + stateswitch.State(models.HostStatusInstallingInProgress), + stateswitch.State(models.HostStatusInstalled), + }, + Condition: th.HasClusterError, + DestinationState: stateswitch.State(models.HostStatusError), + PostTransition: th.PostRefreshHost(statusInfoAbortingDueClusterErrors), + }) + + // Noop transitions for cluster error + for _, state := range []stateswitch.State{ + stateswitch.State(models.HostStatusInstalling), + stateswitch.State(models.HostStatusInstallingInProgress), + stateswitch.State(models.HostStatusInstalled), + } { + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeRefresh, + SourceStates: []stateswitch.State{state}, + Condition: stateswitch.Not(th.HasClusterError), + DestinationState: state, + }) + } + + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeRefresh, + SourceStates: []stateswitch.State{HostStatusDisconnected, HostStatusDiscovering}, + Condition: stateswitch.And(If(IsConnected), stateswitch.Not(If(HasInventory))), + DestinationState: HostStatusDiscovering, + PostTransition: th.PostRefreshHost(statusInfoDiscovering), + }) + + var hasMinRequiredHardware = stateswitch.And(If(HasMinValidDisks), If(HasMinCPUCores), If(HasMinMemory)) + + var requiredInputFieldsExist = stateswitch.And(If(IsMachineCidrDefined), 
If(IsRoleDefined)) + + var isSufficientForInstall = stateswitch.And(If(HasMemoryForRole), If(HasCPUCoresForRole), If(BelongsToMachineCidr), + If(IsHostnameUnique), If(IsHostnameValid)) + + // In order for this transition to be fired at least one of the validations in minRequiredHardwareValidations must fail. + // This transition handles the case that a host does not pass minimum hardware requirements for any of the roles + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeRefresh, + SourceStates: []stateswitch.State{HostStatusDisconnected, HostStatusDiscovering, HostStatusInsufficient}, + Condition: stateswitch.And(If(IsConnected), If(HasInventory), + stateswitch.Not(hasMinRequiredHardware)), + DestinationState: HostStatusInsufficient, + PostTransition: th.PostRefreshHost(statusInfoInsufficientHardware), + }) + + // In order for this transition to be fired at least one of the validations in sufficientInputValidations must fail. + // This transition handles the case that there is missing input that has to be provided from a user or other external means + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeRefresh, + SourceStates: []stateswitch.State{HostStatusDisconnected, HostStatusDiscovering, + HostStatusInsufficient, HostStatusKnown, HostStatusPendingForInput}, + Condition: stateswitch.And(If(IsConnected), If(HasInventory), + hasMinRequiredHardware, + stateswitch.Not(requiredInputFieldsExist)), + DestinationState: HostStatusPendingForInput, + PostTransition: th.PostRefreshHost(statusInfoPendingForInput), + }) + + // In order for this transition to be fired at least one of the validations in sufficientForInstallValidations must fail. 
+ // This transition handles the case that one of the required validations that are required in order for the host + // to be in known state (ready for installation) has failed + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeRefresh, + SourceStates: []stateswitch.State{HostStatusDisconnected, HostStatusInsufficient, HostStatusPendingForInput, + HostStatusDiscovering, HostStatusKnown}, + Condition: stateswitch.And(If(IsConnected), If(HasInventory), + hasMinRequiredHardware, + requiredInputFieldsExist, + stateswitch.Not(isSufficientForInstall)), + DestinationState: HostStatusInsufficient, + PostTransition: th.PostRefreshHost(statusInfoNotReadyForInstall), + }) + + // This transition is fired when all validations pass + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeRefresh, + SourceStates: []stateswitch.State{HostStatusDisconnected, HostStatusInsufficient, HostStatusPendingForInput, + HostStatusDiscovering, HostStatusKnown}, + Condition: stateswitch.And(If(IsConnected), If(HasInventory), + hasMinRequiredHardware, + requiredInputFieldsExist, + isSufficientForInstall), + DestinationState: HostStatusKnown, + PostTransition: th.PostRefreshHost(""), + }) + + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeRefresh, + SourceStates: []stateswitch.State{stateswitch.State(models.HostStatusPreparingForInstallation)}, + Condition: th.IsPreparingTimedOut, + DestinationState: HostStatusError, + PostTransition: th.PostRefreshHost(statusInfoPreparingTimedOut), + }) + + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeRefresh, + SourceStates: []stateswitch.State{stateswitch.State(models.HostStatusPreparingForInstallation)}, + Condition: stateswitch.Not(th.IsPreparingTimedOut), + DestinationState: stateswitch.State(models.HostStatusPreparingForInstallation), + }) + + // Noop transitions + for _, state := range []stateswitch.State{ + 
stateswitch.State(models.HostStatusDisabled), + stateswitch.State(models.HostStatusError), + stateswitch.State(models.HostStatusResetting), + stateswitch.State(models.HostStatusInstallingPendingUserAction), + stateswitch.State(models.HostStatusResettingPendingUserAction)} { + sm.AddTransition(stateswitch.TransitionRule{ + TransitionType: TransitionTypeRefresh, + SourceStates: []stateswitch.State{state}, + DestinationState: state, + }) + } + return sm } diff --git a/internal/host/stopinstallation.go b/internal/host/stopinstallation.go new file mode 100644 index 000000000..fcf6f09d2 --- /dev/null +++ b/internal/host/stopinstallation.go @@ -0,0 +1,30 @@ +package host + +import ( + "context" + + "github.com/sirupsen/logrus" + + "github.com/filanov/bm-inventory/models" +) + +type stopInstallationCmd struct { + baseCmd +} + +func NewStopInstallationCmd(log logrus.FieldLogger) *stopInstallationCmd { + return &stopInstallationCmd{ + baseCmd: baseCmd{log: log}, + } +} + +func (h *stopInstallationCmd) GetStep(ctx context.Context, host *models.Host) (*models.Step, error) { + step := &models.Step{ + StepType: models.StepTypeExecute, + Command: "/usr/bin/podman", + Args: []string{ + "kill", "--all", + }, + } + return step, nil +} diff --git a/internal/host/hwinfocmd_test.go b/internal/host/stopinstallation_test.go similarity index 61% rename from internal/host/hwinfocmd_test.go rename to internal/host/stopinstallation_test.go index d39b1c50d..8dd99d847 100644 --- a/internal/host/hwinfocmd_test.go +++ b/internal/host/stopinstallation_test.go @@ -3,6 +3,8 @@ package host import ( "context" + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" "github.com/google/uuid" @@ -11,34 +13,35 @@ import ( . 
"github.com/onsi/gomega" ) -var _ = Describe("hwinfocmd", func() { +var _ = Describe("stop-podman", func() { ctx := context.Background() var host models.Host var db *gorm.DB - var hwCmd *hwInfoCmd + var stopCmd *stopInstallationCmd var id, clusterId strfmt.UUID var stepReply *models.Step var stepErr error + dbName := "stop_podman" BeforeEach(func() { - db = prepareDB() - hwCmd = NewHwInfoCmd(getTestLog()) + db = common.PrepareTestDB(dbName) + stopCmd = NewStopInstallationCmd(getTestLog()) id = strfmt.UUID(uuid.New().String()) clusterId = strfmt.UUID(uuid.New().String()) - host = getTestHost(id, clusterId, HostStatusDiscovering) + host = getTestHost(id, clusterId, HostStatusError) Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) }) It("get_step", func() { - stepReply, stepErr = hwCmd.GetStep(ctx, &host) - Expect(stepReply.StepType).To(Equal(models.StepTypeHardwareInfo)) + stepReply, stepErr = stopCmd.GetStep(ctx, &host) + Expect(stepReply.StepType).To(Equal(models.StepTypeExecute)) Expect(stepErr).ShouldNot(HaveOccurred()) }) AfterEach(func() { // cleanup - db.Close() + common.DeleteTestDB(db, dbName) stepReply = nil stepErr = nil }) diff --git a/internal/host/transition.go b/internal/host/transition.go index 1330973d4..97bd07a3d 100644 --- a/internal/host/transition.go +++ b/internal/host/transition.go @@ -2,22 +2,29 @@ package host import ( "context" + "encoding/json" + "net/http" + "time" + "github.com/filanov/bm-inventory/internal/events" + + "github.com/filanov/bm-inventory/internal/common" + "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" "github.com/filanov/bm-inventory/models" logutil "github.com/filanov/bm-inventory/pkg/log" - - "github.com/sirupsen/logrus" - "github.com/filanov/stateswitch" "github.com/jinzhu/gorm" "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/thoas/go-funk" ) type transitionHandler struct { - db *gorm.DB - log logrus.FieldLogger + db *gorm.DB + log logrus.FieldLogger + eventsHandler 
events.Handler } //////////////////////////////////////////////////////////////////////////// @@ -25,7 +32,8 @@ type transitionHandler struct { //////////////////////////////////////////////////////////////////////////// type TransitionArgsRegisterHost struct { - ctx context.Context + ctx context.Context + discoveryAgentVersion string } func (th *transitionHandler) PostRegisterHost(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { @@ -41,14 +49,22 @@ func (th *transitionHandler) PostRegisterHost(sw stateswitch.StateSwitch, args s host := models.Host{} log := logutil.FromContext(params.ctx, th.log) - // if already exists, reset role and hw info + // If host already exists if err := th.db.First(&host, "id = ? and cluster_id = ?", sHost.host.ID, sHost.host.ClusterID).Error; err == nil { - currentState := swag.StringValue(host.Status) - host.Status = sHost.host.Status - return updateHostStateWithParams(log, currentState, statusInfoDiscovering, &host, th.db, - "hardware_info", "", "role", "") + // The reason for the double register is unknown (HW might have changed) - + // so we reset the hw info and progress, and start the discovery process again. 
+ if host, err := updateHostProgress(params.ctx, log, th.db, th.eventsHandler, sHost.host.ClusterID, *sHost.host.ID, sHost.srcState, + swag.StringValue(sHost.host.Status), statusInfoDiscovering, sHost.host.Progress.CurrentStage, "", "", + "inventory", "", "discovery_agent_version", params.discoveryAgentVersion, "bootstrap", false); err != nil { + return err + } else { + sHost.host = host + return nil + } } + sHost.host.StatusUpdatedAt = strfmt.DateTime(time.Now()) + sHost.host.StatusInfo = swag.String(statusInfoDiscovering) log.Infof("Register new host %s cluster %s", sHost.host.ID.String(), sHost.host.ClusterID) return th.db.Create(sHost.host).Error } @@ -62,6 +78,322 @@ func (th *transitionHandler) PostRegisterDuringInstallation(sw stateswitch.State if !ok { return errors.New("PostRegisterDuringInstallation invalid argument") } - return updateHostStateWithParams(logutil.FromContext(params.ctx, th.log), sHost.srcState, - "Tried to register during installation", sHost.host, th.db) + + return th.updateTransitionHost(params.ctx, logutil.FromContext(params.ctx, th.log), th.db, sHost, + "The host unexpectedly restarted during the installation") +} + +func (th *transitionHandler) IsHostInReboot(sw stateswitch.StateSwitch, _ stateswitch.TransitionArgs) (bool, error) { + sHost, ok := sw.(*stateHost) + if !ok { + return false, errors.New("IsInReboot incompatible type of StateSwitch") + } + + return sHost.host.Progress.CurrentStage == models.HostStageRebooting, nil +} + +func (th *transitionHandler) PostRegisterDuringReboot(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sHost, ok := sw.(*stateHost) + if !ok { + return errors.New("RegisterNewHost incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsRegisterHost) + if !ok { + return errors.New("PostRegisterDuringReboot invalid argument") + } + + return th.updateTransitionHost(params.ctx, logutil.FromContext(params.ctx, th.log), th.db, sHost, + "Expected the host to boot from 
disk, but it booted the installation image - please reboot and fix boot order to boot from disk") +} + +//////////////////////////////////////////////////////////////////////////// +// Installation failure +//////////////////////////////////////////////////////////////////////////// + +type TransitionArgsHostInstallationFailed struct { + ctx context.Context + reason string +} + +func (th *transitionHandler) PostHostInstallationFailed(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sHost, ok := sw.(*stateHost) + if !ok { + return errors.New("HostInstallationFailed incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsHostInstallationFailed) + if !ok { + return errors.New("HostInstallationFailed invalid argument") + } + + return th.updateTransitionHost(params.ctx, logutil.FromContext(params.ctx, th.log), th.db, sHost, + params.reason) +} + +//////////////////////////////////////////////////////////////////////////// +// Cancel Installation +//////////////////////////////////////////////////////////////////////////// + +type TransitionArgsCancelInstallation struct { + ctx context.Context + reason string + db *gorm.DB +} + +func (th *transitionHandler) PostCancelInstallation(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sHost, ok := sw.(*stateHost) + if !ok { + return errors.New("PostCancelInstallation incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsCancelInstallation) + if !ok { + return errors.New("PostCancelInstallation invalid argument") + } + if sHost.srcState == HostStatusError { + return nil + } + + return th.updateTransitionHost(params.ctx, logutil.FromContext(params.ctx, th.log), params.db, sHost, + params.reason) +} + +//////////////////////////////////////////////////////////////////////////// +// Reset Host +//////////////////////////////////////////////////////////////////////////// + +type TransitionArgsResetHost struct { + ctx context.Context + reason string 
+ db *gorm.DB +} + +func (th *transitionHandler) PostResetHost(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sHost, ok := sw.(*stateHost) + if !ok { + return errors.New("PostResetHost incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsResetHost) + if !ok { + return errors.New("PostResetHost invalid argument") + } + + return th.updateTransitionHost(params.ctx, logutil.FromContext(params.ctx, th.log), params.db, sHost, + params.reason) +} + +//////////////////////////////////////////////////////////////////////////// +// Install host +//////////////////////////////////////////////////////////////////////////// + +type TransitionArgsInstallHost struct { + ctx context.Context + db *gorm.DB +} + +func (th *transitionHandler) PostInstallHost(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sHost, ok := sw.(*stateHost) + if !ok { + return errors.New("PostInstallHost incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsInstallHost) + if !ok { + return errors.New("PostInstallHost invalid argument") + } + return th.updateTransitionHost(params.ctx, logutil.FromContext(params.ctx, th.log), params.db, sHost, + statusInfoInstalling) +} + +//////////////////////////////////////////////////////////////////////////// +// Disable host +//////////////////////////////////////////////////////////////////////////// + +type TransitionArgsDisableHost struct { + ctx context.Context +} + +func (th *transitionHandler) PostDisableHost(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sHost, ok := sw.(*stateHost) + if !ok { + return errors.New("PostDisableHost incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsDisableHost) + if !ok { + return errors.New("PostDisableHost invalid argument") + } + + return th.updateTransitionHost(params.ctx, logutil.FromContext(params.ctx, th.log), th.db, sHost, + statusInfoDisabled) +} + 
+//////////////////////////////////////////////////////////////////////////// +// Enable host +//////////////////////////////////////////////////////////////////////////// + +type TransitionArgsEnableHost struct { + ctx context.Context +} + +func (th *transitionHandler) PostEnableHost(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sHost, ok := sw.(*stateHost) + if !ok { + return errors.New("PostEnableHost incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsEnableHost) + if !ok { + return errors.New("PostEnableHost invalid argument") + } + + return th.updateTransitionHost(params.ctx, logutil.FromContext(params.ctx, th.log), th.db, sHost, + statusInfoDiscovering, "inventory", "") +} + +//////////////////////////////////////////////////////////////////////////// +// Resetting pending user action +//////////////////////////////////////////////////////////////////////////// + +type TransitionResettingPendingUserAction struct { + ctx context.Context + db *gorm.DB +} + +func (th *transitionHandler) IsValidRoleForInstallation(sw stateswitch.StateSwitch, _ stateswitch.TransitionArgs) (bool, error) { + sHost, ok := sw.(*stateHost) + if !ok { + return false, errors.New("IsValidRoleForInstallation incompatible type of StateSwitch") + } + validRoles := []string{string(models.HostRoleMaster), string(models.HostRoleWorker)} + if !funk.ContainsString(validRoles, string(sHost.host.Role)) { + return false, common.NewApiError(http.StatusConflict, + errors.Errorf("Can't install host %s due to invalid host role: %s, should be one of %s", + sHost.host.ID.String(), sHost.host.Role, validRoles)) + } + return true, nil +} + +func (th *transitionHandler) PostResettingPendingUserAction(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sHost, ok := sw.(*stateHost) + if !ok { + return errors.New("ResettingPendingUserAction incompatible type of StateSwitch") + } + params, ok := args.(*TransitionResettingPendingUserAction) + if 
!ok { + return errors.New("ResettingPendingUserAction invalid argument") + } + + return th.updateTransitionHost(params.ctx, logutil.FromContext(params.ctx, th.log), params.db, sHost, + statusInfoResettingPendingUserAction) +} + +//////////////////////////////////////////////////////////////////////////// +// Resetting pending user action +//////////////////////////////////////////////////////////////////////////// + +type TransitionArgsPrepareForInstallation struct { + ctx context.Context + db *gorm.DB +} + +func (th *transitionHandler) PostPrepareForInstallation(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sHost, _ := sw.(*stateHost) + params, _ := args.(*TransitionArgsPrepareForInstallation) + return th.updateTransitionHost(params.ctx, logutil.FromContext(params.ctx, th.log), params.db, sHost, + statusInfoPreparingForInstallation) +} + +func (th *transitionHandler) updateTransitionHost(ctx context.Context, log logrus.FieldLogger, db *gorm.DB, state *stateHost, + statusInfo string, extra ...interface{}) error { + + if host, err := updateHostStatus(ctx, log, db, th.eventsHandler, state.host.ClusterID, *state.host.ID, state.srcState, + swag.StringValue(state.host.Status), statusInfo, extra...); err != nil { + return err + } else { + state.host = host + return nil + } +} + +//////////////////////////////////////////////////////////////////////////// +// Refresh Host +//////////////////////////////////////////////////////////////////////////// + +type TransitionArgsRefreshHost struct { + ctx context.Context + eventHandler events.Handler + conditions map[validationID]bool + validationResults map[string][]validationResult + db *gorm.DB +} + +func If(id validationID) stateswitch.Condition { + ret := func(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) (bool, error) { + params, ok := args.(*TransitionArgsRefreshHost) + if !ok { + return false, errors.Errorf("If(%s) invalid argument", id.String()) + } + b, ok := params.conditions[id] 
+ if !ok { + return false, errors.Errorf("If(%s) no such condition", id.String()) + } + return b, nil + } + return ret +} + +func (th *transitionHandler) IsPreparingTimedOut(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) (bool, error) { + sHost, ok := sw.(*stateHost) + if !ok { + return false, errors.New("IsPreparingTimedOut incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsRefreshHost) + if !ok { + return false, errors.New("IsPreparingTimedOut invalid argument") + } + var cluster common.Cluster + err := params.db.Select("status").Take(&cluster, "id = ?", sHost.host.ClusterID.String()).Error + if err != nil { + return false, err + } + return swag.StringValue(cluster.Status) != models.ClusterStatusPreparingForInstallation, nil +} + +func (th *transitionHandler) HasClusterError(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) (bool, error) { + sHost, ok := sw.(*stateHost) + if !ok { + return false, errors.New("HasClusterError incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsRefreshHost) + if !ok { + return false, errors.New("HasClusterError invalid argument") + } + var cluster common.Cluster + err := params.db.Select("status").Take(&cluster, "id = ?", sHost.host.ClusterID.String()).Error + if err != nil { + return false, err + } + return swag.StringValue(cluster.Status) == models.ClusterStatusError, nil +} + +// Return a post transition function with a constant reason +func (th *transitionHandler) PostRefreshHost(reason string) stateswitch.PostTransition { + ret := func(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error { + sHost, ok := sw.(*stateHost) + if !ok { + return errors.New("PostResetHost incompatible type of StateSwitch") + } + params, ok := args.(*TransitionArgsRefreshHost) + if !ok { + return errors.New("PostRefreshHost invalid argument") + } + var ( + b []byte + err error + ) + b, err = json.Marshal(¶ms.validationResults) + if err != nil { + return err + } + _, 
err = updateHostStatus(params.ctx, logutil.FromContext(params.ctx, th.log), params.db, th.eventsHandler, sHost.host.ClusterID, *sHost.host.ID, + sHost.srcState, swag.StringValue(sHost.host.Status), reason, "validations_info", string(b)) + return err + } + return ret } diff --git a/internal/host/transition_test.go b/internal/host/transition_test.go index 84c2a38e0..7183778d3 100644 --- a/internal/host/transition_test.go +++ b/internal/host/transition_test.go @@ -2,38 +2,63 @@ package host import ( "context" + "encoding/json" + "fmt" + "net/http" + "time" - "github.com/go-openapi/swag" - - . "github.com/onsi/gomega" - + "github.com/filanov/bm-inventory/internal/common" + "github.com/filanov/bm-inventory/internal/events" + "github.com/filanov/bm-inventory/internal/hardware" + "github.com/filanov/bm-inventory/internal/metrics" "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/golang/mock/gomock" "github.com/google/uuid" "github.com/jinzhu/gorm" . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" ) +func createValidatorCfg() *hardware.ValidatorCfg { + return &hardware.ValidatorCfg{ + MinCPUCores: 2, + MinCPUCoresWorker: 2, + MinCPUCoresMaster: 4, + MinDiskSizeGb: 120, + MinRamGib: 8, + MinRamGibWorker: 8, + MinRamGibMaster: 16, + } +} + var _ = Describe("RegisterHost", func() { var ( ctx = context.Background() hapi API db *gorm.DB + ctrl *gomock.Controller + mockEvents *events.MockHandler hostId, clusterId strfmt.UUID + dbName = "register_host" ) BeforeEach(func() { - db = prepareDB() - hapi = NewManager(getTestLog(), db, nil, nil) + ctrl = gomock.NewController(GinkgoT()) + db = common.PrepareTestDB(dbName, &events.Event{}) + mockEvents = events.NewMockHandler(ctrl) + hapi = NewManager(getTestLog(), db, mockEvents, nil, nil, createValidatorCfg(), nil) hostId = strfmt.UUID(uuid.New().String()) clusterId = strfmt.UUID(uuid.New().String()) }) It("register_new", func() { - Expect(hapi.RegisterHost(ctx, &models.Host{ID: &hostId, ClusterID: clusterId})).ShouldNot(HaveOccurred()) + Expect(hapi.RegisterHost(ctx, &models.Host{ID: &hostId, ClusterID: clusterId, DiscoveryAgentVersion: "v1.0.1"})).ShouldNot(HaveOccurred()) h := getHost(hostId, clusterId, db) Expect(swag.StringValue(h.Status)).Should(Equal(HostStatusDiscovering)) + Expect(h.DiscoveryAgentVersion).To(Equal("v1.0.1")) }) Context("register during installation put host in error", func() { @@ -54,8 +79,9 @@ var _ = Describe("RegisterHost", func() { AfterEach(func() { h := getHost(hostId, clusterId, db) Expect(swag.StringValue(h.Status)).Should(Equal(HostStatusError)) - Expect(h.Role).Should(Equal(RoleMaster)) - Expect(h.HardwareInfo).Should(Equal(defaultHwInfo)) + Expect(h.Role).Should(Equal(models.HostRoleMaster)) + Expect(h.Inventory).Should(Equal(defaultHwInfo)) + Expect(h.StatusInfo).NotTo(BeNil()) }) for i := range tests { @@ -63,12 +89,15 @@ var _ = Describe("RegisterHost", func() { It(t.name, func() { Expect(db.Create(&models.Host{ - ID: &hostId, - ClusterID: clusterId, - 
Role: RoleMaster, - HardwareInfo: defaultHwInfo, - Status: swag.String(t.srcState), + ID: &hostId, + ClusterID: clusterId, + Role: models.HostRoleMaster, + Inventory: defaultHwInfo, + Status: swag.String(t.srcState), }).Error).ShouldNot(HaveOccurred()) + mockEvents.EXPECT().AddEvent(gomock.Any(), hostId.String(), models.EventSeverityError, + fmt.Sprintf("Host %s: updated status from \"%s\" to \"error\" (The host unexpectedly restarted during the installation)", hostId.String(), t.srcState), + gomock.Any(), clusterId.String()) Expect(hapi.RegisterHost(ctx, &models.Host{ ID: &hostId, @@ -80,6 +109,7 @@ var _ = Describe("RegisterHost", func() { }) Context("host already exists register success", func() { + discoveryAgentVersion := "v2.0.5" tests := []struct { name string srcState string @@ -105,8 +135,9 @@ var _ = Describe("RegisterHost", func() { AfterEach(func() { h := getHost(hostId, clusterId, db) Expect(swag.StringValue(h.Status)).Should(Equal(HostStatusDiscovering)) - Expect(h.Role).Should(Equal("")) - Expect(h.HardwareInfo).Should(Equal("")) + Expect(h.Role).Should(Equal(models.HostRoleMaster)) + Expect(h.Inventory).Should(Equal("")) + Expect(h.DiscoveryAgentVersion).To(Equal(discoveryAgentVersion)) }) for i := range tests { @@ -114,17 +145,21 @@ var _ = Describe("RegisterHost", func() { It(t.name, func() { Expect(db.Create(&models.Host{ - ID: &hostId, - ClusterID: clusterId, - Role: RoleMaster, - HardwareInfo: defaultHwInfo, - Status: swag.String(t.srcState), - }).Error).ShouldNot(HaveOccurred()) - - Expect(hapi.RegisterHost(ctx, &models.Host{ ID: &hostId, ClusterID: clusterId, + Role: models.HostRoleMaster, + Inventory: defaultHwInfo, Status: swag.String(t.srcState), + }).Error).ShouldNot(HaveOccurred()) + mockEvents.EXPECT().AddEvent(gomock.Any(), hostId.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"%s\" to \"discovering\" (Waiting for host hardware info)", hostId.String(), t.srcState), + gomock.Any(), clusterId.String()) + 
+ Expect(hapi.RegisterHost(ctx, &models.Host{ + ID: &hostId, + ClusterID: clusterId, + Status: swag.String(t.srcState), + DiscoveryAgentVersion: discoveryAgentVersion, })).ShouldNot(HaveOccurred()) }) } @@ -155,11 +190,11 @@ var _ = Describe("RegisterHost", func() { It(t.name, func() { Expect(db.Create(&models.Host{ - ID: &hostId, - ClusterID: clusterId, - Role: RoleMaster, - HardwareInfo: defaultHwInfo, - Status: swag.String(t.srcState), + ID: &hostId, + ClusterID: clusterId, + Role: models.HostRoleMaster, + Inventory: defaultHwInfo, + Status: swag.String(t.srcState), }).Error).ShouldNot(HaveOccurred()) Expect(hapi.RegisterHost(ctx, &models.Host{ @@ -170,14 +205,1869 @@ var _ = Describe("RegisterHost", func() { h := getHost(hostId, clusterId, db) Expect(swag.StringValue(h.Status)).Should(Equal(t.srcState)) - Expect(h.Role).Should(Equal(RoleMaster)) - Expect(h.HardwareInfo).Should(Equal(defaultHwInfo)) + Expect(h.Role).Should(Equal(models.HostRoleMaster)) + Expect(h.Inventory).Should(Equal(defaultHwInfo)) + }) + } + }) + + Context("register after reboot", func() { + tests := []struct { + name string + srcState string + progress models.HostProgressInfo + }{ + { + name: "host in reboot", + srcState: HostStatusInstallingInProgress, + progress: models.HostProgressInfo{ + CurrentStage: models.HostStageRebooting, + }, + }, + } + + AfterEach(func() { + h := getHost(hostId, clusterId, db) + Expect(swag.StringValue(h.Status)).Should(Equal(models.HostStatusInstallingPendingUserAction)) + Expect(h.Role).Should(Equal(models.HostRoleMaster)) + Expect(h.Inventory).Should(Equal(defaultHwInfo)) + Expect(h.StatusInfo).NotTo(BeNil()) + }) + + for i := range tests { + t := tests[i] + + It(t.name, func() { + Expect(db.Create(&models.Host{ + ID: &hostId, + ClusterID: clusterId, + Role: models.HostRoleMaster, + Inventory: defaultHwInfo, + Status: swag.String(t.srcState), + Progress: &t.progress, + }).Error).ShouldNot(HaveOccurred()) + mockEvents.EXPECT().AddEvent(gomock.Any(), 
hostId.String(), models.EventSeverityWarning, + fmt.Sprintf("Host %s: updated status from \"installing-in-progress\" to \"installing-pending-user-action\" "+ + "(Expected the host to boot from disk, but it booted the installation image - please reboot and fix boot order "+ + "to boot from disk)", hostId.String()), + gomock.Any(), clusterId.String()) + + Expect(hapi.RegisterHost(ctx, &models.Host{ + ID: &hostId, + ClusterID: clusterId, + Status: swag.String(t.srcState), + })).ShouldNot(HaveOccurred()) + }) + } + }) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) +}) + +var _ = Describe("HostInstallationFailed", func() { + var ( + ctx = context.Background() + hapi API + db *gorm.DB + hostId, clusterId strfmt.UUID + host models.Host + ctrl *gomock.Controller + mockMetric *metrics.MockAPI + mockEvents *events.MockHandler + dbName = "host_installation_failed" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + ctrl = gomock.NewController(GinkgoT()) + mockMetric = metrics.NewMockAPI(ctrl) + mockEvents = events.NewMockHandler(ctrl) + hapi = NewManager(getTestLog(), db, mockEvents, nil, nil, createValidatorCfg(), mockMetric) + hostId = strfmt.UUID(uuid.New().String()) + clusterId = strfmt.UUID(uuid.New().String()) + host = getTestHost(hostId, clusterId, "") + host.Status = swag.String(HostStatusInstalling) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + }) + + It("handle_installation_error", func() { + mockEvents.EXPECT().AddEvent(gomock.Any(), hostId.String(), models.EventSeverityError, + fmt.Sprintf("Host %s: updated status from \"installing\" to \"error\" (installation command failed)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + mockMetric.EXPECT().ReportHostInstallationMetrics(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) + Expect(hapi.HandleInstallationFailure(ctx, &host)).ShouldNot(HaveOccurred()) + h := getHost(hostId, clusterId, db) + 
Expect(swag.StringValue(h.Status)).Should(Equal(HostStatusError)) + Expect(swag.StringValue(h.StatusInfo)).Should(Equal("installation command failed")) + }) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) +}) + +var _ = Describe("Cancel host installation", func() { + var ( + ctx = context.Background() + dbName = "cancel_host_installation_test" + hapi API + db *gorm.DB + hostId, clusterId strfmt.UUID + host models.Host + ctrl *gomock.Controller + mockEventsHandler *events.MockHandler + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + ctrl = gomock.NewController(GinkgoT()) + mockEventsHandler = events.NewMockHandler(ctrl) + hapi = NewManager(getTestLog(), db, mockEventsHandler, nil, nil, createValidatorCfg(), nil) + }) + + tests := []struct { + state string + success bool + statusCode int32 + }{ + {state: models.HostStatusPreparingForInstallation, success: true}, + {state: models.HostStatusInstalling, success: true}, + {state: models.HostStatusInstallingInProgress, success: true}, + {state: models.HostStatusInstalled, success: true}, + {state: models.HostStatusError, success: true}, + {state: models.HostStatusDisabled, success: true}, + {state: models.HostStatusDiscovering, success: false, statusCode: http.StatusConflict}, + {state: models.HostStatusKnown, success: false, statusCode: http.StatusConflict}, + {state: models.HostStatusPendingForInput, success: false, statusCode: http.StatusConflict}, + {state: models.HostStatusInstallingPendingUserAction, success: false, statusCode: http.StatusConflict}, + {state: models.HostStatusResettingPendingUserAction, success: false, statusCode: http.StatusConflict}, + {state: models.HostStatusDisconnected, success: false, statusCode: http.StatusConflict}, + } + + acceptNewEvents := func(times int) { + mockEventsHandler.EXPECT().AddEvent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(times) + } + + for _, t := range tests { + 
It(fmt.Sprintf("cancel from state %s", t.state), func() { + hostId = strfmt.UUID(uuid.New().String()) + clusterId = strfmt.UUID(uuid.New().String()) + host = getTestHost(hostId, clusterId, "") + host.Status = swag.String(t.state) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + eventsNum := 1 + if t.success { + eventsNum++ + } + acceptNewEvents(eventsNum) + err := hapi.CancelInstallation(ctx, &host, "reason", db) + h := getHost(hostId, clusterId, db) + if t.success { + Expect(err).ShouldNot(HaveOccurred()) + Expect(swag.StringValue(h.Status)).Should(Equal(models.HostStatusResetting)) + } else { + Expect(err).Should(HaveOccurred()) + Expect(err.StatusCode()).Should(Equal(t.statusCode)) + Expect(swag.StringValue(h.Status)).Should(Equal(t.state)) + } + }) + } + + AfterEach(func() { + ctrl.Finish() + common.DeleteTestDB(db, dbName) + }) +}) + +var _ = Describe("Reset host", func() { + var ( + ctx = context.Background() + dbName = "reset_host_test" + hapi API + db *gorm.DB + hostId, clusterId strfmt.UUID + host models.Host + ctrl *gomock.Controller + mockEventsHandler *events.MockHandler + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + ctrl = gomock.NewController(GinkgoT()) + mockEventsHandler = events.NewMockHandler(ctrl) + hapi = NewManager(getTestLog(), db, mockEventsHandler, nil, nil, createValidatorCfg(), nil) + }) + + tests := []struct { + state string + success bool + statusCode int32 + }{ + {state: models.HostStatusPreparingForInstallation, success: true}, + {state: models.HostStatusInstalling, success: true}, + {state: models.HostStatusInstallingInProgress, success: true}, + {state: models.HostStatusInstalled, success: true}, + {state: models.HostStatusError, success: true}, + {state: models.HostStatusDisabled, success: true}, + {state: models.HostStatusDiscovering, success: false, statusCode: http.StatusConflict}, + {state: models.HostStatusKnown, success: false, statusCode: http.StatusConflict}, + {state: 
models.HostStatusPendingForInput, success: false, statusCode: http.StatusConflict}, + {state: models.HostStatusInstallingPendingUserAction, success: false, statusCode: http.StatusConflict}, + {state: models.HostStatusResettingPendingUserAction, success: false, statusCode: http.StatusConflict}, + {state: models.HostStatusDisconnected, success: false, statusCode: http.StatusConflict}, + } + + acceptNewEvents := func(times int) { + mockEventsHandler.EXPECT().AddEvent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(times) + } + + for _, t := range tests { + It(fmt.Sprintf("reset from state %s", t.state), func() { + hostId = strfmt.UUID(uuid.New().String()) + clusterId = strfmt.UUID(uuid.New().String()) + host = getTestHost(hostId, clusterId, "") + host.Status = swag.String(t.state) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + eventsNum := 1 + if t.success { + eventsNum++ + } + acceptNewEvents(eventsNum) + err := hapi.ResetHost(ctx, &host, "reason", db) + h := getHost(hostId, clusterId, db) + if t.success { + Expect(err).ShouldNot(HaveOccurred()) + Expect(swag.StringValue(h.Status)).Should(Equal(models.HostStatusResetting)) + } else { + Expect(err).Should(HaveOccurred()) + Expect(err.StatusCode()).Should(Equal(t.statusCode)) + Expect(swag.StringValue(h.Status)).Should(Equal(t.state)) + } + }) + } + + AfterEach(func() { + ctrl.Finish() + common.DeleteTestDB(db, dbName) + }) +}) + +var _ = Describe("Install", func() { + var ( + ctx = context.Background() + hapi API + db *gorm.DB + ctrl *gomock.Controller + mockEvents *events.MockHandler + hostId, clusterId strfmt.UUID + host models.Host + dbName = "transition_install" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + ctrl = gomock.NewController(GinkgoT()) + mockEvents = events.NewMockHandler(ctrl) + hapi = NewManager(getTestLog(), db, mockEvents, nil, nil, createValidatorCfg(), nil) + hostId = strfmt.UUID(uuid.New().String()) + 
clusterId = strfmt.UUID(uuid.New().String()) + }) + + Context("install host", func() { + success := func(reply error) { + Expect(reply).To(BeNil()) + h := getHost(hostId, clusterId, db) + Expect(*h.Status).Should(Equal(HostStatusInstalling)) + Expect(*h.StatusInfo).Should(Equal(statusInfoInstalling)) + } + + failure := func(reply error) { + Expect(reply).To(HaveOccurred()) + } + + noChange := func(reply error) { + Expect(reply).To(BeNil()) + h := getHost(hostId, clusterId, db) + Expect(*h.Status).Should(Equal(HostStatusDisabled)) + } + + tests := []struct { + name string + srcState string + validation func(error) + }{ + { + name: "prepared", + srcState: models.HostStatusPreparingForInstallation, + validation: success, + }, + { + name: "known", + srcState: HostStatusKnown, + validation: failure, + }, + { + name: "disabled nothing change", + srcState: HostStatusDisabled, + validation: noChange, + }, + { + name: "disconnected", + srcState: HostStatusDisconnected, + validation: failure, + }, + { + name: "discovering", + srcState: HostStatusDiscovering, + validation: failure, + }, + { + name: "error", + srcState: HostStatusError, + validation: failure, + }, + { + name: "installed", + srcState: HostStatusInstalled, + validation: failure, + }, + { + name: "installing", + srcState: HostStatusInstalling, + validation: failure, + }, + { + name: "in-progress", + srcState: HostStatusInstallingInProgress, + validation: failure, + }, + { + name: "insufficient", + srcState: HostStatusInsufficient, + validation: failure, + }, + { + name: "resetting", + srcState: HostStatusResetting, + validation: failure, + }, + } + + for i := range tests { + t := tests[i] + It(t.name, func() { + host = getTestHost(hostId, clusterId, t.srcState) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + mockEvents.EXPECT().AddEvent(gomock.Any(), hostId.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"%s\" to \"installing\" (Installation in progress)", 
host.ID.String(), t.srcState), + gomock.Any(), host.ClusterID.String()) + t.validation(hapi.Install(ctx, &host, nil)) + }) + } + }) + + Context("install with transaction", func() { + BeforeEach(func() { + host = getTestHost(hostId, clusterId, models.HostStatusPreparingForInstallation) + host.StatusInfo = swag.String(models.HostStatusPreparingForInstallation) + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + }) + + It("success", func() { + tx := db.Begin() + Expect(tx.Error).To(BeNil()) + mockEvents.EXPECT().AddEvent(gomock.Any(), hostId.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"preparing-for-installation\" to \"installing\" (Installation in progress)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + Expect(hapi.Install(ctx, &host, tx)).ShouldNot(HaveOccurred()) + Expect(tx.Commit().Error).ShouldNot(HaveOccurred()) + h := getHost(hostId, clusterId, db) + Expect(*h.Status).Should(Equal(HostStatusInstalling)) + Expect(*h.StatusInfo).Should(Equal(statusInfoInstalling)) + }) + + It("rollback transition", func() { + tx := db.Begin() + Expect(tx.Error).To(BeNil()) + mockEvents.EXPECT().AddEvent(gomock.Any(), hostId.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"preparing-for-installation\" to \"installing\" (Installation in progress)", host.ID.String()), + gomock.Any(), host.ClusterID.String()) + Expect(hapi.Install(ctx, &host, tx)).ShouldNot(HaveOccurred()) + Expect(tx.Rollback().Error).ShouldNot(HaveOccurred()) + h := getHost(hostId, clusterId, db) + Expect(*h.Status).Should(Equal(models.HostStatusPreparingForInstallation)) + Expect(*h.StatusInfo).Should(Equal(models.HostStatusPreparingForInstallation)) + }) + }) + + AfterEach(func() { + common.DeleteTestDB(db, dbName) + }) +}) + +var _ = Describe("Disable", func() { + var ( + ctx = context.Background() + hapi API + db *gorm.DB + ctrl *gomock.Controller + mockEvents *events.MockHandler + hostId, clusterId strfmt.UUID + 
host models.Host + dbName = "transition_disable" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + ctrl = gomock.NewController(GinkgoT()) + mockEvents = events.NewMockHandler(ctrl) + hapi = NewManager(getTestLog(), db, mockEvents, nil, nil, createValidatorCfg(), nil) + hostId = strfmt.UUID(uuid.New().String()) + clusterId = strfmt.UUID(uuid.New().String()) + }) + + Context("disable host", func() { + var srcState string + success := func(reply error) { + Expect(reply).To(BeNil()) + h := getHost(hostId, clusterId, db) + Expect(*h.Status).Should(Equal(HostStatusDisabled)) + Expect(*h.StatusInfo).Should(Equal(statusInfoDisabled)) + } + + failure := func(reply error) { + Expect(reply).To(HaveOccurred()) + h := getHost(hostId, clusterId, db) + Expect(*h.Status).Should(Equal(srcState)) + } + + mockEventsUpdateStatus := func(srcState string) { + mockEvents.EXPECT().AddEvent(gomock.Any(), hostId.String(), models.EventSeverityInfo, + fmt.Sprintf(`Host %s: updated status from "%s" to "disabled" (Host is disabled)`, + host.ID.String(), srcState), + gomock.Any(), host.ClusterID.String()).Times(1) + } + + tests := []struct { + name string + srcState string + validation func(error) + mocks []func(string) + }{ + { + name: "known", + srcState: HostStatusKnown, + validation: success, + mocks: []func(string){mockEventsUpdateStatus}, + }, + { + name: "disabled nothing change", + srcState: HostStatusDisabled, + validation: failure, + }, + { + name: "disconnected", + srcState: HostStatusDisconnected, + validation: success, + mocks: []func(string){mockEventsUpdateStatus}, + }, + { + name: "discovering", + srcState: HostStatusDiscovering, + validation: success, + mocks: []func(string){mockEventsUpdateStatus}, + }, + { + name: "error", + srcState: HostStatusError, + validation: failure, + }, + { + name: "installed", + srcState: HostStatusInstalled, + validation: failure, + }, + { + name: "installing", + srcState: HostStatusInstalling, + validation: failure, 
+ }, + { + name: "in-progress", + srcState: HostStatusInstallingInProgress, + validation: failure, + }, + { + name: "insufficient", + srcState: HostStatusInsufficient, + validation: success, + mocks: []func(string){mockEventsUpdateStatus}, + }, + { + name: "resetting", + srcState: HostStatusResetting, + validation: failure, + }, + { + name: models.HostStatusPendingForInput, + srcState: models.HostStatusPendingForInput, + validation: success, + mocks: []func(string){mockEventsUpdateStatus}, + }, + } + + for i := range tests { + t := tests[i] + It(t.name, func() { + srcState = t.srcState + host = getTestHost(hostId, clusterId, srcState) + for _, m := range t.mocks { + m(t.srcState) + } + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + t.validation(hapi.DisableHost(ctx, &host)) + }) + } + }) + + AfterEach(func() { + ctrl.Finish() + common.DeleteTestDB(db, dbName) + }) +}) + +var _ = Describe("Enable", func() { + var ( + ctx = context.Background() + hapi API + db *gorm.DB + ctrl *gomock.Controller + mockEvents *events.MockHandler + hostId, clusterId strfmt.UUID + host models.Host + dbName = "transition_enable" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + ctrl = gomock.NewController(GinkgoT()) + mockEvents = events.NewMockHandler(ctrl) + hapi = NewManager(getTestLog(), db, mockEvents, nil, nil, createValidatorCfg(), nil) + hostId = strfmt.UUID(uuid.New().String()) + clusterId = strfmt.UUID(uuid.New().String()) + }) + + Context("enable host", func() { + var srcState string + success := func(reply error) { + Expect(reply).To(BeNil()) + h := getHost(hostId, clusterId, db) + Expect(*h.Status).Should(Equal(HostStatusDiscovering)) + Expect(*h.StatusInfo).Should(Equal(statusInfoDiscovering)) + Expect(h.Inventory).Should(Equal("")) + } + + failure := func(reply error) { + Expect(reply).Should(HaveOccurred()) + h := getHost(hostId, clusterId, db) + Expect(*h.Status).Should(Equal(srcState)) + 
Expect(h.Inventory).Should(Equal(defaultHwInfo)) + } + + tests := []struct { + name string + srcState string + validation func(error) + sendEvent bool + }{ + { + name: "known", + srcState: HostStatusKnown, + validation: failure, + sendEvent: false, + }, + { + name: "disabled to enable", + srcState: HostStatusDisabled, + validation: success, + sendEvent: true, + }, + { + name: "disconnected", + srcState: HostStatusDisconnected, + validation: failure, + sendEvent: false, + }, + { + name: "discovering", + srcState: HostStatusDiscovering, + validation: failure, + sendEvent: false, + }, + { + name: "error", + srcState: HostStatusError, + validation: failure, + sendEvent: false, + }, + { + name: "installed", + srcState: HostStatusInstalled, + validation: failure, + sendEvent: false, + }, + { + name: "installing", + srcState: HostStatusInstalling, + validation: failure, + sendEvent: false, + }, + { + name: "in-progress", + srcState: HostStatusInstallingInProgress, + validation: failure, + sendEvent: false, + }, + { + name: "insufficient", + srcState: HostStatusInsufficient, + validation: failure, + sendEvent: false, + }, + { + name: "resetting", + srcState: HostStatusResetting, + validation: failure, + sendEvent: false, + }, + } + + for i := range tests { + t := tests[i] + It(t.name, func() { + srcState = t.srcState + host = getTestHost(hostId, clusterId, srcState) + host.Inventory = defaultHwInfo + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + if t.sendEvent { + mockEvents.EXPECT().AddEvent(gomock.Any(), hostId.String(), models.EventSeverityInfo, + fmt.Sprintf("Host %s: updated status from \"%s\" to \"discovering\" (Waiting for host hardware info)", common.GetHostnameForMsg(&host), srcState), + gomock.Any(), host.ClusterID.String()) + } + t.validation(hapi.EnableHost(ctx, &host)) }) } }) AfterEach(func() { - db.Close() + common.DeleteTestDB(db, dbName) + }) +}) + +type statusInfoChecker interface { + check(statusInfo *string) +} + +type valueChecker struct { 
+ value string +} + +func (v *valueChecker) check(value *string) { + if value == nil { + Expect(v.value).To(Equal("")) + } else { + Expect(*value).To(Equal(v.value)) + } +} + +func makeValueChecker(value string) statusInfoChecker { + return &valueChecker{value: value} +} + +type validationsChecker struct { + expected map[validationID]validationCheckResult +} + +func (j *validationsChecker) check(validationsStr string) { + validationMap := make(map[string][]validationResult) + Expect(json.Unmarshal([]byte(validationsStr), &validationMap)).ToNot(HaveOccurred()) +next: + for id, checkedResult := range j.expected { + category, err := id.category() + Expect(err).ToNot(HaveOccurred()) + results, ok := validationMap[category] + Expect(ok).To(BeTrue()) + for _, r := range results { + if r.ID == id { + Expect(r.Status).To(Equal(checkedResult.status), "id = %s", id.String()) + Expect(r.Message).To(MatchRegexp(checkedResult.messagePattern)) + continue next + } + } + // Should not reach here + Expect(false).To(BeTrue()) + } +} + +type validationCheckResult struct { + status validationStatus + messagePattern string +} + +func makeJsonChecker(expected map[validationID]validationCheckResult) *validationsChecker { + return &validationsChecker{expected: expected} +} + +var _ = Describe("Refresh Host", func() { + var ( + ctx = context.Background() + hapi API + db *gorm.DB + hostId, clusterId strfmt.UUID + host models.Host + cluster common.Cluster + mockEvents *events.MockHandler + ctrl *gomock.Controller + dbName string = "host_transition_test_refresh_host" + ) + + BeforeEach(func() { + db = common.PrepareTestDB(dbName, &events.Event{}) + ctrl = gomock.NewController(GinkgoT()) + mockEvents = events.NewMockHandler(ctrl) + hapi = NewManager(getTestLog(), db, mockEvents, nil, nil, createValidatorCfg(), nil) + hostId = strfmt.UUID(uuid.New().String()) + clusterId = strfmt.UUID(uuid.New().String()) + }) + Context("All transitions", func() { + var srcState string + tests := []struct { + 
name string + srcState string + inventory string + role string + machineNetworkCidr string + validCheckInTime bool + dstState string + statusInfoChecker statusInfoChecker + validationsChecker *validationsChecker + errorExpected bool + }{ + { + name: "discovering to disconnected", + validCheckInTime: false, + srcState: HostStatusDiscovering, + dstState: HostStatusDisconnected, + statusInfoChecker: makeValueChecker(statusInfoDisconnected), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationFailure, messagePattern: "Host is disconnected"}, + HasInventory: {status: ValidationFailure, messagePattern: "Inventory has not been received for the host"}, + HasMinCPUCores: {status: ValidationPending, messagePattern: "Missing inventory"}, + HasMinMemory: {status: ValidationPending, messagePattern: "Missing inventory"}, + HasMinValidDisks: {status: ValidationPending, messagePattern: "Missing inventory"}, + IsMachineCidrDefined: {status: ValidationFailure, messagePattern: "Machine network CIDR is undefined"}, + IsRoleDefined: {status: ValidationFailure, messagePattern: "Role is undefined"}, + HasCPUCoresForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + HasMemoryForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + IsHostnameUnique: {status: ValidationPending, messagePattern: "Missing inventory"}, + BelongsToMachineCidr: {status: ValidationPending, messagePattern: "Missing inventory or machine network CIDR"}, + }), + errorExpected: false, + }, + { + name: "insufficient to disconnected", + validCheckInTime: false, + srcState: HostStatusInsufficient, + dstState: HostStatusDisconnected, + statusInfoChecker: makeValueChecker(statusInfoDisconnected), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationFailure, messagePattern: "Host is disconnected"}, + HasInventory: {status: ValidationFailure, 
messagePattern: "Inventory has not been received for the host"}, + HasMinCPUCores: {status: ValidationPending, messagePattern: "Missing inventory"}, + HasMinMemory: {status: ValidationPending, messagePattern: "Missing inventory"}, + HasMinValidDisks: {status: ValidationPending, messagePattern: "Missing inventory"}, + IsMachineCidrDefined: {status: ValidationFailure, messagePattern: "Machine network CIDR is undefined"}, + IsRoleDefined: {status: ValidationFailure, messagePattern: "Role is undefined"}, + HasCPUCoresForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + HasMemoryForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + IsHostnameUnique: {status: ValidationPending, messagePattern: "Missing inventory"}, + BelongsToMachineCidr: {status: ValidationPending, messagePattern: "Missing inventory or machine network CIDR"}, + }), + errorExpected: false, + }, + { + name: "known to disconnected", + validCheckInTime: false, + srcState: HostStatusKnown, + dstState: HostStatusDisconnected, + statusInfoChecker: makeValueChecker(statusInfoDisconnected), + errorExpected: false, + }, + { + name: "pending to disconnected", + validCheckInTime: false, + srcState: HostStatusPendingForInput, + dstState: HostStatusDisconnected, + statusInfoChecker: makeValueChecker(statusInfoDisconnected), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationFailure, messagePattern: "Host is disconnected"}, + HasInventory: {status: ValidationFailure, messagePattern: "Inventory has not been received for the host"}, + HasMinCPUCores: {status: ValidationPending, messagePattern: "Missing inventory"}, + HasMinMemory: {status: ValidationPending, messagePattern: "Missing inventory"}, + HasMinValidDisks: {status: ValidationPending, messagePattern: "Missing inventory"}, + IsMachineCidrDefined: {status: ValidationFailure, messagePattern: "Machine network CIDR is undefined"}, + 
IsRoleDefined: {status: ValidationFailure, messagePattern: "Role is undefined"}, + HasCPUCoresForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + HasMemoryForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + IsHostnameUnique: {status: ValidationPending, messagePattern: "Missing inventory"}, + BelongsToMachineCidr: {status: ValidationPending, messagePattern: "Missing inventory or machine network CIDR"}, + }), + errorExpected: false, + }, + { + name: "disconnected to disconnected", + validCheckInTime: false, + srcState: HostStatusDisconnected, + dstState: HostStatusDisconnected, + statusInfoChecker: makeValueChecker(statusInfoDisconnected), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationFailure, messagePattern: "Host is disconnected"}, + HasInventory: {status: ValidationFailure, messagePattern: "Inventory has not been received for the host"}, + HasMinCPUCores: {status: ValidationPending, messagePattern: "Missing inventory"}, + HasMinMemory: {status: ValidationPending, messagePattern: "Missing inventory"}, + HasMinValidDisks: {status: ValidationPending, messagePattern: "Missing inventory"}, + IsMachineCidrDefined: {status: ValidationFailure, messagePattern: "Machine network CIDR is undefined"}, + IsRoleDefined: {status: ValidationFailure, messagePattern: "Role is undefined"}, + HasCPUCoresForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + HasMemoryForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + IsHostnameUnique: {status: ValidationPending, messagePattern: "Missing inventory"}, + BelongsToMachineCidr: {status: ValidationPending, messagePattern: "Missing inventory or machine network CIDR"}, + }), + errorExpected: false, + }, + { + name: "disconnected to discovering", + validCheckInTime: true, + srcState: HostStatusDisconnected, + dstState: HostStatusDiscovering, + 
statusInfoChecker: makeValueChecker(statusInfoDiscovering), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationFailure, messagePattern: "Inventory has not been received for the host"}, + HasMinCPUCores: {status: ValidationPending, messagePattern: "Missing inventory"}, + HasMinMemory: {status: ValidationPending, messagePattern: "Missing inventory"}, + HasMinValidDisks: {status: ValidationPending, messagePattern: "Missing inventory"}, + IsMachineCidrDefined: {status: ValidationFailure, messagePattern: "Machine network CIDR is undefined"}, + IsRoleDefined: {status: ValidationFailure, messagePattern: "Role is undefined"}, + HasCPUCoresForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + HasMemoryForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + IsHostnameUnique: {status: ValidationPending, messagePattern: "Missing inventory"}, + BelongsToMachineCidr: {status: ValidationPending, messagePattern: "Missing inventory or machine network CIDR"}, + }), + errorExpected: false, + }, + { + name: "discovering to discovering", + validCheckInTime: true, + srcState: HostStatusDiscovering, + dstState: HostStatusDiscovering, + statusInfoChecker: makeValueChecker(statusInfoDiscovering), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationFailure, messagePattern: "Inventory has not been received for the host"}, + HasMinCPUCores: {status: ValidationPending, messagePattern: "Missing inventory"}, + HasMinMemory: {status: ValidationPending, messagePattern: "Missing inventory"}, + HasMinValidDisks: {status: ValidationPending, messagePattern: "Missing inventory"}, + IsMachineCidrDefined: {status: ValidationFailure, messagePattern: "Machine network 
CIDR is undefined"}, + IsRoleDefined: {status: ValidationFailure, messagePattern: "Role is undefined"}, + HasCPUCoresForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + HasMemoryForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + IsHostnameUnique: {status: ValidationPending, messagePattern: "Missing inventory"}, + BelongsToMachineCidr: {status: ValidationPending, messagePattern: "Missing inventory or machine network CIDR"}, + }), + errorExpected: false, + }, + { + name: "disconnected to insufficient (1)", + validCheckInTime: true, + srcState: HostStatusDisconnected, + dstState: HostStatusInsufficient, + statusInfoChecker: makeValueChecker(statusInfoInsufficientHardware), + inventory: insufficientHWInventory(), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationFailure, messagePattern: "Require at least 8 GiB RAM, found only 0 GiB"}, + HasMinValidDisks: {status: ValidationFailure, messagePattern: "Require a disk of at least 120 GB"}, + IsMachineCidrDefined: {status: ValidationFailure, messagePattern: "Machine network CIDR is undefined"}, + IsRoleDefined: {status: ValidationFailure, messagePattern: "Role is undefined"}, + HasCPUCoresForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + HasMemoryForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationPending, messagePattern: "Missing inventory or machine network CIDR"}, + }), + errorExpected: false, + }, + { + name: "insufficient to 
insufficient (1)", + validCheckInTime: true, + srcState: HostStatusInsufficient, + dstState: HostStatusInsufficient, + statusInfoChecker: makeValueChecker(statusInfoInsufficientHardware), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationFailure, messagePattern: "Require at least 8 GiB RAM, found only 0 GiB"}, + HasMinValidDisks: {status: ValidationFailure, messagePattern: "Require a disk of at least 120 GB"}, + IsMachineCidrDefined: {status: ValidationFailure, messagePattern: "Machine network CIDR is undefined"}, + IsRoleDefined: {status: ValidationFailure, messagePattern: "Role is undefined"}, + HasCPUCoresForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + HasMemoryForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationPending, messagePattern: "Missing inventory or machine network CIDR"}, + }), + inventory: insufficientHWInventory(), + errorExpected: false, + }, + { + name: "discovering to insufficient (1)", + validCheckInTime: true, + srcState: HostStatusDiscovering, + dstState: HostStatusInsufficient, + statusInfoChecker: makeValueChecker(statusInfoInsufficientHardware), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: 
ValidationFailure, messagePattern: "Require at least 8 GiB RAM, found only 0 GiB"}, + HasMinValidDisks: {status: ValidationFailure, messagePattern: "Require a disk of at least 120 GB"}, + IsMachineCidrDefined: {status: ValidationFailure, messagePattern: "Machine network CIDR is undefined"}, + IsRoleDefined: {status: ValidationFailure, messagePattern: "Role is undefined"}, + HasCPUCoresForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + HasMemoryForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationPending, messagePattern: "Missing inventory or machine network CIDR"}, + }), + inventory: insufficientHWInventory(), + errorExpected: false, + }, + { + name: "pending to insufficient (1)", + validCheckInTime: true, + srcState: HostStatusPendingForInput, + dstState: HostStatusPendingForInput, + statusInfoChecker: makeValueChecker(""), + inventory: insufficientHWInventory(), + errorExpected: true, + }, + { + name: "known to insufficient (1)", + validCheckInTime: true, + srcState: HostStatusKnown, + dstState: HostStatusKnown, + statusInfoChecker: makeValueChecker(""), + inventory: insufficientHWInventory(), + errorExpected: true, + }, + { + name: "disconnected to pending", + validCheckInTime: true, + srcState: HostStatusDisconnected, + dstState: HostStatusPendingForInput, + statusInfoChecker: makeValueChecker(statusInfoPendingForInput), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + 
HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationFailure, messagePattern: "Machine network CIDR is undefined"}, + IsRoleDefined: {status: ValidationFailure, messagePattern: "Role is undefined"}, + HasCPUCoresForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + HasMemoryForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationPending, messagePattern: "Missing inventory or machine network CIDR"}, + }), + inventory: workerInventory(), + errorExpected: false, + }, + { + name: "discovering to pending", + validCheckInTime: true, + srcState: HostStatusDiscovering, + dstState: HostStatusPendingForInput, + machineNetworkCidr: "5.6.7.0/24", + statusInfoChecker: makeValueChecker(statusInfoPendingForInput), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationFailure, messagePattern: "Role is undefined"}, + HasCPUCoresForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + HasMemoryForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname is unique in cluster"}, + 
BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDR"}, + }), + inventory: workerInventory(), + errorExpected: false, + }, + { + name: "insufficient to pending", + validCheckInTime: true, + srcState: HostStatusInsufficient, + dstState: HostStatusPendingForInput, + machineNetworkCidr: "5.6.7.0/24", + statusInfoChecker: makeValueChecker(statusInfoPendingForInput), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationFailure, messagePattern: "Role is undefined"}, + HasCPUCoresForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + HasMemoryForRole: {status: ValidationPending, messagePattern: "Missing inventory or role"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDR"}, + }), + inventory: workerInventory(), + errorExpected: false, + }, + { + name: "known to pending", + validCheckInTime: true, + srcState: HostStatusKnown, + dstState: HostStatusPendingForInput, + role: "worker", + statusInfoChecker: makeValueChecker(statusInfoPendingForInput), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: 
{status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationFailure, messagePattern: "Machine network CIDR is undefined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationPending, messagePattern: "Missing inventory or machine network CIDR"}, + }), + inventory: workerInventory(), + errorExpected: false, + }, + { + name: "pending to pending", + validCheckInTime: true, + srcState: HostStatusPendingForInput, + dstState: HostStatusPendingForInput, + role: "worker", + statusInfoChecker: makeValueChecker(statusInfoPendingForInput), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationFailure, messagePattern: "Machine network CIDR is undefined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: 
ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationPending, messagePattern: "Missing inventory or machine network CIDR"}, + }), + inventory: workerInventory(), + errorExpected: false, + }, + { + name: "disconnected to insufficient (2)", + validCheckInTime: true, + srcState: HostStatusDisconnected, + dstState: HostStatusInsufficient, + machineNetworkCidr: "5.6.7.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDR "}, + }), + inventory: workerInventory(), + errorExpected: false, + }, + { + name: "discovering to insufficient (2)", + validCheckInTime: true, + 
srcState: HostStatusDiscovering, + dstState: HostStatusInsufficient, + machineNetworkCidr: "5.6.7.0/24", + role: "master", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationFailure, messagePattern: "Require at least 4 CPU cores for master role, found only 2"}, + HasMemoryForRole: {status: ValidationFailure, messagePattern: "Require at least 16 GiB RAM role master, found only 8"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDR "}, + }), + inventory: workerInventory(), + errorExpected: false, + }, + { + name: "insufficient to insufficient (2)", + validCheckInTime: true, + srcState: HostStatusInsufficient, + dstState: HostStatusInsufficient, + machineNetworkCidr: "1.2.3.0/24", + role: "master", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: 
ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationFailure, messagePattern: "Require at least 4 CPU cores for master role, found only 2"}, + HasMemoryForRole: {status: ValidationFailure, messagePattern: "Require at least 16 GiB RAM role master, found only 8"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + inventory: workerInventory(), + errorExpected: false, + }, + { + name: "pending to insufficient (2)", + validCheckInTime: true, + srcState: HostStatusPendingForInput, + dstState: HostStatusInsufficient, + machineNetworkCidr: "1.2.3.0/24", + role: "master", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + inventory: workerInventory(), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: 
ValidationFailure, messagePattern: "Require at least 4 CPU cores for master role, found only 2"}, + HasMemoryForRole: {status: ValidationFailure, messagePattern: "Require at least 16 GiB RAM role master, found only 8"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + errorExpected: false, + }, + { + name: "known to insufficient (2)", + validCheckInTime: true, + srcState: HostStatusKnown, + dstState: HostStatusInsufficient, + machineNetworkCidr: "5.6.7.0/24", + role: "master", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role master"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role master"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: " is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDR"}, + }), + inventory: masterInventory(), + errorExpected: false, + }, + { + name: "insufficient to insufficient (2)", + validCheckInTime: true, + srcState: 
HostStatusInsufficient, + dstState: HostStatusInsufficient, + machineNetworkCidr: "5.6.7.0/24", + role: "master", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role master"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role master"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: " is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDR"}, + }), + inventory: masterInventory(), + errorExpected: false, + }, + { + name: "insufficient to insufficient (localhost)", + validCheckInTime: true, + srcState: HostStatusInsufficient, + dstState: HostStatusInsufficient, + machineNetworkCidr: "1.2.3.0/24", + role: "master", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + 
HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role master"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role master"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: " is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + IsHostnameValid: {status: ValidationFailure, messagePattern: "Hostname localhost is forbidden"}, + }), + inventory: masterInventoryWithHostname("localhost"), + errorExpected: false, + }, + { + name: "discovering to known", + validCheckInTime: true, + srcState: HostStatusDiscovering, + dstState: HostStatusKnown, + machineNetworkCidr: "1.2.3.0/24", + role: "master", + statusInfoChecker: makeValueChecker(""), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role master"}, + 
HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role master"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: " is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + IsHostnameValid: {status: ValidationSuccess, messagePattern: "Hostname .* is allowed"}, + }), + inventory: masterInventory(), + errorExpected: false, + }, + { + name: "insufficient to known", + validCheckInTime: true, + srcState: HostStatusInsufficient, + dstState: HostStatusKnown, + machineNetworkCidr: "1.2.3.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(""), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: " is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + IsHostnameValid: {status: ValidationSuccess, messagePattern: "Hostname .* is allowed"}, + }), + inventory: masterInventory(), + errorExpected: false, + }, + { + name: "pending to known", + 
validCheckInTime: true, + srcState: HostStatusPendingForInput, + dstState: HostStatusKnown, + machineNetworkCidr: "1.2.3.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(""), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: " is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + IsHostnameValid: {status: ValidationSuccess, messagePattern: "Hostname .* is allowed"}, + }), + inventory: masterInventory(), + errorExpected: false, + }, + { + name: "known to known", + validCheckInTime: true, + srcState: HostStatusKnown, + dstState: HostStatusKnown, + machineNetworkCidr: "1.2.3.0/24", + role: "master", + statusInfoChecker: makeValueChecker(""), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: 
"Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role master"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role master"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: " is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + IsHostnameValid: {status: ValidationSuccess, messagePattern: "Hostname .* is allowed"}, + }), + inventory: masterInventory(), + errorExpected: false, + }, + { + name: "known to known with unexpected role", + validCheckInTime: true, + srcState: HostStatusKnown, + dstState: HostStatusKnown, + machineNetworkCidr: "1.2.3.0/24", + role: "kuku", + statusInfoChecker: makeValueChecker(""), + inventory: masterInventory(), + errorExpected: true, + }, + } + + for i := range tests { + t := tests[i] + It(t.name, func() { + hostCheckInAt := strfmt.DateTime(time.Now()) + if !t.validCheckInTime { + // Timeout for checkin is 3 minutes so subtract 4 minutes from the current time + hostCheckInAt = strfmt.DateTime(time.Now().Add(-4 * time.Minute)) + } + srcState = t.srcState + host = getTestHost(hostId, clusterId, srcState) + host.Inventory = t.inventory + host.Role = models.HostRole(t.role) + host.CheckedInAt = hostCheckInAt + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + cluster = getTestCluster(clusterId, t.machineNetworkCidr) + Expect(db.Create(&cluster).Error).ToNot(HaveOccurred()) + if srcState != t.dstState { + mockEvents.EXPECT().AddEvent(gomock.Any(), hostId.String(), 
common.GetEventSeverityFromHostStatus(t.dstState), + gomock.Any(), gomock.Any(), host.ClusterID.String()) + } + err := hapi.RefreshStatus(ctx, &host, db) + if t.errorExpected { + Expect(err).To(HaveOccurred()) + } else { + Expect(err).ToNot(HaveOccurred()) + } + var resultHost models.Host + Expect(db.Take(&resultHost, "id = ? and cluster_id = ?", hostId.String(), clusterId.String()).Error).ToNot(HaveOccurred()) + Expect(resultHost.Role).To(Equal(models.HostRole(t.role))) + Expect(resultHost.Status).To(Equal(&t.dstState)) + t.statusInfoChecker.check(resultHost.StatusInfo) + if t.validationsChecker != nil { + t.validationsChecker.check(resultHost.ValidationsInfo) + } + }) + } + }) + Context("Pending timed out", func() { + tests := []struct { + name string + clusterStatus string + dstState string + statusInfo string + errorExpected bool + }{ + { + name: "No timeout", + dstState: models.HostStatusPreparingForInstallation, + statusInfo: "", + clusterStatus: models.ClusterStatusPreparingForInstallation, + }, + { + name: "Timeout", + dstState: HostStatusError, + statusInfo: statusInfoPreparingTimedOut, + clusterStatus: models.ClusterStatusInstalled, + }, + } + for i := range tests { + t := tests[i] + It(t.name, func() { + host = getTestHost(hostId, clusterId, models.HostStatusPreparingForInstallation) + host.Inventory = masterInventory() + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + cluster = getTestCluster(clusterId, "1.2.3.0/24") + cluster.Status = &t.clusterStatus + Expect(db.Create(&cluster).Error).ToNot(HaveOccurred()) + if *host.Status != t.dstState { + mockEvents.EXPECT().AddEvent(gomock.Any(), hostId.String(), common.GetEventSeverityFromHostStatus(t.dstState), + gomock.Any(), gomock.Any(), host.ClusterID.String()) + } + err := hapi.RefreshStatus(ctx, &host, db) + if t.errorExpected { + Expect(err).To(HaveOccurred()) + } else { + Expect(err).ToNot(HaveOccurred()) + } + var resultHost models.Host + Expect(db.Take(&resultHost, "id = ? 
and cluster_id = ?", hostId.String(), clusterId.String()).Error).ToNot(HaveOccurred()) + Expect(swag.StringValue(resultHost.Status)).To(Equal(t.dstState)) + Expect(swag.StringValue(resultHost.StatusInfo)).To(Equal(t.statusInfo)) + }) + } + }) + Context("Unique hostname", func() { + var srcState string + var otherHostID strfmt.UUID + + BeforeEach(func() { + otherHostID = strfmt.UUID(uuid.New().String()) + }) + + tests := []struct { + name string + srcState string + inventory string + role string + machineNetworkCidr string + dstState string + requestedHostname string + otherState string + otherRequestedHostname string + otherInventory string + statusInfoChecker statusInfoChecker + validationsChecker *validationsChecker + errorExpected bool + }{ + { + name: "insufficient to known", + srcState: HostStatusInsufficient, + dstState: HostStatusKnown, + machineNetworkCidr: "1.2.3.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(""), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: " is unique in cluster"}, + BelongsToMachineCidr: {status: 
ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + inventory: masterInventoryWithHostname("first"), + otherState: HostStatusInsufficient, + otherInventory: masterInventoryWithHostname("second"), + errorExpected: false, + }, + { + name: "insufficient to insufficient (same hostname) 1", + srcState: HostStatusInsufficient, + dstState: HostStatusInsufficient, + machineNetworkCidr: "1.2.3.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationFailure, messagePattern: " is not unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + inventory: masterInventoryWithHostname("first"), + otherState: HostStatusInsufficient, + otherInventory: masterInventoryWithHostname("first"), + errorExpected: false, + }, + { + name: "insufficient to insufficient (same hostname) 2", + srcState: HostStatusInsufficient, + dstState: HostStatusInsufficient, + machineNetworkCidr: "1.2.3.0/24", + role: 
"worker", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationFailure, messagePattern: " is not unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + inventory: masterInventoryWithHostname("first"), + otherState: HostStatusInsufficient, + otherInventory: masterInventoryWithHostname("second"), + otherRequestedHostname: "first", + errorExpected: false, + }, + { + name: "insufficient to insufficient (same hostname) 3", + srcState: HostStatusInsufficient, + dstState: HostStatusInsufficient, + machineNetworkCidr: "1.2.3.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: 
"Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationFailure, messagePattern: " is not unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + inventory: masterInventoryWithHostname("first"), + requestedHostname: "second", + otherState: HostStatusInsufficient, + otherInventory: masterInventoryWithHostname("second"), + errorExpected: false, + }, + { + name: "insufficient to insufficient (same hostname) 4", + srcState: HostStatusInsufficient, + dstState: HostStatusInsufficient, + machineNetworkCidr: "1.2.3.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + 
HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationFailure, messagePattern: " is not unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + inventory: masterInventoryWithHostname("first"), + requestedHostname: "third", + otherState: HostStatusInsufficient, + otherInventory: masterInventoryWithHostname("second"), + otherRequestedHostname: "third", + errorExpected: false, + }, + { + name: "insufficient to known 2", + srcState: HostStatusInsufficient, + dstState: HostStatusKnown, + machineNetworkCidr: "1.2.3.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(""), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: " is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + inventory: 
masterInventoryWithHostname("first"), + requestedHostname: "third", + otherState: HostStatusInsufficient, + otherInventory: masterInventoryWithHostname("second"), + otherRequestedHostname: "forth", + errorExpected: false, + }, + { + name: "known to known", + srcState: HostStatusKnown, + dstState: HostStatusKnown, + machineNetworkCidr: "1.2.3.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(""), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: " is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + inventory: masterInventoryWithHostname("first"), + otherState: HostStatusInsufficient, + otherInventory: masterInventoryWithHostname("second"), + errorExpected: false, + }, + { + name: "known to insufficient (same hostname) 1", + srcState: HostStatusKnown, + dstState: HostStatusInsufficient, + machineNetworkCidr: "1.2.3.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: 
makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationFailure, messagePattern: " is not unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + inventory: masterInventoryWithHostname("first"), + otherState: HostStatusInsufficient, + otherInventory: masterInventoryWithHostname("first"), + errorExpected: false, + }, + { + name: "known to insufficient (same hostname) 2", + srcState: HostStatusKnown, + dstState: HostStatusInsufficient, + machineNetworkCidr: "1.2.3.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: 
ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationFailure, messagePattern: " is not unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + inventory: masterInventoryWithHostname("first"), + otherState: HostStatusInsufficient, + otherInventory: masterInventoryWithHostname("second"), + otherRequestedHostname: "first", + errorExpected: false, + }, + { + name: "known to insufficient (same hostname) 3", + srcState: HostStatusKnown, + dstState: HostStatusInsufficient, + machineNetworkCidr: "1.2.3.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: 
"Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationFailure, messagePattern: " is not unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + inventory: masterInventoryWithHostname("first"), + requestedHostname: "second", + otherState: HostStatusInsufficient, + otherInventory: masterInventoryWithHostname("second"), + errorExpected: false, + }, + { + name: "known to insufficient (same hostname) 4", + srcState: HostStatusKnown, + dstState: HostStatusInsufficient, + machineNetworkCidr: "1.2.3.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(statusInfoNotReadyForInstall), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationFailure, messagePattern: " is not unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + inventory: masterInventoryWithHostname("first"), + requestedHostname: "third", + otherState: HostStatusInsufficient, + otherInventory: masterInventoryWithHostname("second"), + 
otherRequestedHostname: "third", + errorExpected: false, + }, + { + name: "known to known 2", + srcState: HostStatusKnown, + dstState: HostStatusKnown, + machineNetworkCidr: "1.2.3.0/24", + role: "worker", + statusInfoChecker: makeValueChecker(""), + validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ + IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, + HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, + HasMinCPUCores: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores"}, + HasMinMemory: {status: ValidationSuccess, messagePattern: "Sufficient minimum RAM"}, + HasMinValidDisks: {status: ValidationSuccess, messagePattern: "Sufficient disk capacity"}, + IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine network CIDR is defined"}, + IsRoleDefined: {status: ValidationSuccess, messagePattern: "Role is defined"}, + HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, + HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, + IsHostnameUnique: {status: ValidationSuccess, messagePattern: " is unique in cluster"}, + BelongsToMachineCidr: {status: ValidationSuccess, messagePattern: "Host belongs to machine network CIDR"}, + }), + inventory: masterInventoryWithHostname("first"), + requestedHostname: "third", + otherState: HostStatusInsufficient, + otherInventory: masterInventoryWithHostname("first"), + otherRequestedHostname: "forth", + errorExpected: false, + }, + } + + for i := range tests { + t := tests[i] + It(t.name, func() { + srcState = t.srcState + host = getTestHost(hostId, clusterId, srcState) + host.Inventory = t.inventory + host.Role = models.HostRole(t.role) + host.CheckedInAt = strfmt.DateTime(time.Now()) + host.RequestedHostname = t.requestedHostname + Expect(db.Create(&host).Error).ShouldNot(HaveOccurred()) + otherHost := 
getTestHost(otherHostID, clusterId, t.otherState) + otherHost.RequestedHostname = t.otherRequestedHostname + otherHost.Inventory = t.otherInventory + Expect(db.Create(&otherHost).Error).ShouldNot(HaveOccurred()) + cluster = getTestCluster(clusterId, t.machineNetworkCidr) + Expect(db.Create(&cluster).Error).ToNot(HaveOccurred()) + if !t.errorExpected && srcState != t.dstState { + mockEvents.EXPECT().AddEvent(gomock.Any(), hostId.String(), models.EventSeverityInfo, + gomock.Any(), gomock.Any(), clusterId.String()) + } + + err := hapi.RefreshStatus(ctx, &host, db) + if t.errorExpected { + Expect(err).To(HaveOccurred()) + } else { + Expect(err).ToNot(HaveOccurred()) + } + var resultHost models.Host + Expect(db.Take(&resultHost, "id = ? and cluster_id = ?", hostId.String(), clusterId.String()).Error).ToNot(HaveOccurred()) + Expect(resultHost.Role).To(Equal(models.HostRole(t.role))) + Expect(resultHost.Status).To(Equal(&t.dstState)) + t.statusInfoChecker.check(resultHost.StatusInfo) + if t.validationsChecker != nil { + t.validationsChecker.check(resultHost.ValidationsInfo) + } + }) + } + }) + Context("Cluster Errors", func() { + for _, srcState := range []string{ + models.HostStatusInstalling, + models.HostStatusInstallingInProgress, + models.HostStatusInstalled, + } { + It(fmt.Sprintf("host src: %s cluster error: false", srcState), func() { + h := getTestHost(hostId, clusterId, srcState) + h.Inventory = masterInventory() + Expect(db.Create(&h).Error).ShouldNot(HaveOccurred()) + c := getTestCluster(clusterId, "1.2.3.0/24") + c.Status = swag.String(models.ClusterStatusInstalling) + Expect(db.Create(&c).Error).ToNot(HaveOccurred()) + err := hapi.RefreshStatus(ctx, &h, db) + Expect(err).ShouldNot(HaveOccurred()) + Expect(swag.StringValue(h.Status)).Should(Equal(srcState)) + }) + It(fmt.Sprintf("host src: %s cluster error: true", srcState), func() { + h := getTestHost(hostId, clusterId, srcState) + h.Inventory = masterInventory() + 
Expect(db.Create(&h).Error).ShouldNot(HaveOccurred()) + c := getTestCluster(clusterId, "1.2.3.0/24") + c.Status = swag.String(models.ClusterStatusError) + Expect(db.Create(&c).Error).ToNot(HaveOccurred()) + mockEvents.EXPECT().AddEvent(gomock.Any(), hostId.String(), models.EventSeverityError, + "Host master-hostname: updated status from \"installed\" to \"error\" (Installation has been aborted due cluster errors)", + gomock.Any(), clusterId.String()) + err := hapi.RefreshStatus(ctx, &h, db) + Expect(err).ShouldNot(HaveOccurred()) + Expect(swag.StringValue(h.Status)).Should(Equal(models.HostStatusError)) + var resultHost models.Host + Expect(db.Take(&resultHost, "id = ? and cluster_id = ?", hostId.String(), clusterId.String()).Error).ToNot(HaveOccurred()) + Expect(swag.StringValue(resultHost.StatusInfo)).Should(Equal(statusInfoAbortingDueClusterErrors)) + }) + } + }) + AfterEach(func() { + common.DeleteTestDB(db, dbName) + ctrl.Finish() + }) }) diff --git a/internal/host/validation_id.go b/internal/host/validation_id.go new file mode 100644 index 000000000..3ff047e57 --- /dev/null +++ b/internal/host/validation_id.go @@ -0,0 +1,44 @@ +package host + +import ( + "net/http" + + "github.com/filanov/bm-inventory/internal/common" + "github.com/pkg/errors" + + "github.com/filanov/bm-inventory/models" +) + +type validationID models.HostValidationID + +const ( + IsConnected = validationID(models.HostValidationIDConnected) + HasInventory = validationID(models.HostValidationIDHasInventory) + IsMachineCidrDefined = validationID(models.HostValidationIDMachineCidrDefined) + BelongsToMachineCidr = validationID(models.HostValidationIDBelongsToMachineCidr) + HasMinCPUCores = validationID(models.HostValidationIDHasMinCPUCores) + HasMinValidDisks = validationID(models.HostValidationIDHasMinValidDisks) + HasMinMemory = validationID(models.HostValidationIDHasMinMemory) + HasCPUCoresForRole = validationID(models.HostValidationIDHasCPUCoresForRole) + HasMemoryForRole = 
validationID(models.HostValidationIDHasMemoryForRole) + IsHostnameUnique = validationID(models.HostValidationIDHostnameUnique) + IsRoleDefined = validationID(models.HostValidationIDRoleDefined) + IsHostnameValid = validationID(models.HostValidationIDHostnameValid) +) + +func (v validationID) category() (string, error) { + switch v { + case IsConnected, IsMachineCidrDefined, BelongsToMachineCidr: + return "network", nil + case HasInventory, HasMinCPUCores, HasMinValidDisks, HasMinMemory, + HasCPUCoresForRole, HasMemoryForRole, IsHostnameUnique, IsHostnameValid: + return "hardware", nil + case IsRoleDefined: + return "role", nil + } + return "", common.NewApiError(http.StatusInternalServerError, errors.Errorf("Unexpected validation id %s", string(v))) +} + +func (v validationID) String() string { + return string(v) +} diff --git a/internal/host/validator.go b/internal/host/validator.go new file mode 100644 index 000000000..e7c1bb0ce --- /dev/null +++ b/internal/host/validator.go @@ -0,0 +1,422 @@ +package host + +import ( + "encoding/json" + "fmt" + "net" + "time" + + "github.com/thoas/go-funk" + + "github.com/alecthomas/units" + + "github.com/filanov/bm-inventory/internal/network" + + "github.com/filanov/bm-inventory/internal/hardware" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/filanov/bm-inventory/internal/common" + "github.com/jinzhu/gorm" + + "github.com/filanov/bm-inventory/models" +) + +type validationStatus string + +const ( + ValidationSuccess validationStatus = "success" + ValidationFailure validationStatus = "failure" + ValidationPending validationStatus = "pending" + ValidationError validationStatus = "error" +) + +var forbiddenHostnames = []string{ + "localhost", +} + +func (v validationStatus) String() string { + return string(v) +} + +type validationContext struct { + host *models.Host + cluster *common.Cluster + inventory *models.Inventory + db *gorm.DB +} + +type validationConditon func(context *validationContext) 
validationStatus +type validationStringFormatter func(context *validationContext, status validationStatus) string + +type validation struct { + id validationID + condition validationConditon + formatter validationStringFormatter +} + +func gibToBytes(gib int64) int64 { + return gib * int64(units.GiB) +} + +func bytesToGiB(bytes int64) int64 { + return bytes / int64(units.GiB) +} + +func (c *validationContext) loadCluster() error { + var cluster common.Cluster + err := c.db.Preload("Hosts", "status <> ?", HostStatusDisabled).Take(&cluster, "id = ?", c.host.ClusterID.String()).Error + if err == nil { + c.cluster = &cluster + } + return err +} + +func (c *validationContext) loadInventory() error { + if c.host.Inventory != "" { + var inventory models.Inventory + err := json.Unmarshal([]byte(c.host.Inventory), &inventory) + if err != nil { + return err + } + if inventory.CPU == nil || inventory.Memory == nil || len(inventory.Disks) == 0 { + return errors.Errorf("Inventory is not valid") + } + c.inventory = &inventory + } + return nil +} + +func (c *validationContext) validateRole() error { + switch c.host.Role { + case models.HostRoleMaster, models.HostRoleWorker, "": + return nil + default: + return errors.Errorf("Illegal role defined: %s", c.host.Role) + } +} + +func (c *validationContext) validateMachineCIDR() error { + var err error + if c.cluster.MachineNetworkCidr != "" { + _, _, err = net.ParseCIDR(c.cluster.MachineNetworkCidr) + } + return err +} + +func newValidationContext(host *models.Host, db *gorm.DB) (*validationContext, error) { + ret := &validationContext{ + host: host, + db: db, + } + err := ret.loadCluster() + if err == nil { + err = ret.loadInventory() + } + if err == nil { + err = ret.validateRole() + } + if err == nil { + err = ret.validateMachineCIDR() + } + if err != nil { + return nil, err + } + return ret, nil +} + +func boolValue(b bool) validationStatus { + if b { + return ValidationSuccess + } else { + return ValidationFailure + } +} + +type 
validator struct { + log logrus.FieldLogger + hwValidatorCfg *hardware.ValidatorCfg +} + +func (v *validator) isConnected(c *validationContext) validationStatus { + return boolValue(c.host.CheckedInAt.String() == "" || time.Since(time.Time(c.host.CheckedInAt)) <= 3*time.Minute) +} + +func (v *validator) printConnected(context *validationContext, status validationStatus) string { + switch status { + case ValidationSuccess: + return "Host is connected" + case ValidationFailure: + return "Host is disconnected" + default: + return fmt.Sprintf("Unexpected status %s", status) + } +} + +func (v *validator) hasInventory(c *validationContext) validationStatus { + return boolValue(c.inventory != nil) +} + +func (v *validator) printHasInventory(context *validationContext, status validationStatus) string { + switch status { + case ValidationSuccess: + return "Valid inventory exists for the host" + case ValidationFailure: + return "Inventory has not been received for the host" + default: + return fmt.Sprintf("Unexpected status %s", status) + } +} + +func (v *validator) hasMinCpuCores(c *validationContext) validationStatus { + if c.inventory == nil { + return ValidationPending + } + return boolValue(c.inventory.CPU.Count >= v.hwValidatorCfg.MinCPUCores) +} + +func (v *validator) printHasMinCpuCores(c *validationContext, status validationStatus) string { + switch status { + case ValidationSuccess: + return "Sufficient CPU cores" + case ValidationFailure: + return fmt.Sprintf("Require at least %d CPU cores, found only %d", v.hwValidatorCfg.MinCPUCores, c.inventory.CPU.Count) + case ValidationPending: + return "Missing inventory" + default: + return fmt.Sprintf("Unexpected status %s", status) + } +} + +func (v *validator) hasMinMemory(c *validationContext) validationStatus { + if c.inventory == nil { + return ValidationPending + } + return boolValue(c.inventory.Memory.PhysicalBytes >= gibToBytes(v.hwValidatorCfg.MinRamGib)) +} + +func (v *validator) printHasMinMemory(c 
*validationContext, status validationStatus) string { + switch status { + case ValidationSuccess: + return "Sufficient minimum RAM" + case ValidationFailure: + return fmt.Sprintf("Require at least %d GiB RAM, found only %d GiB", v.hwValidatorCfg.MinRamGib, + bytesToGiB(c.inventory.Memory.PhysicalBytes)) + case ValidationPending: + return "Missing inventory" + default: + return fmt.Sprintf("Unexpected status %s", status) + } +} + +func (v *validator) hasMinValidDisks(c *validationContext) validationStatus { + if c.inventory == nil { + return ValidationPending + } + disks := hardware.ListValidDisks(c.inventory, gibToBytes(v.hwValidatorCfg.MinDiskSizeGb)) + return boolValue(len(disks) > 0) +} + +func (v *validator) printHasMinValidDisks(c *validationContext, status validationStatus) string { + switch status { + case ValidationSuccess: + return "Sufficient disk capacity" + case ValidationFailure: + return fmt.Sprintf("Require a disk of at least %d GB", v.hwValidatorCfg.MinDiskSizeGb) + case ValidationPending: + return "Missing inventory" + default: + return fmt.Sprintf("Unexpected status %s", status) + } +} + +func (v *validator) isRoleDefined(c *validationContext) validationStatus { + return boolValue(c.host.Role != "") +} + +func (v *validator) printIsRoleDefined(context *validationContext, status validationStatus) string { + switch status { + case ValidationSuccess: + return "Role is defined" + case ValidationFailure: + return "Role is undefined" + default: + return fmt.Sprintf("Unexpected status %s", status) + } +} + +func (v *validator) isMachineCidrDefined(c *validationContext) validationStatus { + return boolValue(c.cluster.MachineNetworkCidr != "") +} + +func (v *validator) printIsMachineCidrDefined(context *validationContext, status validationStatus) string { + switch status { + case ValidationSuccess: + return "Machine network CIDR is defined" + case ValidationFailure: + return "Machine network CIDR is undefined" + default: + return fmt.Sprintf("Unexpected 
status %s", status) + } +} + +func (v *validator) hasCpuCoresForRole(c *validationContext) validationStatus { + if c.inventory == nil || c.host.Role == "" { + return ValidationPending + } + switch c.host.Role { + case models.HostRoleMaster: + return boolValue(c.inventory.CPU.Count >= v.hwValidatorCfg.MinCPUCoresMaster) + case models.HostRoleWorker: + return boolValue(c.inventory.CPU.Count >= v.hwValidatorCfg.MinCPUCoresWorker) + default: + v.log.Errorf("Unexpected role %s", c.host.Role) + return ValidationError + } +} + +func (v *validator) getCpuCountForRole(role models.HostRole) int64 { + switch role { + case models.HostRoleMaster: + return v.hwValidatorCfg.MinCPUCoresMaster + case models.HostRoleWorker: + return v.hwValidatorCfg.MinCPUCoresWorker + default: + return v.hwValidatorCfg.MinCPUCores + } +} + +func (v *validator) printHasCpuCoresForRole(c *validationContext, status validationStatus) string { + switch status { + case ValidationSuccess: + return fmt.Sprintf("Sufficient CPU cores for role %s", c.host.Role) + case ValidationFailure: + return fmt.Sprintf("Require at least %d CPU cores for %s role, found only %d", + v.getCpuCountForRole(c.host.Role), c.host.Role, c.inventory.CPU.Count) + case ValidationPending: + return "Missing inventory or role" + default: + return fmt.Sprintf("Unexpected status %s", status) + } +} + +func (v *validator) hasMemoryForRole(c *validationContext) validationStatus { + if c.inventory == nil || c.host.Role == "" { + return ValidationPending + } + switch c.host.Role { + case models.HostRoleMaster: + return boolValue(c.inventory.Memory.PhysicalBytes >= gibToBytes(v.hwValidatorCfg.MinRamGibMaster)) + case models.HostRoleWorker: + return boolValue(c.inventory.Memory.PhysicalBytes >= gibToBytes(v.hwValidatorCfg.MinRamGibWorker)) + default: + v.log.Errorf("Unexpected role %s", c.host.Role) + return ValidationError + } +} + +func (v *validator) getMemoryForRole(role models.HostRole) int64 { + switch role { + case models.HostRoleMaster: 
+ return v.hwValidatorCfg.MinRamGibMaster + case models.HostRoleWorker: + return v.hwValidatorCfg.MinRamGibWorker + default: + return v.hwValidatorCfg.MinRamGib + } +} + +func (v *validator) printHasMemoryForRole(c *validationContext, status validationStatus) string { + switch status { + case ValidationSuccess: + return fmt.Sprintf("Sufficient RAM for role %s", c.host.Role) + case ValidationFailure: + return fmt.Sprintf("Require at least %d GiB RAM role %s, found only %d", + v.getMemoryForRole(c.host.Role), c.host.Role, bytesToGiB(c.inventory.Memory.PhysicalBytes)) + case ValidationPending: + return "Missing inventory or role" + default: + return fmt.Sprintf("Unexpected status %s", status) + } +} + +func (v *validator) belongsToMachineCidr(c *validationContext) validationStatus { + if c.inventory == nil || c.cluster.MachineNetworkCidr == "" { + return ValidationPending + } + return boolValue(network.IsHostInMachineNetCidr(v.log, c.cluster, c.host)) +} + +func (v *validator) printBelongsToMachineCidr(c *validationContext, status validationStatus) string { + switch status { + case ValidationSuccess: + return fmt.Sprintf("Host belongs to machine network CIDR %s", c.cluster.MachineNetworkCidr) + case ValidationFailure: + return fmt.Sprintf("Host does not belong to machine network CIDR %s", c.cluster.MachineNetworkCidr) + case ValidationPending: + return "Missing inventory or machine network CIDR" + default: + return fmt.Sprintf("Unexpected status %s", status) + } +} + +func getRealHostname(host *models.Host, inventory *models.Inventory) string { + if host.RequestedHostname != "" { + return host.RequestedHostname + } + return inventory.Hostname +} + +func (v *validator) isHostnameUnique(c *validationContext) validationStatus { + if c.inventory == nil { + return ValidationPending + } + realHostname := getRealHostname(c.host, c.inventory) + for _, h := range c.cluster.Hosts { + if h.ID.String() != c.host.ID.String() && h.Inventory != "" { + var otherInventory 
models.Inventory + if err := json.Unmarshal([]byte(h.Inventory), &otherInventory); err != nil { + v.log.WithError(err).Warnf("Illegal inventory for host %s", h.ID.String()) + // It is not our hostname + continue + } + if realHostname == getRealHostname(h, &otherInventory) { + return ValidationFailure + } + } + } + return ValidationSuccess +} + +func (v *validator) printHostnameUnique(c *validationContext, status validationStatus) string { + switch status { + case ValidationSuccess: + return fmt.Sprintf("Hostname %s is unique in cluster", getRealHostname(c.host, c.inventory)) + case ValidationFailure: + return fmt.Sprintf("Hostname %s is not unique in cluster", getRealHostname(c.host, c.inventory)) + case ValidationPending: + return "Missing inventory" + default: + return fmt.Sprintf("Unexpected status %s", status) + } +} + +func (v *validator) isHostnameValid(c *validationContext) validationStatus { + if c.inventory == nil { + return ValidationPending + } + return boolValue(!funk.ContainsString(forbiddenHostnames, getRealHostname(c.host, c.inventory))) +} + +func (v *validator) printHostnameValid(c *validationContext, status validationStatus) string { + switch status { + case ValidationSuccess: + return fmt.Sprintf("Hostname %s is allowed", getRealHostname(c.host, c.inventory)) + case ValidationFailure: + return fmt.Sprintf("Hostname %s is forbidden", getRealHostname(c.host, c.inventory)) + case ValidationPending: + return "Missing inventory" + default: + return fmt.Sprintf("Unexpected status %s", status) + } +} diff --git a/internal/identity/identity.go b/internal/identity/identity.go new file mode 100644 index 000000000..3854e4d37 --- /dev/null +++ b/internal/identity/identity.go @@ -0,0 +1,21 @@ +package identity + +import ( + "context" + "fmt" + + "github.com/filanov/bm-inventory/pkg/auth" +) + +func IsAdmin(ctx context.Context) bool { + return auth.UserRoleFromContext(ctx) == auth.AdminUserRole +} + +func GetUserIDFilter(ctx context.Context) string { + query 
:= "" + if !IsAdmin(ctx) { + user_id := auth.UserIDFromContext(ctx) + query = fmt.Sprintf("user_id = '%s'", user_id) + } + return query +} diff --git a/internal/imgexpirer/imgexpirer.go b/internal/imgexpirer/imgexpirer.go new file mode 100644 index 000000000..d0e7bb228 --- /dev/null +++ b/internal/imgexpirer/imgexpirer.go @@ -0,0 +1,99 @@ +package imgexpirer + +import ( + "context" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/filanov/bm-inventory/internal/events" + "github.com/filanov/bm-inventory/models" + logutil "github.com/filanov/bm-inventory/pkg/log" + "github.com/filanov/bm-inventory/pkg/requestid" + "github.com/sirupsen/logrus" +) + +const imagePrefix = "discovery-image-" +const imagePrefixLen = len(imagePrefix) +const dummyImage = "discovery-image-00000000-0000-0000-0000-000000000000" + +type Manager struct { + log logrus.FieldLogger + s3Client s3iface.S3API + s3Bucket string + deleteTime time.Duration + eventsHandler events.Handler +} + +func NewManager(log logrus.FieldLogger, s3Client s3iface.S3API, s3Bucket string, deleteTime time.Duration, eventsHandler events.Handler) *Manager { + return &Manager{ + log: log, + s3Client: s3Client, + s3Bucket: s3Bucket, + deleteTime: deleteTime, + eventsHandler: eventsHandler, + } +} + +func (m *Manager) ExpirationTask() { + ctx := requestid.ToContext(context.Background(), requestid.NewID()) + log := logutil.FromContext(ctx, m.log) + now := time.Now() + prefix := imagePrefix + + log.Info("Image expiration monitor woke up, checking for expired images...") + err := m.s3Client.ListObjectsPages(&s3.ListObjectsInput{Bucket: &m.s3Bucket, Prefix: &prefix}, + func(page *s3.ListObjectsOutput, lastPage bool) bool { + for _, object := range page.Contents { + m.handleObject(ctx, log, object, now) + } + return !lastPage + }) + if err != nil { + log.WithError(err).Error("Error listing objects") + return + } +} + +func (m *Manager) handleObject(ctx 
context.Context, log logrus.FieldLogger, object *s3.Object, now time.Time) { + // Delete dummy objects right away, they just take up space + if *object.Key == dummyImage { + m.deleteObject(ctx, log, object) + return + } + + // The timestamp that we really want is stored in a tag, but we check this one first as a cost optimization + if now.Before(object.LastModified.Add(m.deleteTime)) { + return + } + objectTags, err := m.s3Client.GetObjectTagging(&s3.GetObjectTaggingInput{Bucket: &m.s3Bucket, Key: object.Key}) + if err != nil { + log.WithError(err).Errorf("Error getting tags for object %s", *object.Key) + return + } + for _, tag := range objectTags.TagSet { + if *tag.Key == "create_sec_since_epoch" { + objTime, _ := strconv.ParseInt(*tag.Value, 10, 64) + if now.After(time.Unix(objTime, 0).Add(m.deleteTime)) { + m.deleteObject(ctx, log, object) + } + } + } +} + +func (m *Manager) deleteObject(ctx context.Context, log logrus.FieldLogger, object *s3.Object) { + _, err := m.s3Client.DeleteObject(&s3.DeleteObjectInput{Bucket: &m.s3Bucket, Key: object.Key}) + if err != nil { + log.WithError(err).Errorf("Error deleting object %s", *object.Key) + return + } + eventMsg := "Deleted image from backend because it expired. It may be generated again at any time." 
+ m.eventsHandler.AddEvent(ctx, clusterIDFromImageName(*object.Key), models.EventSeverityInfo, eventMsg, time.Now()) + log.Infof("Deleted expired image %s", *object.Key) +} + +func clusterIDFromImageName(imgName string) string { + //Image name format is "discovery-image-" + return imgName[imagePrefixLen:] +} diff --git a/internal/imgexpirer/imgexpirer_test.go b/internal/imgexpirer/imgexpirer_test.go new file mode 100644 index 000000000..290330e79 --- /dev/null +++ b/internal/imgexpirer/imgexpirer_test.go @@ -0,0 +1,114 @@ +package imgexpirer + +import ( + "context" + "io/ioutil" + "strconv" + "testing" + "time" + + "github.com/aws/aws-sdk-go/service/s3" + "github.com/filanov/bm-inventory/internal/events" + "github.com/filanov/bm-inventory/models" + "github.com/golang/mock/gomock" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/sirupsen/logrus" +) + +//go:generate mockgen -package imgexpirer -destination mock_s3iface.go github.com/aws/aws-sdk-go/service/s3/s3iface S3API + +func TestExpirer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Image expirer tests Suite") +} + +var _ = Describe("image_expirer", func() { + var ( + ctx = context.Background() + log = logrus.New() + ctrl *gomock.Controller + deleteTime time.Duration + mockAPI *MockS3API + mockEvents *events.MockHandler + bucket string + mgr *Manager + now time.Time + objKey = "discovery-image-d183c403-d27b-42e1-b0a4-1274ea1a5d77" + clusterId = "d183c403-d27b-42e1-b0a4-1274ea1a5d77" + tagKey = "create_sec_since_epoch" + ) + BeforeEach(func() { + ctrl = gomock.NewController(GinkgoT()) + log.SetOutput(ioutil.Discard) + bucket = "test" + mockAPI = NewMockS3API(ctrl) + mockEvents = events.NewMockHandler(ctrl) + deleteTime, _ = time.ParseDuration("60m") + mgr = NewManager(log, mockAPI, bucket, deleteTime, mockEvents) + now, _ = time.Parse(time.RFC3339, "2020-01-01T10:00:00+00:00") + }) + It("not_expired_image_not_reused", func() { + imgCreatedAt, _ := time.Parse(time.RFC3339, 
"2020-01-01T09:30:00+00:00") // 30 minutes ago + obj := s3.Object{Key: &objKey, LastModified: &imgCreatedAt} + mgr.handleObject(ctx, log, &obj, now) + }) + It("expired_image_not_reused", func() { + imgCreatedAt, _ := time.Parse(time.RFC3339, "2020-01-01T08:00:00+00:00") // Two hours ago + unixTime := imgCreatedAt.Unix() // Tag is also two hours ago + obj := s3.Object{Key: &objKey, LastModified: &imgCreatedAt} + taggingInput := s3.GetObjectTaggingInput{Bucket: &bucket, Key: &objKey} + tagValue := strconv.Itoa(int(unixTime)) + tag := s3.Tag{Key: &tagKey, Value: &tagValue} + tagSet := []*s3.Tag{&tag} + taggingOutput := s3.GetObjectTaggingOutput{TagSet: tagSet} + mockAPI.EXPECT().GetObjectTagging(&taggingInput).Return(&taggingOutput, nil) + deleteInput := s3.DeleteObjectInput{Bucket: &bucket, Key: &objKey} + mockAPI.EXPECT().DeleteObject(&deleteInput).Return(nil, nil) + mockEvents.EXPECT().AddEvent(gomock.Any(), clusterId, models.EventSeverityInfo, "Deleted image from backend because it expired. 
It may be generated again at any time.", gomock.Any()) + mgr.handleObject(ctx, log, &obj, now) + }) + It("not_expired_image_reused", func() { + imgCreatedAt, _ := time.Parse(time.RFC3339, "2020-01-01T08:00:00+00:00") // Two hours ago + durationToAdd, _ := time.ParseDuration("90m") + unixTime := imgCreatedAt.Add(durationToAdd).Unix() // Tag is now half an hour ago + obj := s3.Object{Key: &objKey, LastModified: &imgCreatedAt} + taggingInput := s3.GetObjectTaggingInput{Bucket: &bucket, Key: &objKey} + tagValue := strconv.Itoa(int(unixTime)) + tag := s3.Tag{Key: &tagKey, Value: &tagValue} + tagSet := []*s3.Tag{&tag} + taggingOutput := s3.GetObjectTaggingOutput{TagSet: tagSet} + mockAPI.EXPECT().GetObjectTagging(&taggingInput).Return(&taggingOutput, nil) + mgr.handleObject(ctx, log, &obj, now) + }) + It("expired_image_reused", func() { + imgCreatedAt, _ := time.Parse(time.RFC3339, "2020-01-01T07:00:00+00:00") // Three hours ago + durationToAdd, _ := time.ParseDuration("90m") + unixTime := imgCreatedAt.Add(durationToAdd).Unix() // Tag is now 1.5 hours ago + obj := s3.Object{Key: &objKey, LastModified: &imgCreatedAt} + taggingInput := s3.GetObjectTaggingInput{Bucket: &bucket, Key: &objKey} + tagValue := strconv.Itoa(int(unixTime)) + tag := s3.Tag{Key: &tagKey, Value: &tagValue} + tagSet := []*s3.Tag{&tag} + taggingOutput := s3.GetObjectTaggingOutput{TagSet: tagSet} + mockAPI.EXPECT().GetObjectTagging(&taggingInput).Return(&taggingOutput, nil) + deleteInput := s3.DeleteObjectInput{Bucket: &bucket, Key: &objKey} + mockAPI.EXPECT().DeleteObject(&deleteInput).Return(nil, nil) + mockEvents.EXPECT().AddEvent(gomock.Any(), clusterId, models.EventSeverityInfo, "Deleted image from backend because it expired. 
It may be generated again at any time.", gomock.Any()) + mgr.handleObject(ctx, log, &obj, now) + }) + It("dummy_image_expires_immediately", func() { + clusterId = "00000000-0000-0000-0000-000000000000" + objKey = "discovery-image-00000000-0000-0000-0000-000000000000" + imgCreatedAt, _ := time.Parse(time.RFC3339, "2020-01-01T08:00:00+00:00") // Two hours ago + obj := s3.Object{Key: &objKey, LastModified: &imgCreatedAt} + deleteInput := s3.DeleteObjectInput{Bucket: &bucket, Key: &objKey} + mockAPI.EXPECT().DeleteObject(&deleteInput).Return(nil, nil) + mockEvents.EXPECT().AddEvent(gomock.Any(), clusterId, models.EventSeverityInfo, "Deleted image from backend because it expired. It may be generated again at any time.", gomock.Any()) + mgr.handleObject(ctx, log, &obj, now) + }) + + AfterEach(func() { + ctrl.Finish() + }) +}) diff --git a/internal/imgexpirer/mock_s3iface.go b/internal/imgexpirer/mock_s3iface.go new file mode 100644 index 000000000..a3eaa4750 --- /dev/null +++ b/internal/imgexpirer/mock_s3iface.go @@ -0,0 +1,4734 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/aws/aws-sdk-go/service/s3/s3iface (interfaces: S3API) + +// Package imgexpirer is a generated GoMock package. 
+package imgexpirer + +import ( + context "context" + reflect "reflect" + + request "github.com/aws/aws-sdk-go/aws/request" + s3 "github.com/aws/aws-sdk-go/service/s3" + gomock "github.com/golang/mock/gomock" +) + +// MockS3API is a mock of S3API interface +type MockS3API struct { + ctrl *gomock.Controller + recorder *MockS3APIMockRecorder +} + +// MockS3APIMockRecorder is the mock recorder for MockS3API +type MockS3APIMockRecorder struct { + mock *MockS3API +} + +// NewMockS3API creates a new mock instance +func NewMockS3API(ctrl *gomock.Controller) *MockS3API { + mock := &MockS3API{ctrl: ctrl} + mock.recorder = &MockS3APIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockS3API) EXPECT() *MockS3APIMockRecorder { + return m.recorder +} + +// AbortMultipartUpload mocks base method +func (m *MockS3API) AbortMultipartUpload(arg0 *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AbortMultipartUpload", arg0) + ret0, _ := ret[0].(*s3.AbortMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AbortMultipartUpload indicates an expected call of AbortMultipartUpload +func (mr *MockS3APIMockRecorder) AbortMultipartUpload(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUpload", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUpload), arg0) +} + +// AbortMultipartUploadRequest mocks base method +func (m *MockS3API) AbortMultipartUploadRequest(arg0 *s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AbortMultipartUploadRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.AbortMultipartUploadOutput) + return ret0, ret1 +} + +// AbortMultipartUploadRequest indicates an expected call of AbortMultipartUploadRequest +func (mr 
*MockS3APIMockRecorder) AbortMultipartUploadRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUploadRequest", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUploadRequest), arg0) +} + +// AbortMultipartUploadWithContext mocks base method +func (m *MockS3API) AbortMultipartUploadWithContext(arg0 context.Context, arg1 *s3.AbortMultipartUploadInput, arg2 ...request.Option) (*s3.AbortMultipartUploadOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "AbortMultipartUploadWithContext", varargs...) + ret0, _ := ret[0].(*s3.AbortMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AbortMultipartUploadWithContext indicates an expected call of AbortMultipartUploadWithContext +func (mr *MockS3APIMockRecorder) AbortMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUploadWithContext), varargs...) 
+} + +// CompleteMultipartUpload mocks base method +func (m *MockS3API) CompleteMultipartUpload(arg0 *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompleteMultipartUpload", arg0) + ret0, _ := ret[0].(*s3.CompleteMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CompleteMultipartUpload indicates an expected call of CompleteMultipartUpload +func (mr *MockS3APIMockRecorder) CompleteMultipartUpload(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUpload", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUpload), arg0) +} + +// CompleteMultipartUploadRequest mocks base method +func (m *MockS3API) CompleteMultipartUploadRequest(arg0 *s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompleteMultipartUploadRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.CompleteMultipartUploadOutput) + return ret0, ret1 +} + +// CompleteMultipartUploadRequest indicates an expected call of CompleteMultipartUploadRequest +func (mr *MockS3APIMockRecorder) CompleteMultipartUploadRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUploadRequest", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUploadRequest), arg0) +} + +// CompleteMultipartUploadWithContext mocks base method +func (m *MockS3API) CompleteMultipartUploadWithContext(arg0 context.Context, arg1 *s3.CompleteMultipartUploadInput, arg2 ...request.Option) (*s3.CompleteMultipartUploadOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CompleteMultipartUploadWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.CompleteMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CompleteMultipartUploadWithContext indicates an expected call of CompleteMultipartUploadWithContext +func (mr *MockS3APIMockRecorder) CompleteMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUploadWithContext), varargs...) +} + +// CopyObject mocks base method +func (m *MockS3API) CopyObject(arg0 *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CopyObject", arg0) + ret0, _ := ret[0].(*s3.CopyObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CopyObject indicates an expected call of CopyObject +func (mr *MockS3APIMockRecorder) CopyObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObject", reflect.TypeOf((*MockS3API)(nil).CopyObject), arg0) +} + +// CopyObjectRequest mocks base method +func (m *MockS3API) CopyObjectRequest(arg0 *s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CopyObjectRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.CopyObjectOutput) + return ret0, ret1 +} + +// CopyObjectRequest indicates an expected call of CopyObjectRequest +func (mr *MockS3APIMockRecorder) CopyObjectRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObjectRequest", reflect.TypeOf((*MockS3API)(nil).CopyObjectRequest), arg0) +} + +// CopyObjectWithContext mocks base method +func (m *MockS3API) CopyObjectWithContext(arg0 context.Context, arg1 *s3.CopyObjectInput, arg2 ...request.Option) 
(*s3.CopyObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CopyObjectWithContext", varargs...) + ret0, _ := ret[0].(*s3.CopyObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CopyObjectWithContext indicates an expected call of CopyObjectWithContext +func (mr *MockS3APIMockRecorder) CopyObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObjectWithContext", reflect.TypeOf((*MockS3API)(nil).CopyObjectWithContext), varargs...) +} + +// CreateBucket mocks base method +func (m *MockS3API) CreateBucket(arg0 *s3.CreateBucketInput) (*s3.CreateBucketOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateBucket", arg0) + ret0, _ := ret[0].(*s3.CreateBucketOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateBucket indicates an expected call of CreateBucket +func (mr *MockS3APIMockRecorder) CreateBucket(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucket", reflect.TypeOf((*MockS3API)(nil).CreateBucket), arg0) +} + +// CreateBucketRequest mocks base method +func (m *MockS3API) CreateBucketRequest(arg0 *s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateBucketRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.CreateBucketOutput) + return ret0, ret1 +} + +// CreateBucketRequest indicates an expected call of CreateBucketRequest +func (mr *MockS3APIMockRecorder) CreateBucketRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucketRequest", reflect.TypeOf((*MockS3API)(nil).CreateBucketRequest), arg0) 
+} + +// CreateBucketWithContext mocks base method +func (m *MockS3API) CreateBucketWithContext(arg0 context.Context, arg1 *s3.CreateBucketInput, arg2 ...request.Option) (*s3.CreateBucketOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateBucketWithContext", varargs...) + ret0, _ := ret[0].(*s3.CreateBucketOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateBucketWithContext indicates an expected call of CreateBucketWithContext +func (mr *MockS3APIMockRecorder) CreateBucketWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucketWithContext", reflect.TypeOf((*MockS3API)(nil).CreateBucketWithContext), varargs...) +} + +// CreateMultipartUpload mocks base method +func (m *MockS3API) CreateMultipartUpload(arg0 *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateMultipartUpload", arg0) + ret0, _ := ret[0].(*s3.CreateMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateMultipartUpload indicates an expected call of CreateMultipartUpload +func (mr *MockS3APIMockRecorder) CreateMultipartUpload(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUpload", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUpload), arg0) +} + +// CreateMultipartUploadRequest mocks base method +func (m *MockS3API) CreateMultipartUploadRequest(arg0 *s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateMultipartUploadRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.CreateMultipartUploadOutput) + return ret0, 
ret1 +} + +// CreateMultipartUploadRequest indicates an expected call of CreateMultipartUploadRequest +func (mr *MockS3APIMockRecorder) CreateMultipartUploadRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadRequest", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadRequest), arg0) +} + +// CreateMultipartUploadWithContext mocks base method +func (m *MockS3API) CreateMultipartUploadWithContext(arg0 context.Context, arg1 *s3.CreateMultipartUploadInput, arg2 ...request.Option) (*s3.CreateMultipartUploadOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateMultipartUploadWithContext", varargs...) + ret0, _ := ret[0].(*s3.CreateMultipartUploadOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateMultipartUploadWithContext indicates an expected call of CreateMultipartUploadWithContext +func (mr *MockS3APIMockRecorder) CreateMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadWithContext), varargs...) 
+} + +// DeleteBucket mocks base method +func (m *MockS3API) DeleteBucket(arg0 *s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucket", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucket indicates an expected call of DeleteBucket +func (mr *MockS3APIMockRecorder) DeleteBucket(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucket", reflect.TypeOf((*MockS3API)(nil).DeleteBucket), arg0) +} + +// DeleteBucketAnalyticsConfiguration mocks base method +func (m *MockS3API) DeleteBucketAnalyticsConfiguration(arg0 *s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketAnalyticsConfiguration", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketAnalyticsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketAnalyticsConfiguration indicates an expected call of DeleteBucketAnalyticsConfiguration +func (mr *MockS3APIMockRecorder) DeleteBucketAnalyticsConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketAnalyticsConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketAnalyticsConfiguration), arg0) +} + +// DeleteBucketAnalyticsConfigurationRequest mocks base method +func (m *MockS3API) DeleteBucketAnalyticsConfigurationRequest(arg0 *s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketAnalyticsConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketAnalyticsConfigurationOutput) + return ret0, ret1 +} + +// DeleteBucketAnalyticsConfigurationRequest indicates an expected call of 
DeleteBucketAnalyticsConfigurationRequest +func (mr *MockS3APIMockRecorder) DeleteBucketAnalyticsConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketAnalyticsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketAnalyticsConfigurationRequest), arg0) +} + +// DeleteBucketAnalyticsConfigurationWithContext mocks base method +func (m *MockS3API) DeleteBucketAnalyticsConfigurationWithContext(arg0 context.Context, arg1 *s3.DeleteBucketAnalyticsConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketAnalyticsConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketAnalyticsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketAnalyticsConfigurationWithContext indicates an expected call of DeleteBucketAnalyticsConfigurationWithContext +func (mr *MockS3APIMockRecorder) DeleteBucketAnalyticsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketAnalyticsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketAnalyticsConfigurationWithContext), varargs...) 
+} + +// DeleteBucketCors mocks base method +func (m *MockS3API) DeleteBucketCors(arg0 *s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketCors", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketCorsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketCors indicates an expected call of DeleteBucketCors +func (mr *MockS3APIMockRecorder) DeleteBucketCors(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketCors", reflect.TypeOf((*MockS3API)(nil).DeleteBucketCors), arg0) +} + +// DeleteBucketCorsRequest mocks base method +func (m *MockS3API) DeleteBucketCorsRequest(arg0 *s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketCorsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketCorsOutput) + return ret0, ret1 +} + +// DeleteBucketCorsRequest indicates an expected call of DeleteBucketCorsRequest +func (mr *MockS3APIMockRecorder) DeleteBucketCorsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketCorsRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketCorsRequest), arg0) +} + +// DeleteBucketCorsWithContext mocks base method +func (m *MockS3API) DeleteBucketCorsWithContext(arg0 context.Context, arg1 *s3.DeleteBucketCorsInput, arg2 ...request.Option) (*s3.DeleteBucketCorsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketCorsWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.DeleteBucketCorsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketCorsWithContext indicates an expected call of DeleteBucketCorsWithContext +func (mr *MockS3APIMockRecorder) DeleteBucketCorsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketCorsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketCorsWithContext), varargs...) +} + +// DeleteBucketEncryption mocks base method +func (m *MockS3API) DeleteBucketEncryption(arg0 *s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketEncryption", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketEncryptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketEncryption indicates an expected call of DeleteBucketEncryption +func (mr *MockS3APIMockRecorder) DeleteBucketEncryption(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketEncryption", reflect.TypeOf((*MockS3API)(nil).DeleteBucketEncryption), arg0) +} + +// DeleteBucketEncryptionRequest mocks base method +func (m *MockS3API) DeleteBucketEncryptionRequest(arg0 *s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketEncryptionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketEncryptionOutput) + return ret0, ret1 +} + +// DeleteBucketEncryptionRequest indicates an expected call of DeleteBucketEncryptionRequest +func (mr *MockS3APIMockRecorder) DeleteBucketEncryptionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketEncryptionRequest", 
reflect.TypeOf((*MockS3API)(nil).DeleteBucketEncryptionRequest), arg0) +} + +// DeleteBucketEncryptionWithContext mocks base method +func (m *MockS3API) DeleteBucketEncryptionWithContext(arg0 context.Context, arg1 *s3.DeleteBucketEncryptionInput, arg2 ...request.Option) (*s3.DeleteBucketEncryptionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketEncryptionWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketEncryptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketEncryptionWithContext indicates an expected call of DeleteBucketEncryptionWithContext +func (mr *MockS3APIMockRecorder) DeleteBucketEncryptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketEncryptionWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketEncryptionWithContext), varargs...) 
+} + +// DeleteBucketInventoryConfiguration mocks base method +func (m *MockS3API) DeleteBucketInventoryConfiguration(arg0 *s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketInventoryConfiguration", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketInventoryConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketInventoryConfiguration indicates an expected call of DeleteBucketInventoryConfiguration +func (mr *MockS3APIMockRecorder) DeleteBucketInventoryConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketInventoryConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketInventoryConfiguration), arg0) +} + +// DeleteBucketInventoryConfigurationRequest mocks base method +func (m *MockS3API) DeleteBucketInventoryConfigurationRequest(arg0 *s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketInventoryConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketInventoryConfigurationOutput) + return ret0, ret1 +} + +// DeleteBucketInventoryConfigurationRequest indicates an expected call of DeleteBucketInventoryConfigurationRequest +func (mr *MockS3APIMockRecorder) DeleteBucketInventoryConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketInventoryConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketInventoryConfigurationRequest), arg0) +} + +// DeleteBucketInventoryConfigurationWithContext mocks base method +func (m *MockS3API) DeleteBucketInventoryConfigurationWithContext(arg0 context.Context, arg1 *s3.DeleteBucketInventoryConfigurationInput, arg2 ...request.Option) 
(*s3.DeleteBucketInventoryConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketInventoryConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketInventoryConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketInventoryConfigurationWithContext indicates an expected call of DeleteBucketInventoryConfigurationWithContext +func (mr *MockS3APIMockRecorder) DeleteBucketInventoryConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketInventoryConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketInventoryConfigurationWithContext), varargs...) +} + +// DeleteBucketLifecycle mocks base method +func (m *MockS3API) DeleteBucketLifecycle(arg0 *s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketLifecycle", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketLifecycleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketLifecycle indicates an expected call of DeleteBucketLifecycle +func (mr *MockS3APIMockRecorder) DeleteBucketLifecycle(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketLifecycle", reflect.TypeOf((*MockS3API)(nil).DeleteBucketLifecycle), arg0) +} + +// DeleteBucketLifecycleRequest mocks base method +func (m *MockS3API) DeleteBucketLifecycleRequest(arg0 *s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketLifecycleRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketLifecycleOutput) + return 
ret0, ret1 +} + +// DeleteBucketLifecycleRequest indicates an expected call of DeleteBucketLifecycleRequest +func (mr *MockS3APIMockRecorder) DeleteBucketLifecycleRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketLifecycleRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketLifecycleRequest), arg0) +} + +// DeleteBucketLifecycleWithContext mocks base method +func (m *MockS3API) DeleteBucketLifecycleWithContext(arg0 context.Context, arg1 *s3.DeleteBucketLifecycleInput, arg2 ...request.Option) (*s3.DeleteBucketLifecycleOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketLifecycleWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketLifecycleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketLifecycleWithContext indicates an expected call of DeleteBucketLifecycleWithContext +func (mr *MockS3APIMockRecorder) DeleteBucketLifecycleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketLifecycleWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketLifecycleWithContext), varargs...) 
+} + +// DeleteBucketMetricsConfiguration mocks base method +func (m *MockS3API) DeleteBucketMetricsConfiguration(arg0 *s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketMetricsConfiguration", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketMetricsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketMetricsConfiguration indicates an expected call of DeleteBucketMetricsConfiguration +func (mr *MockS3APIMockRecorder) DeleteBucketMetricsConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketMetricsConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketMetricsConfiguration), arg0) +} + +// DeleteBucketMetricsConfigurationRequest mocks base method +func (m *MockS3API) DeleteBucketMetricsConfigurationRequest(arg0 *s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketMetricsConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketMetricsConfigurationOutput) + return ret0, ret1 +} + +// DeleteBucketMetricsConfigurationRequest indicates an expected call of DeleteBucketMetricsConfigurationRequest +func (mr *MockS3APIMockRecorder) DeleteBucketMetricsConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketMetricsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketMetricsConfigurationRequest), arg0) +} + +// DeleteBucketMetricsConfigurationWithContext mocks base method +func (m *MockS3API) DeleteBucketMetricsConfigurationWithContext(arg0 context.Context, arg1 *s3.DeleteBucketMetricsConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error) { + 
m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketMetricsConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketMetricsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketMetricsConfigurationWithContext indicates an expected call of DeleteBucketMetricsConfigurationWithContext +func (mr *MockS3APIMockRecorder) DeleteBucketMetricsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketMetricsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketMetricsConfigurationWithContext), varargs...) +} + +// DeleteBucketPolicy mocks base method +func (m *MockS3API) DeleteBucketPolicy(arg0 *s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketPolicy", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketPolicy indicates an expected call of DeleteBucketPolicy +func (mr *MockS3APIMockRecorder) DeleteBucketPolicy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketPolicy", reflect.TypeOf((*MockS3API)(nil).DeleteBucketPolicy), arg0) +} + +// DeleteBucketPolicyRequest mocks base method +func (m *MockS3API) DeleteBucketPolicyRequest(arg0 *s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketPolicyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketPolicyOutput) + return ret0, ret1 +} + +// DeleteBucketPolicyRequest indicates an expected call of DeleteBucketPolicyRequest +func (mr 
*MockS3APIMockRecorder) DeleteBucketPolicyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketPolicyRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketPolicyRequest), arg0) +} + +// DeleteBucketPolicyWithContext mocks base method +func (m *MockS3API) DeleteBucketPolicyWithContext(arg0 context.Context, arg1 *s3.DeleteBucketPolicyInput, arg2 ...request.Option) (*s3.DeleteBucketPolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketPolicyWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketPolicyWithContext indicates an expected call of DeleteBucketPolicyWithContext +func (mr *MockS3APIMockRecorder) DeleteBucketPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketPolicyWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketPolicyWithContext), varargs...) 
+} + +// DeleteBucketReplication mocks base method +func (m *MockS3API) DeleteBucketReplication(arg0 *s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketReplication", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketReplicationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketReplication indicates an expected call of DeleteBucketReplication +func (mr *MockS3APIMockRecorder) DeleteBucketReplication(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketReplication", reflect.TypeOf((*MockS3API)(nil).DeleteBucketReplication), arg0) +} + +// DeleteBucketReplicationRequest mocks base method +func (m *MockS3API) DeleteBucketReplicationRequest(arg0 *s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketReplicationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketReplicationOutput) + return ret0, ret1 +} + +// DeleteBucketReplicationRequest indicates an expected call of DeleteBucketReplicationRequest +func (mr *MockS3APIMockRecorder) DeleteBucketReplicationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketReplicationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketReplicationRequest), arg0) +} + +// DeleteBucketReplicationWithContext mocks base method +func (m *MockS3API) DeleteBucketReplicationWithContext(arg0 context.Context, arg1 *s3.DeleteBucketReplicationInput, arg2 ...request.Option) (*s3.DeleteBucketReplicationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketReplicationWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.DeleteBucketReplicationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketReplicationWithContext indicates an expected call of DeleteBucketReplicationWithContext +func (mr *MockS3APIMockRecorder) DeleteBucketReplicationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketReplicationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketReplicationWithContext), varargs...) +} + +// DeleteBucketRequest mocks base method +func (m *MockS3API) DeleteBucketRequest(arg0 *s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketOutput) + return ret0, ret1 +} + +// DeleteBucketRequest indicates an expected call of DeleteBucketRequest +func (mr *MockS3APIMockRecorder) DeleteBucketRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketRequest), arg0) +} + +// DeleteBucketTagging mocks base method +func (m *MockS3API) DeleteBucketTagging(arg0 *s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketTagging", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketTagging indicates an expected call of DeleteBucketTagging +func (mr *MockS3APIMockRecorder) DeleteBucketTagging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketTagging", reflect.TypeOf((*MockS3API)(nil).DeleteBucketTagging), arg0) +} + +// DeleteBucketTaggingRequest mocks base method +func 
(m *MockS3API) DeleteBucketTaggingRequest(arg0 *s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketTaggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketTaggingOutput) + return ret0, ret1 +} + +// DeleteBucketTaggingRequest indicates an expected call of DeleteBucketTaggingRequest +func (mr *MockS3APIMockRecorder) DeleteBucketTaggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketTaggingRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketTaggingRequest), arg0) +} + +// DeleteBucketTaggingWithContext mocks base method +func (m *MockS3API) DeleteBucketTaggingWithContext(arg0 context.Context, arg1 *s3.DeleteBucketTaggingInput, arg2 ...request.Option) (*s3.DeleteBucketTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketTaggingWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketTaggingWithContext indicates an expected call of DeleteBucketTaggingWithContext +func (mr *MockS3APIMockRecorder) DeleteBucketTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketTaggingWithContext), varargs...) 
+} + +// DeleteBucketWebsite mocks base method +func (m *MockS3API) DeleteBucketWebsite(arg0 *s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketWebsite", arg0) + ret0, _ := ret[0].(*s3.DeleteBucketWebsiteOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketWebsite indicates an expected call of DeleteBucketWebsite +func (mr *MockS3APIMockRecorder) DeleteBucketWebsite(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWebsite", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWebsite), arg0) +} + +// DeleteBucketWebsiteRequest mocks base method +func (m *MockS3API) DeleteBucketWebsiteRequest(arg0 *s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBucketWebsiteRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteBucketWebsiteOutput) + return ret0, ret1 +} + +// DeleteBucketWebsiteRequest indicates an expected call of DeleteBucketWebsiteRequest +func (mr *MockS3APIMockRecorder) DeleteBucketWebsiteRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWebsiteRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWebsiteRequest), arg0) +} + +// DeleteBucketWebsiteWithContext mocks base method +func (m *MockS3API) DeleteBucketWebsiteWithContext(arg0 context.Context, arg1 *s3.DeleteBucketWebsiteInput, arg2 ...request.Option) (*s3.DeleteBucketWebsiteOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketWebsiteWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.DeleteBucketWebsiteOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketWebsiteWithContext indicates an expected call of DeleteBucketWebsiteWithContext +func (mr *MockS3APIMockRecorder) DeleteBucketWebsiteWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWebsiteWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWebsiteWithContext), varargs...) +} + +// DeleteBucketWithContext mocks base method +func (m *MockS3API) DeleteBucketWithContext(arg0 context.Context, arg1 *s3.DeleteBucketInput, arg2 ...request.Option) (*s3.DeleteBucketOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteBucketWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteBucketOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteBucketWithContext indicates an expected call of DeleteBucketWithContext +func (mr *MockS3APIMockRecorder) DeleteBucketWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWithContext), varargs...) 
+} + +// DeleteObject mocks base method +func (m *MockS3API) DeleteObject(arg0 *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObject", arg0) + ret0, _ := ret[0].(*s3.DeleteObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObject indicates an expected call of DeleteObject +func (mr *MockS3APIMockRecorder) DeleteObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObject", reflect.TypeOf((*MockS3API)(nil).DeleteObject), arg0) +} + +// DeleteObjectRequest mocks base method +func (m *MockS3API) DeleteObjectRequest(arg0 *s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjectRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteObjectOutput) + return ret0, ret1 +} + +// DeleteObjectRequest indicates an expected call of DeleteObjectRequest +func (mr *MockS3APIMockRecorder) DeleteObjectRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectRequest", reflect.TypeOf((*MockS3API)(nil).DeleteObjectRequest), arg0) +} + +// DeleteObjectTagging mocks base method +func (m *MockS3API) DeleteObjectTagging(arg0 *s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjectTagging", arg0) + ret0, _ := ret[0].(*s3.DeleteObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjectTagging indicates an expected call of DeleteObjectTagging +func (mr *MockS3APIMockRecorder) DeleteObjectTagging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTagging", reflect.TypeOf((*MockS3API)(nil).DeleteObjectTagging), arg0) +} + +// DeleteObjectTaggingRequest mocks base method +func (m 
*MockS3API) DeleteObjectTaggingRequest(arg0 *s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjectTaggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteObjectTaggingOutput) + return ret0, ret1 +} + +// DeleteObjectTaggingRequest indicates an expected call of DeleteObjectTaggingRequest +func (mr *MockS3APIMockRecorder) DeleteObjectTaggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTaggingRequest", reflect.TypeOf((*MockS3API)(nil).DeleteObjectTaggingRequest), arg0) +} + +// DeleteObjectTaggingWithContext mocks base method +func (m *MockS3API) DeleteObjectTaggingWithContext(arg0 context.Context, arg1 *s3.DeleteObjectTaggingInput, arg2 ...request.Option) (*s3.DeleteObjectTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteObjectTaggingWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjectTaggingWithContext indicates an expected call of DeleteObjectTaggingWithContext +func (mr *MockS3APIMockRecorder) DeleteObjectTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectTaggingWithContext), varargs...) 
+} + +// DeleteObjectWithContext mocks base method +func (m *MockS3API) DeleteObjectWithContext(arg0 context.Context, arg1 *s3.DeleteObjectInput, arg2 ...request.Option) (*s3.DeleteObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteObjectWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjectWithContext indicates an expected call of DeleteObjectWithContext +func (mr *MockS3APIMockRecorder) DeleteObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectWithContext), varargs...) +} + +// DeleteObjects mocks base method +func (m *MockS3API) DeleteObjects(arg0 *s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjects", arg0) + ret0, _ := ret[0].(*s3.DeleteObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjects indicates an expected call of DeleteObjects +func (mr *MockS3APIMockRecorder) DeleteObjects(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjects", reflect.TypeOf((*MockS3API)(nil).DeleteObjects), arg0) +} + +// DeleteObjectsRequest mocks base method +func (m *MockS3API) DeleteObjectsRequest(arg0 *s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteObjectsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeleteObjectsOutput) + return ret0, ret1 +} + +// DeleteObjectsRequest indicates an expected call of DeleteObjectsRequest +func (mr *MockS3APIMockRecorder) 
DeleteObjectsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsRequest", reflect.TypeOf((*MockS3API)(nil).DeleteObjectsRequest), arg0) +} + +// DeleteObjectsWithContext mocks base method +func (m *MockS3API) DeleteObjectsWithContext(arg0 context.Context, arg1 *s3.DeleteObjectsInput, arg2 ...request.Option) (*s3.DeleteObjectsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteObjectsWithContext", varargs...) + ret0, _ := ret[0].(*s3.DeleteObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteObjectsWithContext indicates an expected call of DeleteObjectsWithContext +func (mr *MockS3APIMockRecorder) DeleteObjectsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectsWithContext), varargs...) 
+} + +// DeletePublicAccessBlock mocks base method +func (m *MockS3API) DeletePublicAccessBlock(arg0 *s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePublicAccessBlock", arg0) + ret0, _ := ret[0].(*s3.DeletePublicAccessBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeletePublicAccessBlock indicates an expected call of DeletePublicAccessBlock +func (mr *MockS3APIMockRecorder) DeletePublicAccessBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePublicAccessBlock", reflect.TypeOf((*MockS3API)(nil).DeletePublicAccessBlock), arg0) +} + +// DeletePublicAccessBlockRequest mocks base method +func (m *MockS3API) DeletePublicAccessBlockRequest(arg0 *s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePublicAccessBlockRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.DeletePublicAccessBlockOutput) + return ret0, ret1 +} + +// DeletePublicAccessBlockRequest indicates an expected call of DeletePublicAccessBlockRequest +func (mr *MockS3APIMockRecorder) DeletePublicAccessBlockRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePublicAccessBlockRequest", reflect.TypeOf((*MockS3API)(nil).DeletePublicAccessBlockRequest), arg0) +} + +// DeletePublicAccessBlockWithContext mocks base method +func (m *MockS3API) DeletePublicAccessBlockWithContext(arg0 context.Context, arg1 *s3.DeletePublicAccessBlockInput, arg2 ...request.Option) (*s3.DeletePublicAccessBlockOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeletePublicAccessBlockWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.DeletePublicAccessBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeletePublicAccessBlockWithContext indicates an expected call of DeletePublicAccessBlockWithContext +func (mr *MockS3APIMockRecorder) DeletePublicAccessBlockWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePublicAccessBlockWithContext", reflect.TypeOf((*MockS3API)(nil).DeletePublicAccessBlockWithContext), varargs...) +} + +// GetBucketAccelerateConfiguration mocks base method +func (m *MockS3API) GetBucketAccelerateConfiguration(arg0 *s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketAccelerateConfiguration", arg0) + ret0, _ := ret[0].(*s3.GetBucketAccelerateConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketAccelerateConfiguration indicates an expected call of GetBucketAccelerateConfiguration +func (mr *MockS3APIMockRecorder) GetBucketAccelerateConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAccelerateConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketAccelerateConfiguration), arg0) +} + +// GetBucketAccelerateConfigurationRequest mocks base method +func (m *MockS3API) GetBucketAccelerateConfigurationRequest(arg0 *s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketAccelerateConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketAccelerateConfigurationOutput) + return ret0, ret1 +} + +// GetBucketAccelerateConfigurationRequest indicates an expected call of GetBucketAccelerateConfigurationRequest +func (mr 
*MockS3APIMockRecorder) GetBucketAccelerateConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAccelerateConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketAccelerateConfigurationRequest), arg0) +} + +// GetBucketAccelerateConfigurationWithContext mocks base method +func (m *MockS3API) GetBucketAccelerateConfigurationWithContext(arg0 context.Context, arg1 *s3.GetBucketAccelerateConfigurationInput, arg2 ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketAccelerateConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketAccelerateConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketAccelerateConfigurationWithContext indicates an expected call of GetBucketAccelerateConfigurationWithContext +func (mr *MockS3APIMockRecorder) GetBucketAccelerateConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAccelerateConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketAccelerateConfigurationWithContext), varargs...) 
+} + +// GetBucketAcl mocks base method +func (m *MockS3API) GetBucketAcl(arg0 *s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketAcl", arg0) + ret0, _ := ret[0].(*s3.GetBucketAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketAcl indicates an expected call of GetBucketAcl +func (mr *MockS3APIMockRecorder) GetBucketAcl(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAcl", reflect.TypeOf((*MockS3API)(nil).GetBucketAcl), arg0) +} + +// GetBucketAclRequest mocks base method +func (m *MockS3API) GetBucketAclRequest(arg0 *s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketAclRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketAclOutput) + return ret0, ret1 +} + +// GetBucketAclRequest indicates an expected call of GetBucketAclRequest +func (mr *MockS3APIMockRecorder) GetBucketAclRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAclRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketAclRequest), arg0) +} + +// GetBucketAclWithContext mocks base method +func (m *MockS3API) GetBucketAclWithContext(arg0 context.Context, arg1 *s3.GetBucketAclInput, arg2 ...request.Option) (*s3.GetBucketAclOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketAclWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.GetBucketAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketAclWithContext indicates an expected call of GetBucketAclWithContext +func (mr *MockS3APIMockRecorder) GetBucketAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAclWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketAclWithContext), varargs...) +} + +// GetBucketAnalyticsConfiguration mocks base method +func (m *MockS3API) GetBucketAnalyticsConfiguration(arg0 *s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketAnalyticsConfiguration", arg0) + ret0, _ := ret[0].(*s3.GetBucketAnalyticsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketAnalyticsConfiguration indicates an expected call of GetBucketAnalyticsConfiguration +func (mr *MockS3APIMockRecorder) GetBucketAnalyticsConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAnalyticsConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketAnalyticsConfiguration), arg0) +} + +// GetBucketAnalyticsConfigurationRequest mocks base method +func (m *MockS3API) GetBucketAnalyticsConfigurationRequest(arg0 *s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketAnalyticsConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketAnalyticsConfigurationOutput) + return ret0, ret1 +} + +// GetBucketAnalyticsConfigurationRequest indicates an expected call of GetBucketAnalyticsConfigurationRequest +func (mr *MockS3APIMockRecorder) GetBucketAnalyticsConfigurationRequest(arg0 interface{}) 
*gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAnalyticsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketAnalyticsConfigurationRequest), arg0) +} + +// GetBucketAnalyticsConfigurationWithContext mocks base method +func (m *MockS3API) GetBucketAnalyticsConfigurationWithContext(arg0 context.Context, arg1 *s3.GetBucketAnalyticsConfigurationInput, arg2 ...request.Option) (*s3.GetBucketAnalyticsConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketAnalyticsConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketAnalyticsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketAnalyticsConfigurationWithContext indicates an expected call of GetBucketAnalyticsConfigurationWithContext +func (mr *MockS3APIMockRecorder) GetBucketAnalyticsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAnalyticsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketAnalyticsConfigurationWithContext), varargs...) 
+} + +// GetBucketCors mocks base method +func (m *MockS3API) GetBucketCors(arg0 *s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketCors", arg0) + ret0, _ := ret[0].(*s3.GetBucketCorsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketCors indicates an expected call of GetBucketCors +func (mr *MockS3APIMockRecorder) GetBucketCors(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketCors", reflect.TypeOf((*MockS3API)(nil).GetBucketCors), arg0) +} + +// GetBucketCorsRequest mocks base method +func (m *MockS3API) GetBucketCorsRequest(arg0 *s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketCorsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketCorsOutput) + return ret0, ret1 +} + +// GetBucketCorsRequest indicates an expected call of GetBucketCorsRequest +func (mr *MockS3APIMockRecorder) GetBucketCorsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketCorsRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketCorsRequest), arg0) +} + +// GetBucketCorsWithContext mocks base method +func (m *MockS3API) GetBucketCorsWithContext(arg0 context.Context, arg1 *s3.GetBucketCorsInput, arg2 ...request.Option) (*s3.GetBucketCorsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketCorsWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.GetBucketCorsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketCorsWithContext indicates an expected call of GetBucketCorsWithContext +func (mr *MockS3APIMockRecorder) GetBucketCorsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketCorsWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketCorsWithContext), varargs...) +} + +// GetBucketEncryption mocks base method +func (m *MockS3API) GetBucketEncryption(arg0 *s3.GetBucketEncryptionInput) (*s3.GetBucketEncryptionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketEncryption", arg0) + ret0, _ := ret[0].(*s3.GetBucketEncryptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketEncryption indicates an expected call of GetBucketEncryption +func (mr *MockS3APIMockRecorder) GetBucketEncryption(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketEncryption", reflect.TypeOf((*MockS3API)(nil).GetBucketEncryption), arg0) +} + +// GetBucketEncryptionRequest mocks base method +func (m *MockS3API) GetBucketEncryptionRequest(arg0 *s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketEncryptionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketEncryptionOutput) + return ret0, ret1 +} + +// GetBucketEncryptionRequest indicates an expected call of GetBucketEncryptionRequest +func (mr *MockS3APIMockRecorder) GetBucketEncryptionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketEncryptionRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketEncryptionRequest), arg0) +} + +// GetBucketEncryptionWithContext 
mocks base method +func (m *MockS3API) GetBucketEncryptionWithContext(arg0 context.Context, arg1 *s3.GetBucketEncryptionInput, arg2 ...request.Option) (*s3.GetBucketEncryptionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketEncryptionWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketEncryptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketEncryptionWithContext indicates an expected call of GetBucketEncryptionWithContext +func (mr *MockS3APIMockRecorder) GetBucketEncryptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketEncryptionWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketEncryptionWithContext), varargs...) +} + +// GetBucketInventoryConfiguration mocks base method +func (m *MockS3API) GetBucketInventoryConfiguration(arg0 *s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketInventoryConfiguration", arg0) + ret0, _ := ret[0].(*s3.GetBucketInventoryConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketInventoryConfiguration indicates an expected call of GetBucketInventoryConfiguration +func (mr *MockS3APIMockRecorder) GetBucketInventoryConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketInventoryConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketInventoryConfiguration), arg0) +} + +// GetBucketInventoryConfigurationRequest mocks base method +func (m *MockS3API) GetBucketInventoryConfigurationRequest(arg0 *s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput) { + 
m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketInventoryConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketInventoryConfigurationOutput) + return ret0, ret1 +} + +// GetBucketInventoryConfigurationRequest indicates an expected call of GetBucketInventoryConfigurationRequest +func (mr *MockS3APIMockRecorder) GetBucketInventoryConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketInventoryConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketInventoryConfigurationRequest), arg0) +} + +// GetBucketInventoryConfigurationWithContext mocks base method +func (m *MockS3API) GetBucketInventoryConfigurationWithContext(arg0 context.Context, arg1 *s3.GetBucketInventoryConfigurationInput, arg2 ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketInventoryConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketInventoryConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketInventoryConfigurationWithContext indicates an expected call of GetBucketInventoryConfigurationWithContext +func (mr *MockS3APIMockRecorder) GetBucketInventoryConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketInventoryConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketInventoryConfigurationWithContext), varargs...) 
+} + +// GetBucketLifecycle mocks base method +func (m *MockS3API) GetBucketLifecycle(arg0 *s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLifecycle", arg0) + ret0, _ := ret[0].(*s3.GetBucketLifecycleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLifecycle indicates an expected call of GetBucketLifecycle +func (mr *MockS3APIMockRecorder) GetBucketLifecycle(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycle", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycle), arg0) +} + +// GetBucketLifecycleConfiguration mocks base method +func (m *MockS3API) GetBucketLifecycleConfiguration(arg0 *s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLifecycleConfiguration", arg0) + ret0, _ := ret[0].(*s3.GetBucketLifecycleConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLifecycleConfiguration indicates an expected call of GetBucketLifecycleConfiguration +func (mr *MockS3APIMockRecorder) GetBucketLifecycleConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleConfiguration), arg0) +} + +// GetBucketLifecycleConfigurationRequest mocks base method +func (m *MockS3API) GetBucketLifecycleConfigurationRequest(arg0 *s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLifecycleConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketLifecycleConfigurationOutput) + return ret0, ret1 +} + +// GetBucketLifecycleConfigurationRequest indicates an expected call of 
GetBucketLifecycleConfigurationRequest +func (mr *MockS3APIMockRecorder) GetBucketLifecycleConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleConfigurationRequest), arg0) +} + +// GetBucketLifecycleConfigurationWithContext mocks base method +func (m *MockS3API) GetBucketLifecycleConfigurationWithContext(arg0 context.Context, arg1 *s3.GetBucketLifecycleConfigurationInput, arg2 ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketLifecycleConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketLifecycleConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLifecycleConfigurationWithContext indicates an expected call of GetBucketLifecycleConfigurationWithContext +func (mr *MockS3APIMockRecorder) GetBucketLifecycleConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleConfigurationWithContext), varargs...) 
+} + +// GetBucketLifecycleRequest mocks base method +func (m *MockS3API) GetBucketLifecycleRequest(arg0 *s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLifecycleRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketLifecycleOutput) + return ret0, ret1 +} + +// GetBucketLifecycleRequest indicates an expected call of GetBucketLifecycleRequest +func (mr *MockS3APIMockRecorder) GetBucketLifecycleRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleRequest), arg0) +} + +// GetBucketLifecycleWithContext mocks base method +func (m *MockS3API) GetBucketLifecycleWithContext(arg0 context.Context, arg1 *s3.GetBucketLifecycleInput, arg2 ...request.Option) (*s3.GetBucketLifecycleOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketLifecycleWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketLifecycleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLifecycleWithContext indicates an expected call of GetBucketLifecycleWithContext +func (mr *MockS3APIMockRecorder) GetBucketLifecycleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleWithContext), varargs...) 
+} + +// GetBucketLocation mocks base method +func (m *MockS3API) GetBucketLocation(arg0 *s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLocation", arg0) + ret0, _ := ret[0].(*s3.GetBucketLocationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLocation indicates an expected call of GetBucketLocation +func (mr *MockS3APIMockRecorder) GetBucketLocation(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLocation", reflect.TypeOf((*MockS3API)(nil).GetBucketLocation), arg0) +} + +// GetBucketLocationRequest mocks base method +func (m *MockS3API) GetBucketLocationRequest(arg0 *s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLocationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketLocationOutput) + return ret0, ret1 +} + +// GetBucketLocationRequest indicates an expected call of GetBucketLocationRequest +func (mr *MockS3APIMockRecorder) GetBucketLocationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLocationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLocationRequest), arg0) +} + +// GetBucketLocationWithContext mocks base method +func (m *MockS3API) GetBucketLocationWithContext(arg0 context.Context, arg1 *s3.GetBucketLocationInput, arg2 ...request.Option) (*s3.GetBucketLocationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketLocationWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.GetBucketLocationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLocationWithContext indicates an expected call of GetBucketLocationWithContext +func (mr *MockS3APIMockRecorder) GetBucketLocationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLocationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLocationWithContext), varargs...) +} + +// GetBucketLogging mocks base method +func (m *MockS3API) GetBucketLogging(arg0 *s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLogging", arg0) + ret0, _ := ret[0].(*s3.GetBucketLoggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLogging indicates an expected call of GetBucketLogging +func (mr *MockS3APIMockRecorder) GetBucketLogging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLogging", reflect.TypeOf((*MockS3API)(nil).GetBucketLogging), arg0) +} + +// GetBucketLoggingRequest mocks base method +func (m *MockS3API) GetBucketLoggingRequest(arg0 *s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketLoggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketLoggingOutput) + return ret0, ret1 +} + +// GetBucketLoggingRequest indicates an expected call of GetBucketLoggingRequest +func (mr *MockS3APIMockRecorder) GetBucketLoggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLoggingRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLoggingRequest), arg0) +} + +// GetBucketLoggingWithContext mocks base method +func (m *MockS3API) 
GetBucketLoggingWithContext(arg0 context.Context, arg1 *s3.GetBucketLoggingInput, arg2 ...request.Option) (*s3.GetBucketLoggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketLoggingWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketLoggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketLoggingWithContext indicates an expected call of GetBucketLoggingWithContext +func (mr *MockS3APIMockRecorder) GetBucketLoggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLoggingWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLoggingWithContext), varargs...) +} + +// GetBucketMetricsConfiguration mocks base method +func (m *MockS3API) GetBucketMetricsConfiguration(arg0 *s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketMetricsConfiguration", arg0) + ret0, _ := ret[0].(*s3.GetBucketMetricsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketMetricsConfiguration indicates an expected call of GetBucketMetricsConfiguration +func (mr *MockS3APIMockRecorder) GetBucketMetricsConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketMetricsConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketMetricsConfiguration), arg0) +} + +// GetBucketMetricsConfigurationRequest mocks base method +func (m *MockS3API) GetBucketMetricsConfigurationRequest(arg0 *s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketMetricsConfigurationRequest", arg0) + ret0, _ 
:= ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketMetricsConfigurationOutput) + return ret0, ret1 +} + +// GetBucketMetricsConfigurationRequest indicates an expected call of GetBucketMetricsConfigurationRequest +func (mr *MockS3APIMockRecorder) GetBucketMetricsConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketMetricsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketMetricsConfigurationRequest), arg0) +} + +// GetBucketMetricsConfigurationWithContext mocks base method +func (m *MockS3API) GetBucketMetricsConfigurationWithContext(arg0 context.Context, arg1 *s3.GetBucketMetricsConfigurationInput, arg2 ...request.Option) (*s3.GetBucketMetricsConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketMetricsConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketMetricsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketMetricsConfigurationWithContext indicates an expected call of GetBucketMetricsConfigurationWithContext +func (mr *MockS3APIMockRecorder) GetBucketMetricsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketMetricsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketMetricsConfigurationWithContext), varargs...) 
+} + +// GetBucketNotification mocks base method +func (m *MockS3API) GetBucketNotification(arg0 *s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketNotification", arg0) + ret0, _ := ret[0].(*s3.NotificationConfigurationDeprecated) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketNotification indicates an expected call of GetBucketNotification +func (mr *MockS3APIMockRecorder) GetBucketNotification(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotification", reflect.TypeOf((*MockS3API)(nil).GetBucketNotification), arg0) +} + +// GetBucketNotificationConfiguration mocks base method +func (m *MockS3API) GetBucketNotificationConfiguration(arg0 *s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketNotificationConfiguration", arg0) + ret0, _ := ret[0].(*s3.NotificationConfiguration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketNotificationConfiguration indicates an expected call of GetBucketNotificationConfiguration +func (mr *MockS3APIMockRecorder) GetBucketNotificationConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationConfiguration), arg0) +} + +// GetBucketNotificationConfigurationRequest mocks base method +func (m *MockS3API) GetBucketNotificationConfigurationRequest(arg0 *s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketNotificationConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.NotificationConfiguration) + return ret0, ret1 +} + +// 
GetBucketNotificationConfigurationRequest indicates an expected call of GetBucketNotificationConfigurationRequest +func (mr *MockS3APIMockRecorder) GetBucketNotificationConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationConfigurationRequest), arg0) +} + +// GetBucketNotificationConfigurationWithContext mocks base method +func (m *MockS3API) GetBucketNotificationConfigurationWithContext(arg0 context.Context, arg1 *s3.GetBucketNotificationConfigurationRequest, arg2 ...request.Option) (*s3.NotificationConfiguration, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketNotificationConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.NotificationConfiguration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketNotificationConfigurationWithContext indicates an expected call of GetBucketNotificationConfigurationWithContext +func (mr *MockS3APIMockRecorder) GetBucketNotificationConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationConfigurationWithContext), varargs...) 
+} + +// GetBucketNotificationRequest mocks base method +func (m *MockS3API) GetBucketNotificationRequest(arg0 *s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketNotificationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.NotificationConfigurationDeprecated) + return ret0, ret1 +} + +// GetBucketNotificationRequest indicates an expected call of GetBucketNotificationRequest +func (mr *MockS3APIMockRecorder) GetBucketNotificationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationRequest), arg0) +} + +// GetBucketNotificationWithContext mocks base method +func (m *MockS3API) GetBucketNotificationWithContext(arg0 context.Context, arg1 *s3.GetBucketNotificationConfigurationRequest, arg2 ...request.Option) (*s3.NotificationConfigurationDeprecated, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketNotificationWithContext", varargs...) + ret0, _ := ret[0].(*s3.NotificationConfigurationDeprecated) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketNotificationWithContext indicates an expected call of GetBucketNotificationWithContext +func (mr *MockS3APIMockRecorder) GetBucketNotificationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationWithContext), varargs...) 
+} + +// GetBucketPolicy mocks base method +func (m *MockS3API) GetBucketPolicy(arg0 *s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketPolicy", arg0) + ret0, _ := ret[0].(*s3.GetBucketPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketPolicy indicates an expected call of GetBucketPolicy +func (mr *MockS3APIMockRecorder) GetBucketPolicy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicy", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicy), arg0) +} + +// GetBucketPolicyRequest mocks base method +func (m *MockS3API) GetBucketPolicyRequest(arg0 *s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketPolicyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketPolicyOutput) + return ret0, ret1 +} + +// GetBucketPolicyRequest indicates an expected call of GetBucketPolicyRequest +func (mr *MockS3APIMockRecorder) GetBucketPolicyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyRequest), arg0) +} + +// GetBucketPolicyStatus mocks base method +func (m *MockS3API) GetBucketPolicyStatus(arg0 *s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketPolicyStatus", arg0) + ret0, _ := ret[0].(*s3.GetBucketPolicyStatusOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketPolicyStatus indicates an expected call of GetBucketPolicyStatus +func (mr *MockS3APIMockRecorder) GetBucketPolicyStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyStatus", 
reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyStatus), arg0) +} + +// GetBucketPolicyStatusRequest mocks base method +func (m *MockS3API) GetBucketPolicyStatusRequest(arg0 *s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketPolicyStatusRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketPolicyStatusOutput) + return ret0, ret1 +} + +// GetBucketPolicyStatusRequest indicates an expected call of GetBucketPolicyStatusRequest +func (mr *MockS3APIMockRecorder) GetBucketPolicyStatusRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyStatusRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyStatusRequest), arg0) +} + +// GetBucketPolicyStatusWithContext mocks base method +func (m *MockS3API) GetBucketPolicyStatusWithContext(arg0 context.Context, arg1 *s3.GetBucketPolicyStatusInput, arg2 ...request.Option) (*s3.GetBucketPolicyStatusOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketPolicyStatusWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketPolicyStatusOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketPolicyStatusWithContext indicates an expected call of GetBucketPolicyStatusWithContext +func (mr *MockS3APIMockRecorder) GetBucketPolicyStatusWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyStatusWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyStatusWithContext), varargs...) 
+} + +// GetBucketPolicyWithContext mocks base method +func (m *MockS3API) GetBucketPolicyWithContext(arg0 context.Context, arg1 *s3.GetBucketPolicyInput, arg2 ...request.Option) (*s3.GetBucketPolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketPolicyWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketPolicyWithContext indicates an expected call of GetBucketPolicyWithContext +func (mr *MockS3APIMockRecorder) GetBucketPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyWithContext), varargs...) +} + +// GetBucketReplication mocks base method +func (m *MockS3API) GetBucketReplication(arg0 *s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketReplication", arg0) + ret0, _ := ret[0].(*s3.GetBucketReplicationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketReplication indicates an expected call of GetBucketReplication +func (mr *MockS3APIMockRecorder) GetBucketReplication(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketReplication", reflect.TypeOf((*MockS3API)(nil).GetBucketReplication), arg0) +} + +// GetBucketReplicationRequest mocks base method +func (m *MockS3API) GetBucketReplicationRequest(arg0 *s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketReplicationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := 
ret[1].(*s3.GetBucketReplicationOutput) + return ret0, ret1 +} + +// GetBucketReplicationRequest indicates an expected call of GetBucketReplicationRequest +func (mr *MockS3APIMockRecorder) GetBucketReplicationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketReplicationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketReplicationRequest), arg0) +} + +// GetBucketReplicationWithContext mocks base method +func (m *MockS3API) GetBucketReplicationWithContext(arg0 context.Context, arg1 *s3.GetBucketReplicationInput, arg2 ...request.Option) (*s3.GetBucketReplicationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketReplicationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketReplicationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketReplicationWithContext indicates an expected call of GetBucketReplicationWithContext +func (mr *MockS3APIMockRecorder) GetBucketReplicationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketReplicationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketReplicationWithContext), varargs...) 
+} + +// GetBucketRequestPayment mocks base method +func (m *MockS3API) GetBucketRequestPayment(arg0 *s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketRequestPayment", arg0) + ret0, _ := ret[0].(*s3.GetBucketRequestPaymentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketRequestPayment indicates an expected call of GetBucketRequestPayment +func (mr *MockS3APIMockRecorder) GetBucketRequestPayment(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketRequestPayment", reflect.TypeOf((*MockS3API)(nil).GetBucketRequestPayment), arg0) +} + +// GetBucketRequestPaymentRequest mocks base method +func (m *MockS3API) GetBucketRequestPaymentRequest(arg0 *s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketRequestPaymentRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketRequestPaymentOutput) + return ret0, ret1 +} + +// GetBucketRequestPaymentRequest indicates an expected call of GetBucketRequestPaymentRequest +func (mr *MockS3APIMockRecorder) GetBucketRequestPaymentRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketRequestPaymentRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketRequestPaymentRequest), arg0) +} + +// GetBucketRequestPaymentWithContext mocks base method +func (m *MockS3API) GetBucketRequestPaymentWithContext(arg0 context.Context, arg1 *s3.GetBucketRequestPaymentInput, arg2 ...request.Option) (*s3.GetBucketRequestPaymentOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketRequestPaymentWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.GetBucketRequestPaymentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketRequestPaymentWithContext indicates an expected call of GetBucketRequestPaymentWithContext +func (mr *MockS3APIMockRecorder) GetBucketRequestPaymentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketRequestPaymentWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketRequestPaymentWithContext), varargs...) +} + +// GetBucketTagging mocks base method +func (m *MockS3API) GetBucketTagging(arg0 *s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketTagging", arg0) + ret0, _ := ret[0].(*s3.GetBucketTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketTagging indicates an expected call of GetBucketTagging +func (mr *MockS3APIMockRecorder) GetBucketTagging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketTagging", reflect.TypeOf((*MockS3API)(nil).GetBucketTagging), arg0) +} + +// GetBucketTaggingRequest mocks base method +func (m *MockS3API) GetBucketTaggingRequest(arg0 *s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketTaggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketTaggingOutput) + return ret0, ret1 +} + +// GetBucketTaggingRequest indicates an expected call of GetBucketTaggingRequest +func (mr *MockS3APIMockRecorder) GetBucketTaggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketTaggingRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketTaggingRequest), arg0) +} + +// GetBucketTaggingWithContext mocks base 
method +func (m *MockS3API) GetBucketTaggingWithContext(arg0 context.Context, arg1 *s3.GetBucketTaggingInput, arg2 ...request.Option) (*s3.GetBucketTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketTaggingWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketTaggingWithContext indicates an expected call of GetBucketTaggingWithContext +func (mr *MockS3APIMockRecorder) GetBucketTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketTaggingWithContext), varargs...) +} + +// GetBucketVersioning mocks base method +func (m *MockS3API) GetBucketVersioning(arg0 *s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketVersioning", arg0) + ret0, _ := ret[0].(*s3.GetBucketVersioningOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketVersioning indicates an expected call of GetBucketVersioning +func (mr *MockS3APIMockRecorder) GetBucketVersioning(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketVersioning", reflect.TypeOf((*MockS3API)(nil).GetBucketVersioning), arg0) +} + +// GetBucketVersioningRequest mocks base method +func (m *MockS3API) GetBucketVersioningRequest(arg0 *s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketVersioningRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketVersioningOutput) + return ret0, ret1 +} + +// 
GetBucketVersioningRequest indicates an expected call of GetBucketVersioningRequest +func (mr *MockS3APIMockRecorder) GetBucketVersioningRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketVersioningRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketVersioningRequest), arg0) +} + +// GetBucketVersioningWithContext mocks base method +func (m *MockS3API) GetBucketVersioningWithContext(arg0 context.Context, arg1 *s3.GetBucketVersioningInput, arg2 ...request.Option) (*s3.GetBucketVersioningOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketVersioningWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetBucketVersioningOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketVersioningWithContext indicates an expected call of GetBucketVersioningWithContext +func (mr *MockS3APIMockRecorder) GetBucketVersioningWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketVersioningWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketVersioningWithContext), varargs...) 
+} + +// GetBucketWebsite mocks base method +func (m *MockS3API) GetBucketWebsite(arg0 *s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketWebsite", arg0) + ret0, _ := ret[0].(*s3.GetBucketWebsiteOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketWebsite indicates an expected call of GetBucketWebsite +func (mr *MockS3APIMockRecorder) GetBucketWebsite(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketWebsite", reflect.TypeOf((*MockS3API)(nil).GetBucketWebsite), arg0) +} + +// GetBucketWebsiteRequest mocks base method +func (m *MockS3API) GetBucketWebsiteRequest(arg0 *s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBucketWebsiteRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetBucketWebsiteOutput) + return ret0, ret1 +} + +// GetBucketWebsiteRequest indicates an expected call of GetBucketWebsiteRequest +func (mr *MockS3APIMockRecorder) GetBucketWebsiteRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketWebsiteRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketWebsiteRequest), arg0) +} + +// GetBucketWebsiteWithContext mocks base method +func (m *MockS3API) GetBucketWebsiteWithContext(arg0 context.Context, arg1 *s3.GetBucketWebsiteInput, arg2 ...request.Option) (*s3.GetBucketWebsiteOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetBucketWebsiteWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.GetBucketWebsiteOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBucketWebsiteWithContext indicates an expected call of GetBucketWebsiteWithContext +func (mr *MockS3APIMockRecorder) GetBucketWebsiteWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketWebsiteWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketWebsiteWithContext), varargs...) +} + +// GetObject mocks base method +func (m *MockS3API) GetObject(arg0 *s3.GetObjectInput) (*s3.GetObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObject", arg0) + ret0, _ := ret[0].(*s3.GetObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObject indicates an expected call of GetObject +func (mr *MockS3APIMockRecorder) GetObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockS3API)(nil).GetObject), arg0) +} + +// GetObjectAcl mocks base method +func (m *MockS3API) GetObjectAcl(arg0 *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectAcl", arg0) + ret0, _ := ret[0].(*s3.GetObjectAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectAcl indicates an expected call of GetObjectAcl +func (mr *MockS3APIMockRecorder) GetObjectAcl(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAcl", reflect.TypeOf((*MockS3API)(nil).GetObjectAcl), arg0) +} + +// GetObjectAclRequest mocks base method +func (m *MockS3API) GetObjectAclRequest(arg0 *s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectAclRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := 
ret[1].(*s3.GetObjectAclOutput) + return ret0, ret1 +} + +// GetObjectAclRequest indicates an expected call of GetObjectAclRequest +func (mr *MockS3APIMockRecorder) GetObjectAclRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAclRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectAclRequest), arg0) +} + +// GetObjectAclWithContext mocks base method +func (m *MockS3API) GetObjectAclWithContext(arg0 context.Context, arg1 *s3.GetObjectAclInput, arg2 ...request.Option) (*s3.GetObjectAclOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectAclWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetObjectAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectAclWithContext indicates an expected call of GetObjectAclWithContext +func (mr *MockS3APIMockRecorder) GetObjectAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAclWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectAclWithContext), varargs...) 
+} + +// GetObjectLegalHold mocks base method +func (m *MockS3API) GetObjectLegalHold(arg0 *s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectLegalHold", arg0) + ret0, _ := ret[0].(*s3.GetObjectLegalHoldOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectLegalHold indicates an expected call of GetObjectLegalHold +func (mr *MockS3APIMockRecorder) GetObjectLegalHold(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLegalHold", reflect.TypeOf((*MockS3API)(nil).GetObjectLegalHold), arg0) +} + +// GetObjectLegalHoldRequest mocks base method +func (m *MockS3API) GetObjectLegalHoldRequest(arg0 *s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectLegalHoldRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectLegalHoldOutput) + return ret0, ret1 +} + +// GetObjectLegalHoldRequest indicates an expected call of GetObjectLegalHoldRequest +func (mr *MockS3APIMockRecorder) GetObjectLegalHoldRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLegalHoldRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectLegalHoldRequest), arg0) +} + +// GetObjectLegalHoldWithContext mocks base method +func (m *MockS3API) GetObjectLegalHoldWithContext(arg0 context.Context, arg1 *s3.GetObjectLegalHoldInput, arg2 ...request.Option) (*s3.GetObjectLegalHoldOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectLegalHoldWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.GetObjectLegalHoldOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectLegalHoldWithContext indicates an expected call of GetObjectLegalHoldWithContext +func (mr *MockS3APIMockRecorder) GetObjectLegalHoldWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLegalHoldWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectLegalHoldWithContext), varargs...) +} + +// GetObjectLockConfiguration mocks base method +func (m *MockS3API) GetObjectLockConfiguration(arg0 *s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectLockConfiguration", arg0) + ret0, _ := ret[0].(*s3.GetObjectLockConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectLockConfiguration indicates an expected call of GetObjectLockConfiguration +func (mr *MockS3APIMockRecorder) GetObjectLockConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLockConfiguration", reflect.TypeOf((*MockS3API)(nil).GetObjectLockConfiguration), arg0) +} + +// GetObjectLockConfigurationRequest mocks base method +func (m *MockS3API) GetObjectLockConfigurationRequest(arg0 *s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectLockConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectLockConfigurationOutput) + return ret0, ret1 +} + +// GetObjectLockConfigurationRequest indicates an expected call of GetObjectLockConfigurationRequest +func (mr *MockS3APIMockRecorder) GetObjectLockConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLockConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectLockConfigurationRequest), arg0) +} + +// GetObjectLockConfigurationWithContext mocks base method +func (m *MockS3API) GetObjectLockConfigurationWithContext(arg0 context.Context, arg1 *s3.GetObjectLockConfigurationInput, arg2 ...request.Option) (*s3.GetObjectLockConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectLockConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetObjectLockConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectLockConfigurationWithContext indicates an expected call of GetObjectLockConfigurationWithContext +func (mr *MockS3APIMockRecorder) GetObjectLockConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLockConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectLockConfigurationWithContext), varargs...) 
+} + +// GetObjectRequest mocks base method +func (m *MockS3API) GetObjectRequest(arg0 *s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectOutput) + return ret0, ret1 +} + +// GetObjectRequest indicates an expected call of GetObjectRequest +func (mr *MockS3APIMockRecorder) GetObjectRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectRequest), arg0) +} + +// GetObjectRetention mocks base method +func (m *MockS3API) GetObjectRetention(arg0 *s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectRetention", arg0) + ret0, _ := ret[0].(*s3.GetObjectRetentionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectRetention indicates an expected call of GetObjectRetention +func (mr *MockS3APIMockRecorder) GetObjectRetention(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRetention", reflect.TypeOf((*MockS3API)(nil).GetObjectRetention), arg0) +} + +// GetObjectRetentionRequest mocks base method +func (m *MockS3API) GetObjectRetentionRequest(arg0 *s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectRetentionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectRetentionOutput) + return ret0, ret1 +} + +// GetObjectRetentionRequest indicates an expected call of GetObjectRetentionRequest +func (mr *MockS3APIMockRecorder) GetObjectRetentionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRetentionRequest", 
reflect.TypeOf((*MockS3API)(nil).GetObjectRetentionRequest), arg0) +} + +// GetObjectRetentionWithContext mocks base method +func (m *MockS3API) GetObjectRetentionWithContext(arg0 context.Context, arg1 *s3.GetObjectRetentionInput, arg2 ...request.Option) (*s3.GetObjectRetentionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectRetentionWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetObjectRetentionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectRetentionWithContext indicates an expected call of GetObjectRetentionWithContext +func (mr *MockS3APIMockRecorder) GetObjectRetentionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRetentionWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectRetentionWithContext), varargs...) 
+} + +// GetObjectTagging mocks base method +func (m *MockS3API) GetObjectTagging(arg0 *s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectTagging", arg0) + ret0, _ := ret[0].(*s3.GetObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectTagging indicates an expected call of GetObjectTagging +func (mr *MockS3APIMockRecorder) GetObjectTagging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTagging", reflect.TypeOf((*MockS3API)(nil).GetObjectTagging), arg0) +} + +// GetObjectTaggingRequest mocks base method +func (m *MockS3API) GetObjectTaggingRequest(arg0 *s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectTaggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectTaggingOutput) + return ret0, ret1 +} + +// GetObjectTaggingRequest indicates an expected call of GetObjectTaggingRequest +func (mr *MockS3APIMockRecorder) GetObjectTaggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTaggingRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectTaggingRequest), arg0) +} + +// GetObjectTaggingWithContext mocks base method +func (m *MockS3API) GetObjectTaggingWithContext(arg0 context.Context, arg1 *s3.GetObjectTaggingInput, arg2 ...request.Option) (*s3.GetObjectTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectTaggingWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.GetObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectTaggingWithContext indicates an expected call of GetObjectTaggingWithContext +func (mr *MockS3APIMockRecorder) GetObjectTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectTaggingWithContext), varargs...) +} + +// GetObjectTorrent mocks base method +func (m *MockS3API) GetObjectTorrent(arg0 *s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectTorrent", arg0) + ret0, _ := ret[0].(*s3.GetObjectTorrentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectTorrent indicates an expected call of GetObjectTorrent +func (mr *MockS3APIMockRecorder) GetObjectTorrent(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTorrent", reflect.TypeOf((*MockS3API)(nil).GetObjectTorrent), arg0) +} + +// GetObjectTorrentRequest mocks base method +func (m *MockS3API) GetObjectTorrentRequest(arg0 *s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectTorrentRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetObjectTorrentOutput) + return ret0, ret1 +} + +// GetObjectTorrentRequest indicates an expected call of GetObjectTorrentRequest +func (mr *MockS3APIMockRecorder) GetObjectTorrentRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTorrentRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectTorrentRequest), arg0) +} + +// GetObjectTorrentWithContext mocks base method +func (m *MockS3API) 
GetObjectTorrentWithContext(arg0 context.Context, arg1 *s3.GetObjectTorrentInput, arg2 ...request.Option) (*s3.GetObjectTorrentOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectTorrentWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetObjectTorrentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectTorrentWithContext indicates an expected call of GetObjectTorrentWithContext +func (mr *MockS3APIMockRecorder) GetObjectTorrentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTorrentWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectTorrentWithContext), varargs...) +} + +// GetObjectWithContext mocks base method +func (m *MockS3API) GetObjectWithContext(arg0 context.Context, arg1 *s3.GetObjectInput, arg2 ...request.Option) (*s3.GetObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetObjectWithContext", varargs...) + ret0, _ := ret[0].(*s3.GetObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetObjectWithContext indicates an expected call of GetObjectWithContext +func (mr *MockS3APIMockRecorder) GetObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectWithContext), varargs...) 
+} + +// GetPublicAccessBlock mocks base method +func (m *MockS3API) GetPublicAccessBlock(arg0 *s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPublicAccessBlock", arg0) + ret0, _ := ret[0].(*s3.GetPublicAccessBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPublicAccessBlock indicates an expected call of GetPublicAccessBlock +func (mr *MockS3APIMockRecorder) GetPublicAccessBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicAccessBlock", reflect.TypeOf((*MockS3API)(nil).GetPublicAccessBlock), arg0) +} + +// GetPublicAccessBlockRequest mocks base method +func (m *MockS3API) GetPublicAccessBlockRequest(arg0 *s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPublicAccessBlockRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.GetPublicAccessBlockOutput) + return ret0, ret1 +} + +// GetPublicAccessBlockRequest indicates an expected call of GetPublicAccessBlockRequest +func (mr *MockS3APIMockRecorder) GetPublicAccessBlockRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicAccessBlockRequest", reflect.TypeOf((*MockS3API)(nil).GetPublicAccessBlockRequest), arg0) +} + +// GetPublicAccessBlockWithContext mocks base method +func (m *MockS3API) GetPublicAccessBlockWithContext(arg0 context.Context, arg1 *s3.GetPublicAccessBlockInput, arg2 ...request.Option) (*s3.GetPublicAccessBlockOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetPublicAccessBlockWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.GetPublicAccessBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPublicAccessBlockWithContext indicates an expected call of GetPublicAccessBlockWithContext +func (mr *MockS3APIMockRecorder) GetPublicAccessBlockWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicAccessBlockWithContext", reflect.TypeOf((*MockS3API)(nil).GetPublicAccessBlockWithContext), varargs...) +} + +// HeadBucket mocks base method +func (m *MockS3API) HeadBucket(arg0 *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadBucket", arg0) + ret0, _ := ret[0].(*s3.HeadBucketOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeadBucket indicates an expected call of HeadBucket +func (mr *MockS3APIMockRecorder) HeadBucket(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadBucket", reflect.TypeOf((*MockS3API)(nil).HeadBucket), arg0) +} + +// HeadBucketRequest mocks base method +func (m *MockS3API) HeadBucketRequest(arg0 *s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadBucketRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.HeadBucketOutput) + return ret0, ret1 +} + +// HeadBucketRequest indicates an expected call of HeadBucketRequest +func (mr *MockS3APIMockRecorder) HeadBucketRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadBucketRequest", reflect.TypeOf((*MockS3API)(nil).HeadBucketRequest), arg0) +} + +// HeadBucketWithContext mocks base method +func (m *MockS3API) HeadBucketWithContext(arg0 context.Context, arg1 *s3.HeadBucketInput, arg2 ...request.Option) (*s3.HeadBucketOutput, error) { + 
m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "HeadBucketWithContext", varargs...) + ret0, _ := ret[0].(*s3.HeadBucketOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeadBucketWithContext indicates an expected call of HeadBucketWithContext +func (mr *MockS3APIMockRecorder) HeadBucketWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadBucketWithContext", reflect.TypeOf((*MockS3API)(nil).HeadBucketWithContext), varargs...) +} + +// HeadObject mocks base method +func (m *MockS3API) HeadObject(arg0 *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadObject", arg0) + ret0, _ := ret[0].(*s3.HeadObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeadObject indicates an expected call of HeadObject +func (mr *MockS3APIMockRecorder) HeadObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObject", reflect.TypeOf((*MockS3API)(nil).HeadObject), arg0) +} + +// HeadObjectRequest mocks base method +func (m *MockS3API) HeadObjectRequest(arg0 *s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadObjectRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.HeadObjectOutput) + return ret0, ret1 +} + +// HeadObjectRequest indicates an expected call of HeadObjectRequest +func (mr *MockS3APIMockRecorder) HeadObjectRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObjectRequest", reflect.TypeOf((*MockS3API)(nil).HeadObjectRequest), arg0) +} + +// HeadObjectWithContext mocks base method +func (m *MockS3API) 
HeadObjectWithContext(arg0 context.Context, arg1 *s3.HeadObjectInput, arg2 ...request.Option) (*s3.HeadObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "HeadObjectWithContext", varargs...) + ret0, _ := ret[0].(*s3.HeadObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeadObjectWithContext indicates an expected call of HeadObjectWithContext +func (mr *MockS3APIMockRecorder) HeadObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObjectWithContext", reflect.TypeOf((*MockS3API)(nil).HeadObjectWithContext), varargs...) +} + +// ListBucketAnalyticsConfigurations mocks base method +func (m *MockS3API) ListBucketAnalyticsConfigurations(arg0 *s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketAnalyticsConfigurations", arg0) + ret0, _ := ret[0].(*s3.ListBucketAnalyticsConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketAnalyticsConfigurations indicates an expected call of ListBucketAnalyticsConfigurations +func (mr *MockS3APIMockRecorder) ListBucketAnalyticsConfigurations(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketAnalyticsConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketAnalyticsConfigurations), arg0) +} + +// ListBucketAnalyticsConfigurationsRequest mocks base method +func (m *MockS3API) ListBucketAnalyticsConfigurationsRequest(arg0 *s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketAnalyticsConfigurationsRequest", arg0) + ret0, 
_ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListBucketAnalyticsConfigurationsOutput) + return ret0, ret1 +} + +// ListBucketAnalyticsConfigurationsRequest indicates an expected call of ListBucketAnalyticsConfigurationsRequest +func (mr *MockS3APIMockRecorder) ListBucketAnalyticsConfigurationsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketAnalyticsConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketAnalyticsConfigurationsRequest), arg0) +} + +// ListBucketAnalyticsConfigurationsWithContext mocks base method +func (m *MockS3API) ListBucketAnalyticsConfigurationsWithContext(arg0 context.Context, arg1 *s3.ListBucketAnalyticsConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListBucketAnalyticsConfigurationsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListBucketAnalyticsConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketAnalyticsConfigurationsWithContext indicates an expected call of ListBucketAnalyticsConfigurationsWithContext +func (mr *MockS3APIMockRecorder) ListBucketAnalyticsConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketAnalyticsConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketAnalyticsConfigurationsWithContext), varargs...) 
+} + +// ListBucketInventoryConfigurations mocks base method +func (m *MockS3API) ListBucketInventoryConfigurations(arg0 *s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketInventoryConfigurations", arg0) + ret0, _ := ret[0].(*s3.ListBucketInventoryConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketInventoryConfigurations indicates an expected call of ListBucketInventoryConfigurations +func (mr *MockS3APIMockRecorder) ListBucketInventoryConfigurations(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketInventoryConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketInventoryConfigurations), arg0) +} + +// ListBucketInventoryConfigurationsRequest mocks base method +func (m *MockS3API) ListBucketInventoryConfigurationsRequest(arg0 *s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketInventoryConfigurationsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListBucketInventoryConfigurationsOutput) + return ret0, ret1 +} + +// ListBucketInventoryConfigurationsRequest indicates an expected call of ListBucketInventoryConfigurationsRequest +func (mr *MockS3APIMockRecorder) ListBucketInventoryConfigurationsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketInventoryConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketInventoryConfigurationsRequest), arg0) +} + +// ListBucketInventoryConfigurationsWithContext mocks base method +func (m *MockS3API) ListBucketInventoryConfigurationsWithContext(arg0 context.Context, arg1 *s3.ListBucketInventoryConfigurationsInput, arg2 ...request.Option) 
(*s3.ListBucketInventoryConfigurationsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListBucketInventoryConfigurationsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListBucketInventoryConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketInventoryConfigurationsWithContext indicates an expected call of ListBucketInventoryConfigurationsWithContext +func (mr *MockS3APIMockRecorder) ListBucketInventoryConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketInventoryConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketInventoryConfigurationsWithContext), varargs...) +} + +// ListBucketMetricsConfigurations mocks base method +func (m *MockS3API) ListBucketMetricsConfigurations(arg0 *s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketMetricsConfigurations", arg0) + ret0, _ := ret[0].(*s3.ListBucketMetricsConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketMetricsConfigurations indicates an expected call of ListBucketMetricsConfigurations +func (mr *MockS3APIMockRecorder) ListBucketMetricsConfigurations(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketMetricsConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketMetricsConfigurations), arg0) +} + +// ListBucketMetricsConfigurationsRequest mocks base method +func (m *MockS3API) ListBucketMetricsConfigurationsRequest(arg0 *s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput) { + m.ctrl.T.Helper() + ret := 
m.ctrl.Call(m, "ListBucketMetricsConfigurationsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListBucketMetricsConfigurationsOutput) + return ret0, ret1 +} + +// ListBucketMetricsConfigurationsRequest indicates an expected call of ListBucketMetricsConfigurationsRequest +func (mr *MockS3APIMockRecorder) ListBucketMetricsConfigurationsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketMetricsConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketMetricsConfigurationsRequest), arg0) +} + +// ListBucketMetricsConfigurationsWithContext mocks base method +func (m *MockS3API) ListBucketMetricsConfigurationsWithContext(arg0 context.Context, arg1 *s3.ListBucketMetricsConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketMetricsConfigurationsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListBucketMetricsConfigurationsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListBucketMetricsConfigurationsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketMetricsConfigurationsWithContext indicates an expected call of ListBucketMetricsConfigurationsWithContext +func (mr *MockS3APIMockRecorder) ListBucketMetricsConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketMetricsConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketMetricsConfigurationsWithContext), varargs...) 
+} + +// ListBuckets mocks base method +func (m *MockS3API) ListBuckets(arg0 *s3.ListBucketsInput) (*s3.ListBucketsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBuckets", arg0) + ret0, _ := ret[0].(*s3.ListBucketsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBuckets indicates an expected call of ListBuckets +func (mr *MockS3APIMockRecorder) ListBuckets(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBuckets", reflect.TypeOf((*MockS3API)(nil).ListBuckets), arg0) +} + +// ListBucketsRequest mocks base method +func (m *MockS3API) ListBucketsRequest(arg0 *s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListBucketsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListBucketsOutput) + return ret0, ret1 +} + +// ListBucketsRequest indicates an expected call of ListBucketsRequest +func (mr *MockS3APIMockRecorder) ListBucketsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketsRequest), arg0) +} + +// ListBucketsWithContext mocks base method +func (m *MockS3API) ListBucketsWithContext(arg0 context.Context, arg1 *s3.ListBucketsInput, arg2 ...request.Option) (*s3.ListBucketsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListBucketsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListBucketsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListBucketsWithContext indicates an expected call of ListBucketsWithContext +func (mr *MockS3APIMockRecorder) ListBucketsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketsWithContext), varargs...) +} + +// ListMultipartUploads mocks base method +func (m *MockS3API) ListMultipartUploads(arg0 *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMultipartUploads", arg0) + ret0, _ := ret[0].(*s3.ListMultipartUploadsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMultipartUploads indicates an expected call of ListMultipartUploads +func (mr *MockS3APIMockRecorder) ListMultipartUploads(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploads", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploads), arg0) +} + +// ListMultipartUploadsPages mocks base method +func (m *MockS3API) ListMultipartUploadsPages(arg0 *s3.ListMultipartUploadsInput, arg1 func(*s3.ListMultipartUploadsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMultipartUploadsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListMultipartUploadsPages indicates an expected call of ListMultipartUploadsPages +func (mr *MockS3APIMockRecorder) ListMultipartUploadsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsPages", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsPages), arg0, arg1) +} + +// ListMultipartUploadsPagesWithContext mocks base method +func (m *MockS3API) ListMultipartUploadsPagesWithContext(arg0 context.Context, arg1 *s3.ListMultipartUploadsInput, arg2 func(*s3.ListMultipartUploadsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListMultipartUploadsPagesWithContext", varargs...) 
+ ret0, _ := ret[0].(error) + return ret0 +} + +// ListMultipartUploadsPagesWithContext indicates an expected call of ListMultipartUploadsPagesWithContext +func (mr *MockS3APIMockRecorder) ListMultipartUploadsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsPagesWithContext), varargs...) +} + +// ListMultipartUploadsRequest mocks base method +func (m *MockS3API) ListMultipartUploadsRequest(arg0 *s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMultipartUploadsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListMultipartUploadsOutput) + return ret0, ret1 +} + +// ListMultipartUploadsRequest indicates an expected call of ListMultipartUploadsRequest +func (mr *MockS3APIMockRecorder) ListMultipartUploadsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsRequest", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsRequest), arg0) +} + +// ListMultipartUploadsWithContext mocks base method +func (m *MockS3API) ListMultipartUploadsWithContext(arg0 context.Context, arg1 *s3.ListMultipartUploadsInput, arg2 ...request.Option) (*s3.ListMultipartUploadsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListMultipartUploadsWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.ListMultipartUploadsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMultipartUploadsWithContext indicates an expected call of ListMultipartUploadsWithContext +func (mr *MockS3APIMockRecorder) ListMultipartUploadsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsWithContext", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsWithContext), varargs...) +} + +// ListObjectVersions mocks base method +func (m *MockS3API) ListObjectVersions(arg0 *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectVersions", arg0) + ret0, _ := ret[0].(*s3.ListObjectVersionsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjectVersions indicates an expected call of ListObjectVersions +func (mr *MockS3APIMockRecorder) ListObjectVersions(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersions", reflect.TypeOf((*MockS3API)(nil).ListObjectVersions), arg0) +} + +// ListObjectVersionsPages mocks base method +func (m *MockS3API) ListObjectVersionsPages(arg0 *s3.ListObjectVersionsInput, arg1 func(*s3.ListObjectVersionsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectVersionsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListObjectVersionsPages indicates an expected call of ListObjectVersionsPages +func (mr *MockS3APIMockRecorder) ListObjectVersionsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsPages", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsPages), arg0, arg1) +} + +// ListObjectVersionsPagesWithContext mocks base method +func (m 
*MockS3API) ListObjectVersionsPagesWithContext(arg0 context.Context, arg1 *s3.ListObjectVersionsInput, arg2 func(*s3.ListObjectVersionsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjectVersionsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListObjectVersionsPagesWithContext indicates an expected call of ListObjectVersionsPagesWithContext +func (mr *MockS3APIMockRecorder) ListObjectVersionsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsPagesWithContext), varargs...) +} + +// ListObjectVersionsRequest mocks base method +func (m *MockS3API) ListObjectVersionsRequest(arg0 *s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectVersionsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListObjectVersionsOutput) + return ret0, ret1 +} + +// ListObjectVersionsRequest indicates an expected call of ListObjectVersionsRequest +func (mr *MockS3APIMockRecorder) ListObjectVersionsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsRequest", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsRequest), arg0) +} + +// ListObjectVersionsWithContext mocks base method +func (m *MockS3API) ListObjectVersionsWithContext(arg0 context.Context, arg1 *s3.ListObjectVersionsInput, arg2 ...request.Option) (*s3.ListObjectVersionsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = 
append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjectVersionsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListObjectVersionsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjectVersionsWithContext indicates an expected call of ListObjectVersionsWithContext +func (mr *MockS3APIMockRecorder) ListObjectVersionsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsWithContext), varargs...) +} + +// ListObjects mocks base method +func (m *MockS3API) ListObjects(arg0 *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjects", arg0) + ret0, _ := ret[0].(*s3.ListObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjects indicates an expected call of ListObjects +func (mr *MockS3APIMockRecorder) ListObjects(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjects", reflect.TypeOf((*MockS3API)(nil).ListObjects), arg0) +} + +// ListObjectsPages mocks base method +func (m *MockS3API) ListObjectsPages(arg0 *s3.ListObjectsInput, arg1 func(*s3.ListObjectsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListObjectsPages indicates an expected call of ListObjectsPages +func (mr *MockS3APIMockRecorder) ListObjectsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsPages", reflect.TypeOf((*MockS3API)(nil).ListObjectsPages), arg0, arg1) +} + +// ListObjectsPagesWithContext mocks base method +func (m *MockS3API) ListObjectsPagesWithContext(arg0 context.Context, arg1 
*s3.ListObjectsInput, arg2 func(*s3.ListObjectsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjectsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListObjectsPagesWithContext indicates an expected call of ListObjectsPagesWithContext +func (mr *MockS3APIMockRecorder) ListObjectsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsPagesWithContext), varargs...) +} + +// ListObjectsRequest mocks base method +func (m *MockS3API) ListObjectsRequest(arg0 *s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListObjectsOutput) + return ret0, ret1 +} + +// ListObjectsRequest indicates an expected call of ListObjectsRequest +func (mr *MockS3APIMockRecorder) ListObjectsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsRequest", reflect.TypeOf((*MockS3API)(nil).ListObjectsRequest), arg0) +} + +// ListObjectsV2 mocks base method +func (m *MockS3API) ListObjectsV2(arg0 *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectsV2", arg0) + ret0, _ := ret[0].(*s3.ListObjectsV2Output) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjectsV2 indicates an expected call of ListObjectsV2 +func (mr *MockS3APIMockRecorder) ListObjectsV2(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"ListObjectsV2", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2), arg0) +} + +// ListObjectsV2Pages mocks base method +func (m *MockS3API) ListObjectsV2Pages(arg0 *s3.ListObjectsV2Input, arg1 func(*s3.ListObjectsV2Output, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectsV2Pages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListObjectsV2Pages indicates an expected call of ListObjectsV2Pages +func (mr *MockS3APIMockRecorder) ListObjectsV2Pages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2Pages", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2Pages), arg0, arg1) +} + +// ListObjectsV2PagesWithContext mocks base method +func (m *MockS3API) ListObjectsV2PagesWithContext(arg0 context.Context, arg1 *s3.ListObjectsV2Input, arg2 func(*s3.ListObjectsV2Output, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjectsV2PagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListObjectsV2PagesWithContext indicates an expected call of ListObjectsV2PagesWithContext +func (mr *MockS3APIMockRecorder) ListObjectsV2PagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2PagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2PagesWithContext), varargs...) 
+} + +// ListObjectsV2Request mocks base method +func (m *MockS3API) ListObjectsV2Request(arg0 *s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjectsV2Request", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListObjectsV2Output) + return ret0, ret1 +} + +// ListObjectsV2Request indicates an expected call of ListObjectsV2Request +func (mr *MockS3APIMockRecorder) ListObjectsV2Request(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2Request", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2Request), arg0) +} + +// ListObjectsV2WithContext mocks base method +func (m *MockS3API) ListObjectsV2WithContext(arg0 context.Context, arg1 *s3.ListObjectsV2Input, arg2 ...request.Option) (*s3.ListObjectsV2Output, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjectsV2WithContext", varargs...) + ret0, _ := ret[0].(*s3.ListObjectsV2Output) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjectsV2WithContext indicates an expected call of ListObjectsV2WithContext +func (mr *MockS3APIMockRecorder) ListObjectsV2WithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2WithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2WithContext), varargs...) +} + +// ListObjectsWithContext mocks base method +func (m *MockS3API) ListObjectsWithContext(arg0 context.Context, arg1 *s3.ListObjectsInput, arg2 ...request.Option) (*s3.ListObjectsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListObjectsWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.ListObjectsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListObjectsWithContext indicates an expected call of ListObjectsWithContext +func (mr *MockS3APIMockRecorder) ListObjectsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsWithContext), varargs...) +} + +// ListParts mocks base method +func (m *MockS3API) ListParts(arg0 *s3.ListPartsInput) (*s3.ListPartsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListParts", arg0) + ret0, _ := ret[0].(*s3.ListPartsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListParts indicates an expected call of ListParts +func (mr *MockS3APIMockRecorder) ListParts(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListParts", reflect.TypeOf((*MockS3API)(nil).ListParts), arg0) +} + +// ListPartsPages mocks base method +func (m *MockS3API) ListPartsPages(arg0 *s3.ListPartsInput, arg1 func(*s3.ListPartsOutput, bool) bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPartsPages", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListPartsPages indicates an expected call of ListPartsPages +func (mr *MockS3APIMockRecorder) ListPartsPages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsPages", reflect.TypeOf((*MockS3API)(nil).ListPartsPages), arg0, arg1) +} + +// ListPartsPagesWithContext mocks base method +func (m *MockS3API) ListPartsPagesWithContext(arg0 context.Context, arg1 *s3.ListPartsInput, arg2 func(*s3.ListPartsOutput, bool) bool, arg3 ...request.Option) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs 
= append(varargs, a) + } + ret := m.ctrl.Call(m, "ListPartsPagesWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListPartsPagesWithContext indicates an expected call of ListPartsPagesWithContext +func (mr *MockS3APIMockRecorder) ListPartsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListPartsPagesWithContext), varargs...) +} + +// ListPartsRequest mocks base method +func (m *MockS3API) ListPartsRequest(arg0 *s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPartsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.ListPartsOutput) + return ret0, ret1 +} + +// ListPartsRequest indicates an expected call of ListPartsRequest +func (mr *MockS3APIMockRecorder) ListPartsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsRequest", reflect.TypeOf((*MockS3API)(nil).ListPartsRequest), arg0) +} + +// ListPartsWithContext mocks base method +func (m *MockS3API) ListPartsWithContext(arg0 context.Context, arg1 *s3.ListPartsInput, arg2 ...request.Option) (*s3.ListPartsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListPartsWithContext", varargs...) + ret0, _ := ret[0].(*s3.ListPartsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListPartsWithContext indicates an expected call of ListPartsWithContext +func (mr *MockS3APIMockRecorder) ListPartsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsWithContext", reflect.TypeOf((*MockS3API)(nil).ListPartsWithContext), varargs...) +} + +// PutBucketAccelerateConfiguration mocks base method +func (m *MockS3API) PutBucketAccelerateConfiguration(arg0 *s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketAccelerateConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutBucketAccelerateConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketAccelerateConfiguration indicates an expected call of PutBucketAccelerateConfiguration +func (mr *MockS3APIMockRecorder) PutBucketAccelerateConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAccelerateConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketAccelerateConfiguration), arg0) +} + +// PutBucketAccelerateConfigurationRequest mocks base method +func (m *MockS3API) PutBucketAccelerateConfigurationRequest(arg0 *s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketAccelerateConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketAccelerateConfigurationOutput) + return ret0, ret1 +} + +// PutBucketAccelerateConfigurationRequest indicates an expected call of PutBucketAccelerateConfigurationRequest +func (mr *MockS3APIMockRecorder) PutBucketAccelerateConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAccelerateConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketAccelerateConfigurationRequest), arg0) +} + +// PutBucketAccelerateConfigurationWithContext mocks base method +func (m *MockS3API) PutBucketAccelerateConfigurationWithContext(arg0 
context.Context, arg1 *s3.PutBucketAccelerateConfigurationInput, arg2 ...request.Option) (*s3.PutBucketAccelerateConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketAccelerateConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketAccelerateConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketAccelerateConfigurationWithContext indicates an expected call of PutBucketAccelerateConfigurationWithContext +func (mr *MockS3APIMockRecorder) PutBucketAccelerateConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAccelerateConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketAccelerateConfigurationWithContext), varargs...) +} + +// PutBucketAcl mocks base method +func (m *MockS3API) PutBucketAcl(arg0 *s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketAcl", arg0) + ret0, _ := ret[0].(*s3.PutBucketAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketAcl indicates an expected call of PutBucketAcl +func (mr *MockS3APIMockRecorder) PutBucketAcl(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAcl", reflect.TypeOf((*MockS3API)(nil).PutBucketAcl), arg0) +} + +// PutBucketAclRequest mocks base method +func (m *MockS3API) PutBucketAclRequest(arg0 *s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketAclRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketAclOutput) + return ret0, ret1 +} + +// PutBucketAclRequest indicates an expected call of 
PutBucketAclRequest +func (mr *MockS3APIMockRecorder) PutBucketAclRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAclRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketAclRequest), arg0) +} + +// PutBucketAclWithContext mocks base method +func (m *MockS3API) PutBucketAclWithContext(arg0 context.Context, arg1 *s3.PutBucketAclInput, arg2 ...request.Option) (*s3.PutBucketAclOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketAclWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketAclWithContext indicates an expected call of PutBucketAclWithContext +func (mr *MockS3APIMockRecorder) PutBucketAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAclWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketAclWithContext), varargs...) 
+} + +// PutBucketAnalyticsConfiguration mocks base method +func (m *MockS3API) PutBucketAnalyticsConfiguration(arg0 *s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketAnalyticsConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutBucketAnalyticsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketAnalyticsConfiguration indicates an expected call of PutBucketAnalyticsConfiguration +func (mr *MockS3APIMockRecorder) PutBucketAnalyticsConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAnalyticsConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketAnalyticsConfiguration), arg0) +} + +// PutBucketAnalyticsConfigurationRequest mocks base method +func (m *MockS3API) PutBucketAnalyticsConfigurationRequest(arg0 *s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketAnalyticsConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketAnalyticsConfigurationOutput) + return ret0, ret1 +} + +// PutBucketAnalyticsConfigurationRequest indicates an expected call of PutBucketAnalyticsConfigurationRequest +func (mr *MockS3APIMockRecorder) PutBucketAnalyticsConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAnalyticsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketAnalyticsConfigurationRequest), arg0) +} + +// PutBucketAnalyticsConfigurationWithContext mocks base method +func (m *MockS3API) PutBucketAnalyticsConfigurationWithContext(arg0 context.Context, arg1 *s3.PutBucketAnalyticsConfigurationInput, arg2 ...request.Option) (*s3.PutBucketAnalyticsConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := 
[]interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketAnalyticsConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketAnalyticsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketAnalyticsConfigurationWithContext indicates an expected call of PutBucketAnalyticsConfigurationWithContext +func (mr *MockS3APIMockRecorder) PutBucketAnalyticsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAnalyticsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketAnalyticsConfigurationWithContext), varargs...) +} + +// PutBucketCors mocks base method +func (m *MockS3API) PutBucketCors(arg0 *s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketCors", arg0) + ret0, _ := ret[0].(*s3.PutBucketCorsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketCors indicates an expected call of PutBucketCors +func (mr *MockS3APIMockRecorder) PutBucketCors(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketCors", reflect.TypeOf((*MockS3API)(nil).PutBucketCors), arg0) +} + +// PutBucketCorsRequest mocks base method +func (m *MockS3API) PutBucketCorsRequest(arg0 *s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketCorsRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketCorsOutput) + return ret0, ret1 +} + +// PutBucketCorsRequest indicates an expected call of PutBucketCorsRequest +func (mr *MockS3APIMockRecorder) PutBucketCorsRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketCorsRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketCorsRequest), arg0) +} + +// PutBucketCorsWithContext mocks base method +func (m *MockS3API) PutBucketCorsWithContext(arg0 context.Context, arg1 *s3.PutBucketCorsInput, arg2 ...request.Option) (*s3.PutBucketCorsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketCorsWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketCorsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketCorsWithContext indicates an expected call of PutBucketCorsWithContext +func (mr *MockS3APIMockRecorder) PutBucketCorsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketCorsWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketCorsWithContext), varargs...) 
+} + +// PutBucketEncryption mocks base method +func (m *MockS3API) PutBucketEncryption(arg0 *s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketEncryption", arg0) + ret0, _ := ret[0].(*s3.PutBucketEncryptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketEncryption indicates an expected call of PutBucketEncryption +func (mr *MockS3APIMockRecorder) PutBucketEncryption(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketEncryption", reflect.TypeOf((*MockS3API)(nil).PutBucketEncryption), arg0) +} + +// PutBucketEncryptionRequest mocks base method +func (m *MockS3API) PutBucketEncryptionRequest(arg0 *s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketEncryptionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketEncryptionOutput) + return ret0, ret1 +} + +// PutBucketEncryptionRequest indicates an expected call of PutBucketEncryptionRequest +func (mr *MockS3APIMockRecorder) PutBucketEncryptionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketEncryptionRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketEncryptionRequest), arg0) +} + +// PutBucketEncryptionWithContext mocks base method +func (m *MockS3API) PutBucketEncryptionWithContext(arg0 context.Context, arg1 *s3.PutBucketEncryptionInput, arg2 ...request.Option) (*s3.PutBucketEncryptionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketEncryptionWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.PutBucketEncryptionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketEncryptionWithContext indicates an expected call of PutBucketEncryptionWithContext +func (mr *MockS3APIMockRecorder) PutBucketEncryptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketEncryptionWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketEncryptionWithContext), varargs...) +} + +// PutBucketInventoryConfiguration mocks base method +func (m *MockS3API) PutBucketInventoryConfiguration(arg0 *s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketInventoryConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutBucketInventoryConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketInventoryConfiguration indicates an expected call of PutBucketInventoryConfiguration +func (mr *MockS3APIMockRecorder) PutBucketInventoryConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketInventoryConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketInventoryConfiguration), arg0) +} + +// PutBucketInventoryConfigurationRequest mocks base method +func (m *MockS3API) PutBucketInventoryConfigurationRequest(arg0 *s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketInventoryConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketInventoryConfigurationOutput) + return ret0, ret1 +} + +// PutBucketInventoryConfigurationRequest indicates an expected call of PutBucketInventoryConfigurationRequest +func (mr *MockS3APIMockRecorder) 
PutBucketInventoryConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketInventoryConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketInventoryConfigurationRequest), arg0) +} + +// PutBucketInventoryConfigurationWithContext mocks base method +func (m *MockS3API) PutBucketInventoryConfigurationWithContext(arg0 context.Context, arg1 *s3.PutBucketInventoryConfigurationInput, arg2 ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketInventoryConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketInventoryConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketInventoryConfigurationWithContext indicates an expected call of PutBucketInventoryConfigurationWithContext +func (mr *MockS3APIMockRecorder) PutBucketInventoryConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketInventoryConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketInventoryConfigurationWithContext), varargs...) 
+} + +// PutBucketLifecycle mocks base method +func (m *MockS3API) PutBucketLifecycle(arg0 *s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketLifecycle", arg0) + ret0, _ := ret[0].(*s3.PutBucketLifecycleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketLifecycle indicates an expected call of PutBucketLifecycle +func (mr *MockS3APIMockRecorder) PutBucketLifecycle(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycle", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycle), arg0) +} + +// PutBucketLifecycleConfiguration mocks base method +func (m *MockS3API) PutBucketLifecycleConfiguration(arg0 *s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketLifecycleConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutBucketLifecycleConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketLifecycleConfiguration indicates an expected call of PutBucketLifecycleConfiguration +func (mr *MockS3APIMockRecorder) PutBucketLifecycleConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleConfiguration), arg0) +} + +// PutBucketLifecycleConfigurationRequest mocks base method +func (m *MockS3API) PutBucketLifecycleConfigurationRequest(arg0 *s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketLifecycleConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketLifecycleConfigurationOutput) + return ret0, ret1 +} + +// PutBucketLifecycleConfigurationRequest indicates an expected call of 
PutBucketLifecycleConfigurationRequest +func (mr *MockS3APIMockRecorder) PutBucketLifecycleConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleConfigurationRequest), arg0) +} + +// PutBucketLifecycleConfigurationWithContext mocks base method +func (m *MockS3API) PutBucketLifecycleConfigurationWithContext(arg0 context.Context, arg1 *s3.PutBucketLifecycleConfigurationInput, arg2 ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketLifecycleConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketLifecycleConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketLifecycleConfigurationWithContext indicates an expected call of PutBucketLifecycleConfigurationWithContext +func (mr *MockS3APIMockRecorder) PutBucketLifecycleConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleConfigurationWithContext), varargs...) 
+} + +// PutBucketLifecycleRequest mocks base method +func (m *MockS3API) PutBucketLifecycleRequest(arg0 *s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketLifecycleRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketLifecycleOutput) + return ret0, ret1 +} + +// PutBucketLifecycleRequest indicates an expected call of PutBucketLifecycleRequest +func (mr *MockS3APIMockRecorder) PutBucketLifecycleRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleRequest), arg0) +} + +// PutBucketLifecycleWithContext mocks base method +func (m *MockS3API) PutBucketLifecycleWithContext(arg0 context.Context, arg1 *s3.PutBucketLifecycleInput, arg2 ...request.Option) (*s3.PutBucketLifecycleOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketLifecycleWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketLifecycleOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketLifecycleWithContext indicates an expected call of PutBucketLifecycleWithContext +func (mr *MockS3APIMockRecorder) PutBucketLifecycleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleWithContext), varargs...) 
+} + +// PutBucketLogging mocks base method +func (m *MockS3API) PutBucketLogging(arg0 *s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketLogging", arg0) + ret0, _ := ret[0].(*s3.PutBucketLoggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketLogging indicates an expected call of PutBucketLogging +func (mr *MockS3APIMockRecorder) PutBucketLogging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLogging", reflect.TypeOf((*MockS3API)(nil).PutBucketLogging), arg0) +} + +// PutBucketLoggingRequest mocks base method +func (m *MockS3API) PutBucketLoggingRequest(arg0 *s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketLoggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketLoggingOutput) + return ret0, ret1 +} + +// PutBucketLoggingRequest indicates an expected call of PutBucketLoggingRequest +func (mr *MockS3APIMockRecorder) PutBucketLoggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLoggingRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketLoggingRequest), arg0) +} + +// PutBucketLoggingWithContext mocks base method +func (m *MockS3API) PutBucketLoggingWithContext(arg0 context.Context, arg1 *s3.PutBucketLoggingInput, arg2 ...request.Option) (*s3.PutBucketLoggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketLoggingWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.PutBucketLoggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketLoggingWithContext indicates an expected call of PutBucketLoggingWithContext +func (mr *MockS3APIMockRecorder) PutBucketLoggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLoggingWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketLoggingWithContext), varargs...) +} + +// PutBucketMetricsConfiguration mocks base method +func (m *MockS3API) PutBucketMetricsConfiguration(arg0 *s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketMetricsConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutBucketMetricsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketMetricsConfiguration indicates an expected call of PutBucketMetricsConfiguration +func (mr *MockS3APIMockRecorder) PutBucketMetricsConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketMetricsConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketMetricsConfiguration), arg0) +} + +// PutBucketMetricsConfigurationRequest mocks base method +func (m *MockS3API) PutBucketMetricsConfigurationRequest(arg0 *s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketMetricsConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketMetricsConfigurationOutput) + return ret0, ret1 +} + +// PutBucketMetricsConfigurationRequest indicates an expected call of PutBucketMetricsConfigurationRequest +func (mr *MockS3APIMockRecorder) PutBucketMetricsConfigurationRequest(arg0 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketMetricsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketMetricsConfigurationRequest), arg0) +} + +// PutBucketMetricsConfigurationWithContext mocks base method +func (m *MockS3API) PutBucketMetricsConfigurationWithContext(arg0 context.Context, arg1 *s3.PutBucketMetricsConfigurationInput, arg2 ...request.Option) (*s3.PutBucketMetricsConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketMetricsConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketMetricsConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketMetricsConfigurationWithContext indicates an expected call of PutBucketMetricsConfigurationWithContext +func (mr *MockS3APIMockRecorder) PutBucketMetricsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketMetricsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketMetricsConfigurationWithContext), varargs...) 
+} + +// PutBucketNotification mocks base method +func (m *MockS3API) PutBucketNotification(arg0 *s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketNotification", arg0) + ret0, _ := ret[0].(*s3.PutBucketNotificationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketNotification indicates an expected call of PutBucketNotification +func (mr *MockS3APIMockRecorder) PutBucketNotification(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotification", reflect.TypeOf((*MockS3API)(nil).PutBucketNotification), arg0) +} + +// PutBucketNotificationConfiguration mocks base method +func (m *MockS3API) PutBucketNotificationConfiguration(arg0 *s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketNotificationConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutBucketNotificationConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketNotificationConfiguration indicates an expected call of PutBucketNotificationConfiguration +func (mr *MockS3APIMockRecorder) PutBucketNotificationConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationConfiguration), arg0) +} + +// PutBucketNotificationConfigurationRequest mocks base method +func (m *MockS3API) PutBucketNotificationConfigurationRequest(arg0 *s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketNotificationConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketNotificationConfigurationOutput) + return ret0, ret1 +} + 
+// PutBucketNotificationConfigurationRequest indicates an expected call of PutBucketNotificationConfigurationRequest +func (mr *MockS3APIMockRecorder) PutBucketNotificationConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationConfigurationRequest), arg0) +} + +// PutBucketNotificationConfigurationWithContext mocks base method +func (m *MockS3API) PutBucketNotificationConfigurationWithContext(arg0 context.Context, arg1 *s3.PutBucketNotificationConfigurationInput, arg2 ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketNotificationConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketNotificationConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketNotificationConfigurationWithContext indicates an expected call of PutBucketNotificationConfigurationWithContext +func (mr *MockS3APIMockRecorder) PutBucketNotificationConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationConfigurationWithContext), varargs...) 
+} + +// PutBucketNotificationRequest mocks base method +func (m *MockS3API) PutBucketNotificationRequest(arg0 *s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketNotificationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketNotificationOutput) + return ret0, ret1 +} + +// PutBucketNotificationRequest indicates an expected call of PutBucketNotificationRequest +func (mr *MockS3APIMockRecorder) PutBucketNotificationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationRequest), arg0) +} + +// PutBucketNotificationWithContext mocks base method +func (m *MockS3API) PutBucketNotificationWithContext(arg0 context.Context, arg1 *s3.PutBucketNotificationInput, arg2 ...request.Option) (*s3.PutBucketNotificationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketNotificationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketNotificationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketNotificationWithContext indicates an expected call of PutBucketNotificationWithContext +func (mr *MockS3APIMockRecorder) PutBucketNotificationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationWithContext), varargs...) 
+} + +// PutBucketPolicy mocks base method +func (m *MockS3API) PutBucketPolicy(arg0 *s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketPolicy", arg0) + ret0, _ := ret[0].(*s3.PutBucketPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketPolicy indicates an expected call of PutBucketPolicy +func (mr *MockS3APIMockRecorder) PutBucketPolicy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketPolicy", reflect.TypeOf((*MockS3API)(nil).PutBucketPolicy), arg0) +} + +// PutBucketPolicyRequest mocks base method +func (m *MockS3API) PutBucketPolicyRequest(arg0 *s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketPolicyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketPolicyOutput) + return ret0, ret1 +} + +// PutBucketPolicyRequest indicates an expected call of PutBucketPolicyRequest +func (mr *MockS3APIMockRecorder) PutBucketPolicyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketPolicyRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketPolicyRequest), arg0) +} + +// PutBucketPolicyWithContext mocks base method +func (m *MockS3API) PutBucketPolicyWithContext(arg0 context.Context, arg1 *s3.PutBucketPolicyInput, arg2 ...request.Option) (*s3.PutBucketPolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketPolicyWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.PutBucketPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketPolicyWithContext indicates an expected call of PutBucketPolicyWithContext +func (mr *MockS3APIMockRecorder) PutBucketPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketPolicyWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketPolicyWithContext), varargs...) +} + +// PutBucketReplication mocks base method +func (m *MockS3API) PutBucketReplication(arg0 *s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketReplication", arg0) + ret0, _ := ret[0].(*s3.PutBucketReplicationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketReplication indicates an expected call of PutBucketReplication +func (mr *MockS3APIMockRecorder) PutBucketReplication(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketReplication", reflect.TypeOf((*MockS3API)(nil).PutBucketReplication), arg0) +} + +// PutBucketReplicationRequest mocks base method +func (m *MockS3API) PutBucketReplicationRequest(arg0 *s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketReplicationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketReplicationOutput) + return ret0, ret1 +} + +// PutBucketReplicationRequest indicates an expected call of PutBucketReplicationRequest +func (mr *MockS3APIMockRecorder) PutBucketReplicationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketReplicationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketReplicationRequest), arg0) +} + +// 
PutBucketReplicationWithContext mocks base method +func (m *MockS3API) PutBucketReplicationWithContext(arg0 context.Context, arg1 *s3.PutBucketReplicationInput, arg2 ...request.Option) (*s3.PutBucketReplicationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketReplicationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketReplicationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketReplicationWithContext indicates an expected call of PutBucketReplicationWithContext +func (mr *MockS3APIMockRecorder) PutBucketReplicationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketReplicationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketReplicationWithContext), varargs...) +} + +// PutBucketRequestPayment mocks base method +func (m *MockS3API) PutBucketRequestPayment(arg0 *s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketRequestPayment", arg0) + ret0, _ := ret[0].(*s3.PutBucketRequestPaymentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketRequestPayment indicates an expected call of PutBucketRequestPayment +func (mr *MockS3APIMockRecorder) PutBucketRequestPayment(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketRequestPayment", reflect.TypeOf((*MockS3API)(nil).PutBucketRequestPayment), arg0) +} + +// PutBucketRequestPaymentRequest mocks base method +func (m *MockS3API) PutBucketRequestPaymentRequest(arg0 *s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketRequestPaymentRequest", 
arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketRequestPaymentOutput) + return ret0, ret1 +} + +// PutBucketRequestPaymentRequest indicates an expected call of PutBucketRequestPaymentRequest +func (mr *MockS3APIMockRecorder) PutBucketRequestPaymentRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketRequestPaymentRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketRequestPaymentRequest), arg0) +} + +// PutBucketRequestPaymentWithContext mocks base method +func (m *MockS3API) PutBucketRequestPaymentWithContext(arg0 context.Context, arg1 *s3.PutBucketRequestPaymentInput, arg2 ...request.Option) (*s3.PutBucketRequestPaymentOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketRequestPaymentWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketRequestPaymentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketRequestPaymentWithContext indicates an expected call of PutBucketRequestPaymentWithContext +func (mr *MockS3APIMockRecorder) PutBucketRequestPaymentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketRequestPaymentWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketRequestPaymentWithContext), varargs...) 
+} + +// PutBucketTagging mocks base method +func (m *MockS3API) PutBucketTagging(arg0 *s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketTagging", arg0) + ret0, _ := ret[0].(*s3.PutBucketTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketTagging indicates an expected call of PutBucketTagging +func (mr *MockS3APIMockRecorder) PutBucketTagging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketTagging", reflect.TypeOf((*MockS3API)(nil).PutBucketTagging), arg0) +} + +// PutBucketTaggingRequest mocks base method +func (m *MockS3API) PutBucketTaggingRequest(arg0 *s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketTaggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketTaggingOutput) + return ret0, ret1 +} + +// PutBucketTaggingRequest indicates an expected call of PutBucketTaggingRequest +func (mr *MockS3APIMockRecorder) PutBucketTaggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketTaggingRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketTaggingRequest), arg0) +} + +// PutBucketTaggingWithContext mocks base method +func (m *MockS3API) PutBucketTaggingWithContext(arg0 context.Context, arg1 *s3.PutBucketTaggingInput, arg2 ...request.Option) (*s3.PutBucketTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketTaggingWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.PutBucketTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketTaggingWithContext indicates an expected call of PutBucketTaggingWithContext +func (mr *MockS3APIMockRecorder) PutBucketTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketTaggingWithContext), varargs...) +} + +// PutBucketVersioning mocks base method +func (m *MockS3API) PutBucketVersioning(arg0 *s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketVersioning", arg0) + ret0, _ := ret[0].(*s3.PutBucketVersioningOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketVersioning indicates an expected call of PutBucketVersioning +func (mr *MockS3APIMockRecorder) PutBucketVersioning(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketVersioning", reflect.TypeOf((*MockS3API)(nil).PutBucketVersioning), arg0) +} + +// PutBucketVersioningRequest mocks base method +func (m *MockS3API) PutBucketVersioningRequest(arg0 *s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketVersioningRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketVersioningOutput) + return ret0, ret1 +} + +// PutBucketVersioningRequest indicates an expected call of PutBucketVersioningRequest +func (mr *MockS3APIMockRecorder) PutBucketVersioningRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketVersioningRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketVersioningRequest), arg0) +} + +// 
PutBucketVersioningWithContext mocks base method +func (m *MockS3API) PutBucketVersioningWithContext(arg0 context.Context, arg1 *s3.PutBucketVersioningInput, arg2 ...request.Option) (*s3.PutBucketVersioningOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketVersioningWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketVersioningOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketVersioningWithContext indicates an expected call of PutBucketVersioningWithContext +func (mr *MockS3APIMockRecorder) PutBucketVersioningWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketVersioningWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketVersioningWithContext), varargs...) +} + +// PutBucketWebsite mocks base method +func (m *MockS3API) PutBucketWebsite(arg0 *s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketWebsite", arg0) + ret0, _ := ret[0].(*s3.PutBucketWebsiteOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketWebsite indicates an expected call of PutBucketWebsite +func (mr *MockS3APIMockRecorder) PutBucketWebsite(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketWebsite", reflect.TypeOf((*MockS3API)(nil).PutBucketWebsite), arg0) +} + +// PutBucketWebsiteRequest mocks base method +func (m *MockS3API) PutBucketWebsiteRequest(arg0 *s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutBucketWebsiteRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutBucketWebsiteOutput) + return ret0, ret1 +} + +// 
PutBucketWebsiteRequest indicates an expected call of PutBucketWebsiteRequest +func (mr *MockS3APIMockRecorder) PutBucketWebsiteRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketWebsiteRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketWebsiteRequest), arg0) +} + +// PutBucketWebsiteWithContext mocks base method +func (m *MockS3API) PutBucketWebsiteWithContext(arg0 context.Context, arg1 *s3.PutBucketWebsiteInput, arg2 ...request.Option) (*s3.PutBucketWebsiteOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutBucketWebsiteWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutBucketWebsiteOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutBucketWebsiteWithContext indicates an expected call of PutBucketWebsiteWithContext +func (mr *MockS3APIMockRecorder) PutBucketWebsiteWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketWebsiteWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketWebsiteWithContext), varargs...) 
+} + +// PutObject mocks base method +func (m *MockS3API) PutObject(arg0 *s3.PutObjectInput) (*s3.PutObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObject", arg0) + ret0, _ := ret[0].(*s3.PutObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObject indicates an expected call of PutObject +func (mr *MockS3APIMockRecorder) PutObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObject", reflect.TypeOf((*MockS3API)(nil).PutObject), arg0) +} + +// PutObjectAcl mocks base method +func (m *MockS3API) PutObjectAcl(arg0 *s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectAcl", arg0) + ret0, _ := ret[0].(*s3.PutObjectAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectAcl indicates an expected call of PutObjectAcl +func (mr *MockS3APIMockRecorder) PutObjectAcl(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectAcl", reflect.TypeOf((*MockS3API)(nil).PutObjectAcl), arg0) +} + +// PutObjectAclRequest mocks base method +func (m *MockS3API) PutObjectAclRequest(arg0 *s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectAclRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutObjectAclOutput) + return ret0, ret1 +} + +// PutObjectAclRequest indicates an expected call of PutObjectAclRequest +func (mr *MockS3APIMockRecorder) PutObjectAclRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectAclRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectAclRequest), arg0) +} + +// PutObjectAclWithContext mocks base method +func (m *MockS3API) PutObjectAclWithContext(arg0 context.Context, arg1 *s3.PutObjectAclInput, arg2 ...request.Option) 
(*s3.PutObjectAclOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObjectAclWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutObjectAclOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectAclWithContext indicates an expected call of PutObjectAclWithContext +func (mr *MockS3APIMockRecorder) PutObjectAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectAclWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectAclWithContext), varargs...) +} + +// PutObjectLegalHold mocks base method +func (m *MockS3API) PutObjectLegalHold(arg0 *s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectLegalHold", arg0) + ret0, _ := ret[0].(*s3.PutObjectLegalHoldOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectLegalHold indicates an expected call of PutObjectLegalHold +func (mr *MockS3APIMockRecorder) PutObjectLegalHold(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLegalHold", reflect.TypeOf((*MockS3API)(nil).PutObjectLegalHold), arg0) +} + +// PutObjectLegalHoldRequest mocks base method +func (m *MockS3API) PutObjectLegalHoldRequest(arg0 *s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectLegalHoldRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutObjectLegalHoldOutput) + return ret0, ret1 +} + +// PutObjectLegalHoldRequest indicates an expected call of PutObjectLegalHoldRequest +func (mr *MockS3APIMockRecorder) PutObjectLegalHoldRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLegalHoldRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectLegalHoldRequest), arg0) +} + +// PutObjectLegalHoldWithContext mocks base method +func (m *MockS3API) PutObjectLegalHoldWithContext(arg0 context.Context, arg1 *s3.PutObjectLegalHoldInput, arg2 ...request.Option) (*s3.PutObjectLegalHoldOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObjectLegalHoldWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutObjectLegalHoldOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectLegalHoldWithContext indicates an expected call of PutObjectLegalHoldWithContext +func (mr *MockS3APIMockRecorder) PutObjectLegalHoldWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLegalHoldWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectLegalHoldWithContext), varargs...) 
+} + +// PutObjectLockConfiguration mocks base method +func (m *MockS3API) PutObjectLockConfiguration(arg0 *s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectLockConfiguration", arg0) + ret0, _ := ret[0].(*s3.PutObjectLockConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectLockConfiguration indicates an expected call of PutObjectLockConfiguration +func (mr *MockS3APIMockRecorder) PutObjectLockConfiguration(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLockConfiguration", reflect.TypeOf((*MockS3API)(nil).PutObjectLockConfiguration), arg0) +} + +// PutObjectLockConfigurationRequest mocks base method +func (m *MockS3API) PutObjectLockConfigurationRequest(arg0 *s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectLockConfigurationRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutObjectLockConfigurationOutput) + return ret0, ret1 +} + +// PutObjectLockConfigurationRequest indicates an expected call of PutObjectLockConfigurationRequest +func (mr *MockS3APIMockRecorder) PutObjectLockConfigurationRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLockConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectLockConfigurationRequest), arg0) +} + +// PutObjectLockConfigurationWithContext mocks base method +func (m *MockS3API) PutObjectLockConfigurationWithContext(arg0 context.Context, arg1 *s3.PutObjectLockConfigurationInput, arg2 ...request.Option) (*s3.PutObjectLockConfigurationOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, 
"PutObjectLockConfigurationWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutObjectLockConfigurationOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectLockConfigurationWithContext indicates an expected call of PutObjectLockConfigurationWithContext +func (mr *MockS3APIMockRecorder) PutObjectLockConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLockConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectLockConfigurationWithContext), varargs...) +} + +// PutObjectRequest mocks base method +func (m *MockS3API) PutObjectRequest(arg0 *s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutObjectOutput) + return ret0, ret1 +} + +// PutObjectRequest indicates an expected call of PutObjectRequest +func (mr *MockS3APIMockRecorder) PutObjectRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectRequest), arg0) +} + +// PutObjectRetention mocks base method +func (m *MockS3API) PutObjectRetention(arg0 *s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectRetention", arg0) + ret0, _ := ret[0].(*s3.PutObjectRetentionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectRetention indicates an expected call of PutObjectRetention +func (mr *MockS3APIMockRecorder) PutObjectRetention(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRetention", reflect.TypeOf((*MockS3API)(nil).PutObjectRetention), arg0) +} + +// 
PutObjectRetentionRequest mocks base method +func (m *MockS3API) PutObjectRetentionRequest(arg0 *s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectRetentionRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutObjectRetentionOutput) + return ret0, ret1 +} + +// PutObjectRetentionRequest indicates an expected call of PutObjectRetentionRequest +func (mr *MockS3APIMockRecorder) PutObjectRetentionRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRetentionRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectRetentionRequest), arg0) +} + +// PutObjectRetentionWithContext mocks base method +func (m *MockS3API) PutObjectRetentionWithContext(arg0 context.Context, arg1 *s3.PutObjectRetentionInput, arg2 ...request.Option) (*s3.PutObjectRetentionOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObjectRetentionWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutObjectRetentionOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectRetentionWithContext indicates an expected call of PutObjectRetentionWithContext +func (mr *MockS3APIMockRecorder) PutObjectRetentionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRetentionWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectRetentionWithContext), varargs...) 
+} + +// PutObjectTagging mocks base method +func (m *MockS3API) PutObjectTagging(arg0 *s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectTagging", arg0) + ret0, _ := ret[0].(*s3.PutObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectTagging indicates an expected call of PutObjectTagging +func (mr *MockS3APIMockRecorder) PutObjectTagging(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTagging", reflect.TypeOf((*MockS3API)(nil).PutObjectTagging), arg0) +} + +// PutObjectTaggingRequest mocks base method +func (m *MockS3API) PutObjectTaggingRequest(arg0 *s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutObjectTaggingRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutObjectTaggingOutput) + return ret0, ret1 +} + +// PutObjectTaggingRequest indicates an expected call of PutObjectTaggingRequest +func (mr *MockS3APIMockRecorder) PutObjectTaggingRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTaggingRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectTaggingRequest), arg0) +} + +// PutObjectTaggingWithContext mocks base method +func (m *MockS3API) PutObjectTaggingWithContext(arg0 context.Context, arg1 *s3.PutObjectTaggingInput, arg2 ...request.Option) (*s3.PutObjectTaggingOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObjectTaggingWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.PutObjectTaggingOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectTaggingWithContext indicates an expected call of PutObjectTaggingWithContext +func (mr *MockS3APIMockRecorder) PutObjectTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectTaggingWithContext), varargs...) +} + +// PutObjectWithContext mocks base method +func (m *MockS3API) PutObjectWithContext(arg0 context.Context, arg1 *s3.PutObjectInput, arg2 ...request.Option) (*s3.PutObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutObjectWithContext", varargs...) + ret0, _ := ret[0].(*s3.PutObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutObjectWithContext indicates an expected call of PutObjectWithContext +func (mr *MockS3APIMockRecorder) PutObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectWithContext), varargs...) 
+} + +// PutPublicAccessBlock mocks base method +func (m *MockS3API) PutPublicAccessBlock(arg0 *s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutPublicAccessBlock", arg0) + ret0, _ := ret[0].(*s3.PutPublicAccessBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutPublicAccessBlock indicates an expected call of PutPublicAccessBlock +func (mr *MockS3APIMockRecorder) PutPublicAccessBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPublicAccessBlock", reflect.TypeOf((*MockS3API)(nil).PutPublicAccessBlock), arg0) +} + +// PutPublicAccessBlockRequest mocks base method +func (m *MockS3API) PutPublicAccessBlockRequest(arg0 *s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutPublicAccessBlockRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.PutPublicAccessBlockOutput) + return ret0, ret1 +} + +// PutPublicAccessBlockRequest indicates an expected call of PutPublicAccessBlockRequest +func (mr *MockS3APIMockRecorder) PutPublicAccessBlockRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPublicAccessBlockRequest", reflect.TypeOf((*MockS3API)(nil).PutPublicAccessBlockRequest), arg0) +} + +// PutPublicAccessBlockWithContext mocks base method +func (m *MockS3API) PutPublicAccessBlockWithContext(arg0 context.Context, arg1 *s3.PutPublicAccessBlockInput, arg2 ...request.Option) (*s3.PutPublicAccessBlockOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PutPublicAccessBlockWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.PutPublicAccessBlockOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PutPublicAccessBlockWithContext indicates an expected call of PutPublicAccessBlockWithContext +func (mr *MockS3APIMockRecorder) PutPublicAccessBlockWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPublicAccessBlockWithContext", reflect.TypeOf((*MockS3API)(nil).PutPublicAccessBlockWithContext), varargs...) +} + +// RestoreObject mocks base method +func (m *MockS3API) RestoreObject(arg0 *s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RestoreObject", arg0) + ret0, _ := ret[0].(*s3.RestoreObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RestoreObject indicates an expected call of RestoreObject +func (mr *MockS3APIMockRecorder) RestoreObject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObject", reflect.TypeOf((*MockS3API)(nil).RestoreObject), arg0) +} + +// RestoreObjectRequest mocks base method +func (m *MockS3API) RestoreObjectRequest(arg0 *s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RestoreObjectRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.RestoreObjectOutput) + return ret0, ret1 +} + +// RestoreObjectRequest indicates an expected call of RestoreObjectRequest +func (mr *MockS3APIMockRecorder) RestoreObjectRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObjectRequest", reflect.TypeOf((*MockS3API)(nil).RestoreObjectRequest), arg0) +} + +// RestoreObjectWithContext mocks base method +func (m *MockS3API) RestoreObjectWithContext(arg0 context.Context, arg1 
*s3.RestoreObjectInput, arg2 ...request.Option) (*s3.RestoreObjectOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "RestoreObjectWithContext", varargs...) + ret0, _ := ret[0].(*s3.RestoreObjectOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RestoreObjectWithContext indicates an expected call of RestoreObjectWithContext +func (mr *MockS3APIMockRecorder) RestoreObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObjectWithContext", reflect.TypeOf((*MockS3API)(nil).RestoreObjectWithContext), varargs...) +} + +// SelectObjectContent mocks base method +func (m *MockS3API) SelectObjectContent(arg0 *s3.SelectObjectContentInput) (*s3.SelectObjectContentOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SelectObjectContent", arg0) + ret0, _ := ret[0].(*s3.SelectObjectContentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SelectObjectContent indicates an expected call of SelectObjectContent +func (mr *MockS3APIMockRecorder) SelectObjectContent(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectObjectContent", reflect.TypeOf((*MockS3API)(nil).SelectObjectContent), arg0) +} + +// SelectObjectContentRequest mocks base method +func (m *MockS3API) SelectObjectContentRequest(arg0 *s3.SelectObjectContentInput) (*request.Request, *s3.SelectObjectContentOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SelectObjectContentRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.SelectObjectContentOutput) + return ret0, ret1 +} + +// SelectObjectContentRequest indicates an expected call of SelectObjectContentRequest +func (mr *MockS3APIMockRecorder) 
SelectObjectContentRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectObjectContentRequest", reflect.TypeOf((*MockS3API)(nil).SelectObjectContentRequest), arg0) +} + +// SelectObjectContentWithContext mocks base method +func (m *MockS3API) SelectObjectContentWithContext(arg0 context.Context, arg1 *s3.SelectObjectContentInput, arg2 ...request.Option) (*s3.SelectObjectContentOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SelectObjectContentWithContext", varargs...) + ret0, _ := ret[0].(*s3.SelectObjectContentOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SelectObjectContentWithContext indicates an expected call of SelectObjectContentWithContext +func (mr *MockS3APIMockRecorder) SelectObjectContentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectObjectContentWithContext", reflect.TypeOf((*MockS3API)(nil).SelectObjectContentWithContext), varargs...) 
+} + +// UploadPart mocks base method +func (m *MockS3API) UploadPart(arg0 *s3.UploadPartInput) (*s3.UploadPartOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UploadPart", arg0) + ret0, _ := ret[0].(*s3.UploadPartOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UploadPart indicates an expected call of UploadPart +func (mr *MockS3APIMockRecorder) UploadPart(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPart", reflect.TypeOf((*MockS3API)(nil).UploadPart), arg0) +} + +// UploadPartCopy mocks base method +func (m *MockS3API) UploadPartCopy(arg0 *s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UploadPartCopy", arg0) + ret0, _ := ret[0].(*s3.UploadPartCopyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UploadPartCopy indicates an expected call of UploadPartCopy +func (mr *MockS3APIMockRecorder) UploadPartCopy(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopy", reflect.TypeOf((*MockS3API)(nil).UploadPartCopy), arg0) +} + +// UploadPartCopyRequest mocks base method +func (m *MockS3API) UploadPartCopyRequest(arg0 *s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UploadPartCopyRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.UploadPartCopyOutput) + return ret0, ret1 +} + +// UploadPartCopyRequest indicates an expected call of UploadPartCopyRequest +func (mr *MockS3APIMockRecorder) UploadPartCopyRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopyRequest", reflect.TypeOf((*MockS3API)(nil).UploadPartCopyRequest), arg0) +} + +// UploadPartCopyWithContext mocks base method +func (m *MockS3API) UploadPartCopyWithContext(arg0 context.Context, 
arg1 *s3.UploadPartCopyInput, arg2 ...request.Option) (*s3.UploadPartCopyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UploadPartCopyWithContext", varargs...) + ret0, _ := ret[0].(*s3.UploadPartCopyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UploadPartCopyWithContext indicates an expected call of UploadPartCopyWithContext +func (mr *MockS3APIMockRecorder) UploadPartCopyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopyWithContext", reflect.TypeOf((*MockS3API)(nil).UploadPartCopyWithContext), varargs...) +} + +// UploadPartRequest mocks base method +func (m *MockS3API) UploadPartRequest(arg0 *s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UploadPartRequest", arg0) + ret0, _ := ret[0].(*request.Request) + ret1, _ := ret[1].(*s3.UploadPartOutput) + return ret0, ret1 +} + +// UploadPartRequest indicates an expected call of UploadPartRequest +func (mr *MockS3APIMockRecorder) UploadPartRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartRequest", reflect.TypeOf((*MockS3API)(nil).UploadPartRequest), arg0) +} + +// UploadPartWithContext mocks base method +func (m *MockS3API) UploadPartWithContext(arg0 context.Context, arg1 *s3.UploadPartInput, arg2 ...request.Option) (*s3.UploadPartOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UploadPartWithContext", varargs...) 
+ ret0, _ := ret[0].(*s3.UploadPartOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UploadPartWithContext indicates an expected call of UploadPartWithContext +func (mr *MockS3APIMockRecorder) UploadPartWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartWithContext", reflect.TypeOf((*MockS3API)(nil).UploadPartWithContext), varargs...) +} + +// WaitUntilBucketExists mocks base method +func (m *MockS3API) WaitUntilBucketExists(arg0 *s3.HeadBucketInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitUntilBucketExists", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilBucketExists indicates an expected call of WaitUntilBucketExists +func (mr *MockS3APIMockRecorder) WaitUntilBucketExists(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketExists), arg0) +} + +// WaitUntilBucketExistsWithContext mocks base method +func (m *MockS3API) WaitUntilBucketExistsWithContext(arg0 context.Context, arg1 *s3.HeadBucketInput, arg2 ...request.WaiterOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WaitUntilBucketExistsWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilBucketExistsWithContext indicates an expected call of WaitUntilBucketExistsWithContext +func (mr *MockS3APIMockRecorder) WaitUntilBucketExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketExistsWithContext), varargs...) +} + +// WaitUntilBucketNotExists mocks base method +func (m *MockS3API) WaitUntilBucketNotExists(arg0 *s3.HeadBucketInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitUntilBucketNotExists", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilBucketNotExists indicates an expected call of WaitUntilBucketNotExists +func (mr *MockS3APIMockRecorder) WaitUntilBucketNotExists(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketNotExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketNotExists), arg0) +} + +// WaitUntilBucketNotExistsWithContext mocks base method +func (m *MockS3API) WaitUntilBucketNotExistsWithContext(arg0 context.Context, arg1 *s3.HeadBucketInput, arg2 ...request.WaiterOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WaitUntilBucketNotExistsWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilBucketNotExistsWithContext indicates an expected call of WaitUntilBucketNotExistsWithContext +func (mr *MockS3APIMockRecorder) WaitUntilBucketNotExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketNotExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketNotExistsWithContext), varargs...) 
+} + +// WaitUntilObjectExists mocks base method +func (m *MockS3API) WaitUntilObjectExists(arg0 *s3.HeadObjectInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitUntilObjectExists", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilObjectExists indicates an expected call of WaitUntilObjectExists +func (mr *MockS3APIMockRecorder) WaitUntilObjectExists(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectExists), arg0) +} + +// WaitUntilObjectExistsWithContext mocks base method +func (m *MockS3API) WaitUntilObjectExistsWithContext(arg0 context.Context, arg1 *s3.HeadObjectInput, arg2 ...request.WaiterOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WaitUntilObjectExistsWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilObjectExistsWithContext indicates an expected call of WaitUntilObjectExistsWithContext +func (mr *MockS3APIMockRecorder) WaitUntilObjectExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectExistsWithContext), varargs...) 
+} + +// WaitUntilObjectNotExists mocks base method +func (m *MockS3API) WaitUntilObjectNotExists(arg0 *s3.HeadObjectInput) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitUntilObjectNotExists", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilObjectNotExists indicates an expected call of WaitUntilObjectNotExists +func (mr *MockS3APIMockRecorder) WaitUntilObjectNotExists(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectNotExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectNotExists), arg0) +} + +// WaitUntilObjectNotExistsWithContext mocks base method +func (m *MockS3API) WaitUntilObjectNotExistsWithContext(arg0 context.Context, arg1 *s3.HeadObjectInput, arg2 ...request.WaiterOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WaitUntilObjectNotExistsWithContext", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitUntilObjectNotExistsWithContext indicates an expected call of WaitUntilObjectNotExistsWithContext +func (mr *MockS3APIMockRecorder) WaitUntilObjectNotExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectNotExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectNotExistsWithContext), varargs...) 
+} diff --git a/internal/installcfg/installcfg.go b/internal/installcfg/installcfg.go index 74e654f5c..4ed2e67a7 100644 --- a/internal/installcfg/installcfg.go +++ b/internal/installcfg/installcfg.go @@ -1,49 +1,17 @@ package installcfg import ( - "encoding/json" - "errors" "fmt" "net" + "github.com/filanov/bm-inventory/internal/common" "github.com/filanov/bm-inventory/models" + "github.com/go-openapi/swag" + "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" ) -// [TODO] - remove once we stop supporting none platform -type InstallerConfigNone struct { - APIVersion string `yaml:"apiVersion"` - BaseDomain string `yaml:"baseDomain"` - Compute []struct { - Hyperthreading string `yaml:"hyperthreading"` - Name string `yaml:"name"` - Replicas int `yaml:"replicas"` - } `yaml:"compute"` - ControlPlane struct { - Hyperthreading string `yaml:"hyperthreading"` - Name string `yaml:"name"` - Replicas int `yaml:"replicas"` - } `yaml:"controlPlane"` - Metadata struct { - Name string `yaml:"name"` - } `yaml:"metadata"` - Networking struct { - ClusterNetwork []struct { - Cidr string `yaml:"cidr"` - HostPrefix int `yaml:"hostPrefix"` - } `yaml:"clusterNetwork"` - NetworkType string `yaml:"networkType"` - ServiceNetwork []string `yaml:"serviceNetwork"` - } `yaml:"networking"` - Platform struct { - None struct { - } `yaml:"none"` - } `yaml:"platform"` - PullSecret string `yaml:"pullSecret"` - SSHKey string `yaml:"sshKey"` -} - type bmc struct { Address string `yaml:"address"` Username string `yaml:"username"` @@ -101,49 +69,17 @@ type InstallerConfigBaremetal struct { SSHKey string `yaml:"sshKey"` } -func countHostsByRole(cluster *models.Cluster, role string) int { +func countHostsByRole(cluster *common.Cluster, role models.HostRole) int { var count int for _, host := range cluster.Hosts { - if host.Role == role { + if swag.StringValue(host.Status) != models.HostStatusDisabled && host.Role == role { count += 1 } } return count } -func getMachineCIDR(cluster *models.Cluster) (string, 
error) { - parsedVipAddr := net.ParseIP(string(cluster.APIVip)) - if parsedVipAddr == nil { - errStr := fmt.Sprintf("Could not parse VIP ip %s", cluster.APIVip) - logrus.Warn(errStr) - return "", errors.New(errStr) - } - for _, h := range cluster.Hosts { - var inventory models.Inventory - err := json.Unmarshal([]byte(h.Inventory), &inventory) - if err != nil { - logrus.WithError(err).Warnf("Error unmarshalling host inventory %s", h.Inventory) - continue - } - for _, intf := range inventory.Interfaces { - for _, ipv4addr := range intf.IPV4Addresses { - _, ipnet, err := net.ParseCIDR(ipv4addr) - if err != nil { - logrus.WithError(err).Warnf("Could not parse cidr %s", ipv4addr) - continue - } - if ipnet.Contains(parsedVipAddr) { - return ipnet.String(), nil - } - } - } - } - errStr := fmt.Sprintf("No suitable matching CIDR found for VIP %s", cluster.APIVip) - logrus.Warn(errStr) - return "", errors.New(errStr) -} - -func getBasicInstallConfig(cluster *models.Cluster, machineCIDR string) *InstallerConfigBaremetal { +func getBasicInstallConfig(cluster *common.Cluster) *InstallerConfigBaremetal { return &InstallerConfigBaremetal{ APIVersion: "v1", BaseDomain: cluster.BaseDNSDomain, @@ -168,7 +104,7 @@ func getBasicInstallConfig(cluster *models.Cluster, machineCIDR string) *Install MachineNetwork: []struct { Cidr string `yaml:"cidr"` }{ - {Cidr: machineCIDR}, + {Cidr: cluster.MachineNetworkCidr}, }, ServiceNetwork: []string{cluster.ServiceNetworkCidr}, }, @@ -181,14 +117,17 @@ func getBasicInstallConfig(cluster *models.Cluster, machineCIDR string) *Install Name string `yaml:"name"` Replicas int `yaml:"replicas"` }{ - {Name: "worker", Replicas: countHostsByRole(cluster, "worker")}, + { + Name: string(models.HostRoleWorker), + Replicas: countHostsByRole(cluster, models.HostRoleWorker), + }, }, ControlPlane: struct { Name string `yaml:"name"` Replicas int `yaml:"replicas"` }{ - Name: "master", - Replicas: countHostsByRole(cluster, "master"), + Name: 
string(models.HostRoleMaster), + Replicas: countHostsByRole(cluster, models.HostRoleMaster), }, PullSecret: cluster.PullSecret, SSHKey: cluster.SSHPublicKey, @@ -196,20 +135,20 @@ func getBasicInstallConfig(cluster *models.Cluster, machineCIDR string) *Install } // [TODO] - remove once we decide to use specific values from the hosts of the cluster -func getDummyMAC(dummyMAC string, count int) (string, error) { +func getDummyMAC(log logrus.FieldLogger, dummyMAC string, count int) (string, error) { hwMac, err := net.ParseMAC(dummyMAC) if err != nil { - logrus.Warn("Failed to parse dummyMac") + log.Warn("Failed to parse dummyMac") return "", err } hwMac[len(hwMac)-1] = hwMac[len(hwMac)-1] + byte(count) return hwMac.String(), nil } -func setPlatformInstallconfig(cluster *models.Cluster, cfg *InstallerConfigBaremetal) error { +func setBMPlatformInstallconfig(log logrus.FieldLogger, cluster *common.Cluster, cfg *InstallerConfigBaremetal) error { // set hosts - numMasters := countHostsByRole(cluster, "master") - numWorkers := countHostsByRole(cluster, "worker") + numMasters := countHostsByRole(cluster, models.HostRoleMaster) + numWorkers := countHostsByRole(cluster, models.HostRoleWorker) masterCount := 0 workerCount := 0 hosts := make([]host, numWorkers+numMasters) @@ -219,14 +158,14 @@ func setPlatformInstallconfig(cluster *models.Cluster, cfg *InstallerConfigBarem dummyPort := 6230 for i := range hosts { - logrus.Infof("Setting master, host %d, master count %d", i, masterCount) + log.Infof("Setting master, host %d, master count %d", i, masterCount) if i >= numMasters { hosts[i].Name = fmt.Sprintf("openshift-worker-%d", workerCount) - hosts[i].Role = "worker" + hosts[i].Role = string(models.HostRoleWorker) workerCount += 1 } else { hosts[i].Name = fmt.Sprintf("openshift-master-%d", masterCount) - hosts[i].Role = "master" + hosts[i].Role = string(models.HostRoleMaster) masterCount += 1 } hosts[i].Bmc = bmc{ @@ -234,9 +173,9 @@ func setPlatformInstallconfig(cluster 
*models.Cluster, cfg *InstallerConfigBarem Username: "admin", Password: "rackattack", } - hwMac, err := getDummyMAC(dummyMAC, i) + hwMac, err := getDummyMAC(log, dummyMAC, i) if err != nil { - logrus.Warn("Failed to parse dummyMac") + log.Warn("Failed to parse dummyMac") return err } hosts[i].BootMACAddress = hwMac @@ -245,74 +184,21 @@ func setPlatformInstallconfig(cluster *models.Cluster, cfg *InstallerConfigBarem } cfg.Platform = platform{ Baremetal: baremetal{ - ProvisioningNetworkInterface: "ethh0", - APIVIP: cluster.APIVip.String(), - IngressVIP: cluster.IngressVip.String(), - DNSVIP: cluster.APIVip.String(), + ProvisioningNetworkInterface: "ens4", + APIVIP: cluster.APIVip, + IngressVIP: cluster.IngressVip, + DNSVIP: cluster.APIVip, Hosts: hosts, }, } return nil } -func GetInstallConfig(cluster *models.Cluster) ([]byte, error) { - machineCidr, err := getMachineCIDR(cluster) +func GetInstallConfig(log logrus.FieldLogger, cluster *common.Cluster) ([]byte, error) { + cfg := getBasicInstallConfig(cluster) + err := setBMPlatformInstallconfig(log, cluster, cfg) if err != nil { return nil, err } - if cluster.OpenshiftVersion != models.ClusterOpenshiftVersionNr44 { - cfg := getBasicInstallConfig(cluster, machineCidr) - err = setPlatformInstallconfig(cluster, cfg) - if err != nil { - return nil, err - } - return yaml.Marshal(*cfg) - } else { - cfg := InstallerConfigNone{ - APIVersion: "v1", - BaseDomain: cluster.BaseDNSDomain, - Compute: []struct { - Hyperthreading string `yaml:"hyperthreading"` - Name string `yaml:"name"` - Replicas int `yaml:"replicas"` - }{ - {Hyperthreading: "Enabled", Name: "worker", Replicas: countHostsByRole(cluster, "worker")}, - }, - ControlPlane: struct { - Hyperthreading string `yaml:"hyperthreading"` - Name string `yaml:"name"` - Replicas int `yaml:"replicas"` - }{ - Hyperthreading: "Enabled", - Name: "master", - Replicas: countHostsByRole(cluster, "master"), - }, - Metadata: struct { - Name string `yaml:"name"` - }{Name: cluster.Name}, - 
Networking: struct { - ClusterNetwork []struct { - Cidr string `yaml:"cidr"` - HostPrefix int `yaml:"hostPrefix"` - } `yaml:"clusterNetwork"` - NetworkType string `yaml:"networkType"` - ServiceNetwork []string `yaml:"serviceNetwork"` - }{ - ClusterNetwork: []struct { - Cidr string `yaml:"cidr"` - HostPrefix int `yaml:"hostPrefix"` - }{ - {Cidr: cluster.ClusterNetworkCidr, HostPrefix: int(cluster.ClusterNetworkHostPrefix)}, - }, - NetworkType: "OpenShiftSDN", - ServiceNetwork: []string{cluster.ServiceNetworkCidr}, - }, - Platform: struct { - None struct{} `yaml:"none"` - }{}, - PullSecret: cluster.PullSecret, - SSHKey: cluster.SSHPublicKey, - } - return yaml.Marshal(cfg) - } + return yaml.Marshal(*cfg) } diff --git a/internal/installcfg/installcfg_test.go b/internal/installcfg/installcfg_test.go index 323728791..0c0fafa77 100644 --- a/internal/installcfg/installcfg_test.go +++ b/internal/installcfg/installcfg_test.go @@ -1,82 +1,91 @@ package installcfg import ( - "encoding/json" "testing" + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" + + "github.com/filanov/bm-inventory/internal/common" "github.com/filanov/bm-inventory/models" "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/golang/mock/gomock" + "github.com/google/uuid" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) -var _ = Describe("inventory", func() { - - createInterface := func(ipv4Addresses ...string) *models.Interface { - return &models.Interface{ - IPV4Addresses: append([]string{}, ipv4Addresses...), +var _ = Describe("installcfg", func() { + var ( + host1 models.Host + host2 models.Host + host3 models.Host + cluster common.Cluster + ctrl *gomock.Controller + ) + BeforeEach(func() { + clusterId := strfmt.UUID(uuid.New().String()) + cluster = common.Cluster{Cluster: models.Cluster{ + ID: &clusterId, + OpenshiftVersion: "4.5", + BaseDNSDomain: "redhat.com", + APIVip: "102.345.34.34", + IngressVip: "376.5.56.6", + }} + id := strfmt.UUID(uuid.New().String()) + host1 = models.Host{ + ID: &id, + ClusterID: clusterId, + Status: swag.String(models.HostStatusKnown), + Role: "master", } - } - - createInventory := func(interfaces ...*models.Interface) string { - inventory := models.Inventory{Interfaces: interfaces} - ret, _ := json.Marshal(&inventory) - return string(ret) - } - - createHosts := func(inventories ...string) []*models.Host { - ret := make([]*models.Host, 0) - for _, i := range inventories { - ret = append(ret, &models.Host{Inventory: i}) + id = strfmt.UUID(uuid.New().String()) + host2 = models.Host{ + ID: &id, + ClusterID: clusterId, + Status: swag.String(models.HostStatusKnown), + Role: "worker", } - return ret - } - createCluster := func(apiVip string, inventories ...string) *models.Cluster { - return &models.Cluster{ - APIVip: strfmt.IPv4(apiVip), - Hosts: createHosts(inventories...), + host3 = models.Host{ + ID: &id, + ClusterID: clusterId, + Status: swag.String(models.HostStatusKnown), + Role: "worker", } - } - It("happpy flow", func() { - cluster := createCluster("1.2.5.6", - createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")), - createInventory(createInterface("127.0.0.1/17"))) - cidr, err := getMachineCIDR(cluster) - Expect(err).To(Not(HaveOccurred())) - Expect(cidr).To(Equal("1.2.4.0/23")) + 
cluster.Hosts = []*models.Host{&host1, &host2, &host3} + ctrl = gomock.NewController(GinkgoT()) + }) - It("Illegal VIP", func() { - cluster := createCluster("1.2.5.257", - createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")), - createInventory(createInterface("127.0.0.1/17"))) - cidr, err := getMachineCIDR(cluster) - Expect(err).To(HaveOccurred()) - Expect(cidr).To(Equal("")) + It("create_configuration_with_all_hosts", func() { + var result InstallerConfigBaremetal + data, err := GetInstallConfig(logrus.New(), &cluster) + Expect(err).ShouldNot(HaveOccurred()) + err = yaml.Unmarshal(data, &result) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(result.Platform.Baremetal.Hosts)).Should(Equal(3)) }) - It("No Match", func() { - cluster := createCluster("1.2.5.200", - createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.6.7/23")), - createInventory(createInterface("127.0.0.1/17"))) - cidr, err := getMachineCIDR(cluster) - Expect(err).To(HaveOccurred()) - Expect(cidr).To(Equal("")) + It("create_configuration_with_one_host_disabled", func() { + var result InstallerConfigBaremetal + host3.Status = swag.String(models.HostStatusDisabled) + data, err := GetInstallConfig(logrus.New(), &cluster) + Expect(err).ShouldNot(HaveOccurred()) + err = yaml.Unmarshal(data, &result) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(result.Platform.Baremetal.Hosts)).Should(Equal(2)) }) - It("Bad inventory", func() { - cluster := createCluster("1.2.5.6", - "Bad inventory", - createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")), - createInventory(createInterface("127.0.0.1/17"))) - cidr, err := getMachineCIDR(cluster) - Expect(err).To(Not(HaveOccurred())) - Expect(cidr).To(Equal("1.2.4.0/23")) + + AfterEach(func() { + // cleanup + ctrl.Finish() }) }) func TestSubsystem(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "host state machine tests") + RunSpecs(t, "installcfg 
tests") } diff --git a/internal/metrics/matchedRouteContext/matchedRouteContext.go b/internal/metrics/matchedRouteContext/matchedRouteContext.go new file mode 100644 index 000000000..296ba0685 --- /dev/null +++ b/internal/metrics/matchedRouteContext/matchedRouteContext.go @@ -0,0 +1,35 @@ +package matchedRouteContext + +import ( + "context" + + rmiddleware "github.com/go-openapi/runtime/middleware" +) + +type ctxKey int8 + +const ( + _ ctxKey = iota + ctxMatchedRoute + ctxMethod +) + +func FromContext(ctx context.Context) (matchedRoute *rmiddleware.MatchedRoute, method string) { + matchedRoute = nil + method = "" + m := ctx.Value(ctxMatchedRoute) + if m != nil { + mm := m.(rmiddleware.MatchedRoute) + matchedRoute = &mm + } + m = ctx.Value(ctxMethod) + if m != nil { + method = m.(string) + } + return matchedRoute, method +} + +func ToContext(ctx context.Context, matchedRoute *rmiddleware.MatchedRoute, method string) context.Context { + c := context.WithValue(ctx, ctxMatchedRoute, *matchedRoute) + return context.WithValue(c, ctxMethod, method) +} diff --git a/internal/metrics/metricsManager.go b/internal/metrics/metricsManager.go new file mode 100644 index 000000000..f43d74635 --- /dev/null +++ b/internal/metrics/metricsManager.go @@ -0,0 +1,250 @@ +package metrics + +import ( + "encoding/json" + "time" + + "github.com/go-openapi/strfmt" + + "github.com/alecthomas/units" + + "github.com/filanov/bm-inventory/models" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" +) + +//go:generate mockgen -source=metricsManager.go -package=metrics -destination=mock_netricsManager_api.go + +////////////////////////////////////////// +// counters name and description +///////////////////////////////////////// +const ( + counterClusterCreation = "assisted_installer_cluster_creations" + counterClusterInstallationStarted = "assisted_installer_cluster_installation_started" + counterClusterInstallationSeconds = 
"assisted_installer_cluster_installation_seconds" + counterHostInstallationPhaseSeconds = "assisted_installer_host_installation_phase_seconds" + counterClusterHosts = "assisted_installer_cluster_hosts" + counterClusterHostCores = "assisted_installer_cluster_host_cores" + counterClusterHostRAMGb = "assisted_installer_cluster_host_ram_gb" + counterClusterHostDiskGb = "assisted_installer_cluster_host_disk_gb" + counterClusterHostNicGb = "assisted_installer_cluster_host_nic_gb" +) + +const ( + counterDescriptionClusterCreation = "Number of cluster resources created, by version" + counterDescriptionClusterInstallationStarted = "Number of clusters that entered installing state, by version" + counterDescriptionClusterInstallationSeconds = "Histogram/sum/count of installation time for completed clusters, by result and OCP version" + counterDescriptionHostInstallationPhaseSeconds = "Histogram/sum/count of time for each phase, by phase, final install result, and OCP version" + counterDescriptionClusterHosts = "Number of hosts for completed clusters, by role, result, and OCP version" + counterDescriptionClusterHostCores = "Histogram/sum/count of CPU cores in hosts of completed clusters, by role, result, and OCP version" + counterDescriptionClusterHostRAMGb = "Histogram/sum/count of physical RAM in hosts of completed clusters, by role, result, and OCP version" + counterDescriptionClusterHostDiskGb = "Histogram/sum/count of installation disk capacity in hosts of completed clusters, by type, raid (level), role, result, and OCP version" + counterDescriptionClusterHostNicGb = "Histogram/sum/count of management network NIC speed in hosts of completed clusters, by role, result, and OCP version" +) + +const ( + namespace = "" + subsystem = "service" + openshiftVersionLabel = "openshiftVersion" + resultLabel = "result" + phaseLabel = "phase" + roleLabel = "role" + diskTypeLabel = "diskType" +) + +type API interface { + ClusterRegistered(clusterVersion string) + 
InstallationStarted(clusterVersion string) + ClusterInstallationFinished(log logrus.FieldLogger, result, clusterVersion string, installationStratedTime strfmt.DateTime) + ReportHostInstallationMetrics(log logrus.FieldLogger, clusterVersion string, h *models.Host, previousProgress *models.HostProgressInfo, currentStage models.HostStage) +} + +type MetricsManager struct { + registry prometheus.Registerer + + serviceLogicClusterCreation *prometheus.CounterVec + serviceLogicClusterInstallationStarted *prometheus.CounterVec + serviceLogicClusterInstallationSeconds *prometheus.HistogramVec + serviceLogicHostInstallationPhaseSeconds *prometheus.HistogramVec + serviceLogicClusterHosts *prometheus.CounterVec + serviceLogicClusterHostCores *prometheus.HistogramVec + serviceLogicClusterHostRAMGb *prometheus.HistogramVec + serviceLogicClusterHostDiskGb *prometheus.HistogramVec + serviceLogicClusterHostNicGb *prometheus.HistogramVec +} + +func NewMetricsManager(registry prometheus.Registerer) *MetricsManager { + + m := &MetricsManager{ + registry: registry, + + serviceLogicClusterCreation: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: counterClusterCreation, + Help: counterDescriptionClusterCreation, + }, []string{openshiftVersionLabel}), + + serviceLogicClusterInstallationStarted: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: counterClusterInstallationStarted, + Help: counterDescriptionClusterInstallationStarted, + }, []string{openshiftVersionLabel}), + + serviceLogicClusterInstallationSeconds: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: counterClusterInstallationSeconds, + Help: counterDescriptionClusterInstallationSeconds, + Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 20, 30, 40, 50, 60, 90, 120, 150, 180, 210, 240, 270, 300, 360, 420, 480, 540, + 600, 900, 1200, 
1500, 1800, 2100, 2400, 2700, 3000, 3300, 3600}, + }, []string{resultLabel, openshiftVersionLabel}), + + serviceLogicHostInstallationPhaseSeconds: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: counterHostInstallationPhaseSeconds, + Help: counterDescriptionHostInstallationPhaseSeconds, + Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 20, 30, 40, 50, 60, 90, 120, 150, 180, 210, 240, 270, 300, 360, 420, 480, 540, + 600, 900, 1200, 1500, 1800, 2100, 2400, 2700, 3000, 3300, 3600}, + }, []string{phaseLabel, resultLabel, openshiftVersionLabel}), + + serviceLogicClusterHosts: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: counterClusterHosts, + Help: counterDescriptionClusterHosts, + }, []string{roleLabel, resultLabel, openshiftVersionLabel}), + + serviceLogicClusterHostCores: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: counterClusterHostCores, + Help: counterDescriptionClusterHostCores, + Buckets: []float64{1, 2, 4, 8, 16, 32, 64, 128, 256, 512}, + }, []string{roleLabel, resultLabel, openshiftVersionLabel}), + + serviceLogicClusterHostRAMGb: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: counterClusterHostRAMGb, + Help: counterDescriptionClusterHostRAMGb, + Buckets: []float64{8, 16, 32, 64, 128, 256, 512, 1024, 2048}, + }, []string{roleLabel, resultLabel, openshiftVersionLabel}), + + serviceLogicClusterHostDiskGb: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: counterClusterHostDiskGb, + Help: counterDescriptionClusterHostDiskGb, + Buckets: []float64{250, 500, 1000, 2000, 4000, 8000, 16000}, + }, []string{diskTypeLabel, roleLabel, resultLabel, openshiftVersionLabel}), + + serviceLogicClusterHostNicGb: 
prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: counterClusterHostNicGb, + Help: counterDescriptionClusterHostNicGb, + Buckets: []float64{1, 10, 20, 40, 100}, + }, []string{roleLabel, resultLabel, openshiftVersionLabel}), + } + + registry.MustRegister( + m.serviceLogicClusterCreation, + m.serviceLogicClusterInstallationStarted, + m.serviceLogicClusterInstallationSeconds, + m.serviceLogicHostInstallationPhaseSeconds, + m.serviceLogicClusterHosts, + m.serviceLogicClusterHostCores, + m.serviceLogicClusterHostRAMGb, + m.serviceLogicClusterHostDiskGb, + m.serviceLogicClusterHostNicGb, + ) + return m +} + +func (m *MetricsManager) ClusterRegistered(clusterVersion string) { + m.serviceLogicClusterCreation.WithLabelValues(clusterVersion).Inc() +} +func (m *MetricsManager) InstallationStarted(clusterVersion string) { + m.serviceLogicClusterInstallationStarted.WithLabelValues(clusterVersion).Inc() +} + +func (m *MetricsManager) ClusterInstallationFinished(log logrus.FieldLogger, result, clusterVersion string, installationStratedTime strfmt.DateTime) { + duration := time.Since(time.Time(installationStratedTime)).Seconds() + log.Infof("Cluster Installation Finished result %s clusterVersion %s duration %f", result, clusterVersion, duration) + m.serviceLogicClusterInstallationSeconds.WithLabelValues(result, clusterVersion).Observe(duration) +} + +func (m *MetricsManager) ReportHostInstallationMetrics(log logrus.FieldLogger, clusterVersion string, h *models.Host, + previousProgress *models.HostProgressInfo, currentStage models.HostStage) { + + if previousProgress != nil && previousProgress.CurrentStage != currentStage { + + roleStr := string(h.Role) + if h.Bootstrap { + roleStr = "bootstrap" + } + installationStageStr := string(currentStage) + switch currentStage { + case models.HostStageDone, models.HostStageFailed: + m.handleHostInstallationComplete(log, clusterVersion, roleStr, installationStageStr, h) + } + //report 
the installation phase duration + if previousProgress.CurrentStage != "" { + duration := time.Since(time.Time(previousProgress.StageStartedAt)).Seconds() + phaseResult := models.HostStageDone + if currentStage == models.HostStageFailed { + phaseResult = models.HostStageFailed + } + log.Infof("service Logic Host Installation Phase Seconds phase %s, result %s, duration %f", + string(previousProgress.CurrentStage), string(phaseResult), duration) + m.serviceLogicHostInstallationPhaseSeconds.WithLabelValues(string(previousProgress.CurrentStage), + string(phaseResult), clusterVersion).Observe(duration) + } + } +} + +func (m *MetricsManager) handleHostInstallationComplete(log logrus.FieldLogger, clusterVersion string, roleStr string, installationStageStr string, h *models.Host) { + log.Infof("service Logic Cluster Hosts clusterVersion %s, roleStr %s, result %s", + clusterVersion, roleStr, installationStageStr) + m.serviceLogicClusterHosts.WithLabelValues(roleStr, installationStageStr, clusterVersion).Inc() + var hwInfo models.Inventory + + err := json.Unmarshal([]byte(h.Inventory), &hwInfo) + if err != nil { + log.Errorf("failed to report host hardware installation metrics for %s", h.ID) + } else { + log.Infof("service Logic Cluster Host Cores role %s, result %s cpu %d", + roleStr, installationStageStr, hwInfo.CPU.Count) + m.serviceLogicClusterHostCores.WithLabelValues(roleStr, installationStageStr, + clusterVersion).Observe(float64(hwInfo.CPU.Count)) + log.Infof("service Logic Cluster Host RAMGb role %s, result %s ram %d", + roleStr, installationStageStr, bytesToGib(hwInfo.Memory.PhysicalBytes)) + m.serviceLogicClusterHostRAMGb.WithLabelValues(roleStr, installationStageStr, + clusterVersion).Observe(float64(bytesToGib(hwInfo.Memory.PhysicalBytes))) + for _, disk := range hwInfo.Disks { + //TODO change the code after adding storage controller to disk model + diskTypeStr := disk.DriveType //+ "-" + disk.StorageController + log.Infof("service Logic Cluster Host DiskGb role 
%s, result %s diskType %s diskSize %d", + roleStr, installationStageStr, diskTypeStr, bytesToGib(disk.SizeBytes)) + //TODO missing raid data + m.serviceLogicClusterHostDiskGb.WithLabelValues(diskTypeStr, roleStr, installationStageStr, + clusterVersion).Observe(float64(bytesToGib(disk.SizeBytes))) + } + for _, inter := range hwInfo.Interfaces { + log.Infof("service Logic Cluster Host NicGb role %s, result %s SpeedMbps %f", + roleStr, installationStageStr, float64(inter.SpeedMbps)) + m.serviceLogicClusterHostNicGb.WithLabelValues(roleStr, installationStageStr, + clusterVersion).Observe(float64(inter.SpeedMbps)) + } + } +} + +func bytesToGib(bytes int64) int64 { + return bytes / int64(units.GiB) +} diff --git a/internal/metrics/middleware.go b/internal/metrics/middleware.go new file mode 100644 index 000000000..60f57b64d --- /dev/null +++ b/internal/metrics/middleware.go @@ -0,0 +1,24 @@ +package metrics + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/sirupsen/logrus" + "github.com/slok/go-http-metrics/middleware" +) + +// To be used as an inner middleware to provide metrics for the endpoints +func WithMatchedRoute(log logrus.FieldLogger, registry prometheus.Registerer) func(http.Handler) http.Handler { + m := middleware.New(middleware.Config{ + Recorder: NewRecorder(Config{ + Log: log, + Registry: registry}), + Service: "assisted-installer", + }) + + return func(next http.Handler) http.Handler { + return Handler(log, m, next) + } +} diff --git a/internal/metrics/mock_netricsManager_api.go b/internal/metrics/mock_netricsManager_api.go new file mode 100644 index 000000000..d331dae52 --- /dev/null +++ b/internal/metrics/mock_netricsManager_api.go @@ -0,0 +1,85 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: metricsManager.go + +// Package metrics is a generated GoMock package. 
+package metrics + +import ( + reflect "reflect" + + models "github.com/filanov/bm-inventory/models" + strfmt "github.com/go-openapi/strfmt" + gomock "github.com/golang/mock/gomock" + logrus "github.com/sirupsen/logrus" +) + +// MockAPI is a mock of API interface +type MockAPI struct { + ctrl *gomock.Controller + recorder *MockAPIMockRecorder +} + +// MockAPIMockRecorder is the mock recorder for MockAPI +type MockAPIMockRecorder struct { + mock *MockAPI +} + +// NewMockAPI creates a new mock instance +func NewMockAPI(ctrl *gomock.Controller) *MockAPI { + mock := &MockAPI{ctrl: ctrl} + mock.recorder = &MockAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockAPI) EXPECT() *MockAPIMockRecorder { + return m.recorder +} + +// ClusterRegistered mocks base method +func (m *MockAPI) ClusterRegistered(clusterVersion string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ClusterRegistered", clusterVersion) +} + +// ClusterRegistered indicates an expected call of ClusterRegistered +func (mr *MockAPIMockRecorder) ClusterRegistered(clusterVersion interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterRegistered", reflect.TypeOf((*MockAPI)(nil).ClusterRegistered), clusterVersion) +} + +// InstallationStarted mocks base method +func (m *MockAPI) InstallationStarted(clusterVersion string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "InstallationStarted", clusterVersion) +} + +// InstallationStarted indicates an expected call of InstallationStarted +func (mr *MockAPIMockRecorder) InstallationStarted(clusterVersion interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallationStarted", reflect.TypeOf((*MockAPI)(nil).InstallationStarted), clusterVersion) +} + +// ClusterInstallationFinished mocks base method +func (m *MockAPI) ClusterInstallationFinished(log logrus.FieldLogger, result, 
clusterVersion string, installationStratedTime strfmt.DateTime) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ClusterInstallationFinished", log, result, clusterVersion, installationStratedTime) +} + +// ClusterInstallationFinished indicates an expected call of ClusterInstallationFinished +func (mr *MockAPIMockRecorder) ClusterInstallationFinished(log, result, clusterVersion, installationStratedTime interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterInstallationFinished", reflect.TypeOf((*MockAPI)(nil).ClusterInstallationFinished), log, result, clusterVersion, installationStratedTime) +} + +// ReportHostInstallationMetrics mocks base method +func (m *MockAPI) ReportHostInstallationMetrics(log logrus.FieldLogger, clusterVersion string, h *models.Host, previousProgress *models.HostProgressInfo, currentStage models.HostStage) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ReportHostInstallationMetrics", log, clusterVersion, h, previousProgress, currentStage) +} + +// ReportHostInstallationMetrics indicates an expected call of ReportHostInstallationMetrics +func (mr *MockAPIMockRecorder) ReportHostInstallationMetrics(log, clusterVersion, h, previousProgress, currentStage interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportHostInstallationMetrics", reflect.TypeOf((*MockAPI)(nil).ReportHostInstallationMetrics), log, clusterVersion, h, previousProgress, currentStage) +} diff --git a/internal/metrics/recorder.go b/internal/metrics/recorder.go new file mode 100644 index 000000000..c12777967 --- /dev/null +++ b/internal/metrics/recorder.go @@ -0,0 +1,157 @@ +package metrics + +import ( + "context" + "time" + + "github.com/filanov/bm-inventory/internal/metrics/matchedRouteContext" + "github.com/sirupsen/logrus" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/slok/go-http-metrics/metrics" +) + +// Config has the dependencies and values of 
// Config has the dependencies and values of the recorder.
type Config struct {
	Log     logrus.FieldLogger
	Service string
	// Prefix is the prefix that will be set on the metrics, by default it will be empty.
	Prefix string
	// DurationBuckets are the buckets used by Prometheus for the HTTP request duration metrics,
	// by default uses Prometheus default buckets (from 5ms to 10s).
	DurationBuckets []float64
	// SizeBuckets are the buckets used by Prometheus for the HTTP response size metrics,
	// by default uses exponential buckets from 100B to 1GB.
	SizeBuckets []float64
	// Registry is the registerer the recorder registers its metrics with. It must be
	// provided by the caller: defaults() does not set it, and NewRecorder calls
	// Registry.MustRegister unconditionally (a nil Registry would panic there).
	Registry prometheus.Registerer
	// HandlerIDLabel is the name that will be set to the handler ID label, by default is `handler`.
	HandlerIDLabel string
	// StatusCodeLabel is the name that will be set to the status code label, by default is `code`.
	StatusCodeLabel string
	// MethodLabel is the name that will be set to the method label, by default is `method`.
	MethodLabel string
	// ServiceLabel is the name that will be set to the service label, by default is `service`.
	ServiceLabel string
	// IDLabel is the name that will be set to the ID label, by default is 'id'.
	IDLabel string
}

// defaults fills in every unset (zero-value) field of Config with its
// documented default. Note that Registry is intentionally not defaulted here.
func (c *Config) defaults() {
	if len(c.DurationBuckets) == 0 {
		c.DurationBuckets = prometheus.DefBuckets
	}

	if len(c.SizeBuckets) == 0 {
		c.SizeBuckets = prometheus.ExponentialBuckets(100, 10, 8)
	}

	if c.HandlerIDLabel == "" {
		c.HandlerIDLabel = "handler"
	}

	if c.StatusCodeLabel == "" {
		c.StatusCodeLabel = "code"
	}

	if c.MethodLabel == "" {
		c.MethodLabel = "method"
	}

	if c.ServiceLabel == "" {
		c.ServiceLabel = "service"
	}
	if c.IDLabel == "" {
		c.IDLabel = "id"
	}
}
r.httpResponseSizeHistogram, + r.httpRequestsInflight, + ) + + return r +} + +type metricLabels struct { + clusterID string + path string + method string +} + +func extractRequestLabels(ctx context.Context) metricLabels { + ret := metricLabels{ + clusterID: "", + path: "", + method: "", + } + mr, method := matchedRouteContext.FromContext(ctx) + if mr != nil { + ret.path = mr.PathPattern + + if v, _, ok := mr.Params.GetOK("cluster_id"); ok { + if len(v) == 1 { + ret.clusterID = v[0] + } + } + } + ret.method = method + return ret +} + +func (r recorder) ObserveHTTPRequestDuration(ctx context.Context, p metrics.HTTPReqProperties, duration time.Duration) { + labels := extractRequestLabels(ctx) + r.httpRequestDurHistogram.WithLabelValues(p.Service, labels.path, labels.method, p.Code, labels.clusterID).Observe(duration.Seconds()) +} + +func (r recorder) ObserveHTTPResponseSize(ctx context.Context, p metrics.HTTPReqProperties, sizeBytes int64) { + labels := extractRequestLabels(ctx) + r.httpResponseSizeHistogram.WithLabelValues(p.Service, labels.path, labels.method, p.Code, labels.clusterID).Observe(float64(sizeBytes)) +} + +func (r recorder) AddInflightRequests(ctx context.Context, p metrics.HTTPProperties, quantity int) { + labels := extractRequestLabels(ctx) + r.httpRequestsInflight.WithLabelValues(p.Service, labels.path, labels.method, labels.clusterID).Add(float64(quantity)) +} diff --git a/internal/metrics/reporter.go b/internal/metrics/reporter.go new file mode 100644 index 000000000..dda3019c7 --- /dev/null +++ b/internal/metrics/reporter.go @@ -0,0 +1,100 @@ +package metrics + +import ( + "bufio" + "context" + "net" + "net/http" + + "github.com/filanov/bm-inventory/internal/metrics/matchedRouteContext" + rmiddleware "github.com/go-openapi/runtime/middleware" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + goMiddleware "github.com/slok/go-http-metrics/middleware" +) + +// Handler returns an measuring standard http.Handler. 
it should be added as an innerMiddleware because +// it relies on the MatchedRoute to provide more information about the route +func Handler(log logrus.FieldLogger, m goMiddleware.Middleware, h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + log.Debugf("Request: %v", *r) + wi := &responseWriterInterceptor{ + statusCode: http.StatusOK, + ResponseWriter: w, + } + reporter := &myReporter{ + w: wi, + method: r.Method, + urlPath: r.URL.Path, + ctx: r.Context(), + } + + mr := rmiddleware.MatchedRouteFrom(r) + if mr != nil { + reporter.ctx = matchedRouteContext.ToContext(reporter.ctx, mr, r.Method) + } + m.Measure("", reporter, func() { + h.ServeHTTP(wi, r) + }) + }) +} + +type myReporter struct { + ctx context.Context + method string + urlPath string + w *responseWriterInterceptor +} + +func (s *myReporter) Method() string { return s.method } + +func (s *myReporter) Context() context.Context { return s.ctx } + +func (s *myReporter) URLPath() string { return s.urlPath } + +func (s *myReporter) StatusCode() int { return s.w.statusCode } + +func (s *myReporter) BytesWritten() int64 { return int64(s.w.bytesWritten) } + +// responseWriterInterceptor is a simple wrapper to intercept set data on a +// ResponseWriter. 
+type responseWriterInterceptor struct { + http.ResponseWriter + statusCode int + bytesWritten int +} + +func (w *responseWriterInterceptor) WriteHeader(statusCode int) { + w.statusCode = statusCode + w.ResponseWriter.WriteHeader(statusCode) +} + +func (w *responseWriterInterceptor) Write(p []byte) (int, error) { + w.bytesWritten += len(p) + return w.ResponseWriter.Write(p) +} + +func (w *responseWriterInterceptor) Hijack() (net.Conn, *bufio.ReadWriter, error) { + h, ok := w.ResponseWriter.(http.Hijacker) + if !ok { + return nil, nil, errors.New("type assertion failed http.ResponseWriter not a http.Hijacker") + } + return h.Hijack() +} + +func (w *responseWriterInterceptor) Flush() { + f, ok := w.ResponseWriter.(http.Flusher) + if !ok { + return + } + + f.Flush() +} + +// Check interface implementations. +var ( + _ http.ResponseWriter = &responseWriterInterceptor{} + _ http.Hijacker = &responseWriterInterceptor{} + _ http.Flusher = &responseWriterInterceptor{} + _ goMiddleware.Reporter = &myReporter{} +) diff --git a/internal/network/machine_network_cidr.go b/internal/network/machine_network_cidr.go new file mode 100644 index 000000000..f751704e9 --- /dev/null +++ b/internal/network/machine_network_cidr.go @@ -0,0 +1,233 @@ +package network + +import ( + "encoding/json" + "fmt" + "net" + "strings" + + "github.com/go-openapi/swag" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + + "github.com/filanov/bm-inventory/internal/common" + + "github.com/filanov/bm-inventory/models" + "github.com/sirupsen/logrus" +) + +/* + * Calculate the machine network CIDR from the one of (ApiVip, IngressVip) and the ip addresses of the hosts. + * The ip addresses of the host appear with CIDR notation. Therefore, the network can be calculated from it. + * The goal of this function is to find the first network that one of the vips belongs to it. + * This network is returned as a result. 
+ */ +func CalculateMachineNetworkCIDR(apiVip string, ingressVip string, hosts []*models.Host) (string, error) { + var ip string + if apiVip != "" { + ip = apiVip + } else if ingressVip != "" { + ip = ingressVip + } else { + return "", nil + } + parsedVipAddr := net.ParseIP(ip) + if parsedVipAddr == nil { + return "", fmt.Errorf("Could not parse VIP ip %s", ip) + } + for _, h := range hosts { + if swag.StringValue(h.Status) == models.HostStatusDisabled { + continue + } + var inventory models.Inventory + err := json.Unmarshal([]byte(h.Inventory), &inventory) + if err != nil { + continue + } + for _, intf := range inventory.Interfaces { + for _, ipv4addr := range intf.IPV4Addresses { + _, ipnet, err := net.ParseCIDR(ipv4addr) + if err != nil { + continue + } + if ipnet.Contains(parsedVipAddr) { + return ipnet.String(), nil + } + } + } + } + return "", fmt.Errorf("No suitable matching CIDR found for VIP %s", ip) +} + +func ipInCidr(ipStr, cidrStr string) bool { + ip := net.ParseIP(ipStr) + if ip == nil { + return false + } + _, ipnet, err := net.ParseCIDR(cidrStr) + if err != nil { + return false + } + return ipnet.Contains(ip) +} + +func verifyVip(hosts []*models.Host, machineNetworkCidr string, vip string, vipName string, mustExist bool, log logrus.FieldLogger) error { + if !mustExist && vip == "" { + return nil + } + if !ipInCidr(vip, machineNetworkCidr) { + return fmt.Errorf("%s <%s> does not belong to machine-network-cidr <%s>", vipName, vip, machineNetworkCidr) + } + if !IpInFreeList(hosts, vip, machineNetworkCidr, log) { + return fmt.Errorf("%s <%s> is already in use in cidr %s", vipName, vip, machineNetworkCidr) + } + return nil +} + +func verifyDifferentVipAddresses(apiVip string, ingressVip string) error { + if apiVip == ingressVip && apiVip != "" { + return fmt.Errorf("api-vip and ingress-vip cannot have the same value: %s", apiVip) + } + return nil +} + +func VerifyVips(hosts []*models.Host, machineNetworkCidr string, apiVip string, ingressVip string, 
mustExist bool, log logrus.FieldLogger) error { + err := verifyVip(hosts, machineNetworkCidr, apiVip, "api-vip", mustExist, log) + if err == nil { + err = verifyVip(hosts, machineNetworkCidr, ingressVip, "ingress-vip", mustExist, log) + } + if err == nil { + err = verifyDifferentVipAddresses(apiVip, ingressVip) + } + return err +} + +func belongsToNetwork(log logrus.FieldLogger, h *models.Host, machineIpnet *net.IPNet) bool { + var inventory models.Inventory + err := json.Unmarshal([]byte(h.Inventory), &inventory) + if err != nil { + log.WithError(err).Warnf("Error unmarshalling host %s inventory %s", h.ID, h.Inventory) + return false + } + for _, intf := range inventory.Interfaces { + for _, ipv4addr := range intf.IPV4Addresses { + ip, _, err := net.ParseCIDR(ipv4addr) + if err != nil { + log.WithError(err).Warnf("Could not parse cidr %s", ipv4addr) + continue + } + if machineIpnet.Contains(ip) { + return true + } + } + } + return false +} + +func GetMachineCIDRHosts(log logrus.FieldLogger, cluster *common.Cluster) ([]*models.Host, error) { + if cluster.MachineNetworkCidr == "" { + return nil, errors.New("Machine network CIDR was not set in cluster") + } + _, machineIpnet, err := net.ParseCIDR(cluster.MachineNetworkCidr) + if err != nil { + return nil, err + } + ret := make([]*models.Host, 0) + for _, h := range cluster.Hosts { + if belongsToNetwork(log, h, machineIpnet) { + ret = append(ret, h) + } + } + return ret, nil +} + +func IsHostInMachineNetCidr(log logrus.FieldLogger, cluster *common.Cluster, host *models.Host) bool { + _, machineIpnet, err := net.ParseCIDR(cluster.MachineNetworkCidr) + if err != nil { + return false + } + return belongsToNetwork(log, host, machineIpnet) +} + +type IPSet map[strfmt.IPv4]struct{} + +func (s IPSet) Add(str strfmt.IPv4) { + s[str] = struct{}{} +} + +func (s IPSet) Intersect(other IPSet) IPSet { + ret := make(IPSet) + for k := range s { + if v, ok := other[k]; ok { + ret[k] = v + } + } + return ret +} + +func 
freeAddressesUnmarshal(network, freeAddressesStr string, prefix *string) (IPSet, error) { + var unmarshaled models.FreeNetworksAddresses + err := json.Unmarshal([]byte(freeAddressesStr), &unmarshaled) + if err != nil { + return nil, err + } + for _, f := range unmarshaled { + if f.Network == network { + ret := make(IPSet) + for _, a := range f.FreeAddresses { + if prefix == nil || strings.HasPrefix(a.String(), *prefix) { + ret.Add(a) + } + } + return ret, nil + } + } + return nil, errors.Errorf("No network %s found", network) +} + +func MakeFreeAddressesSet(hosts []*models.Host, network string, prefix *string, log logrus.FieldLogger) IPSet { + var ( + availableFreeAddresses []string + sets = make([]IPSet, 0) + resultingSet = make(IPSet) + ) + for _, h := range hosts { + if swag.StringValue(h.Status) != models.HostStatusDisabled && h.FreeAddresses != "" { + availableFreeAddresses = append(availableFreeAddresses, h.FreeAddresses) + } + } + if len(availableFreeAddresses) == 0 { + return resultingSet + } + // Create IP sets from each of the hosts free-addresses + for _, a := range availableFreeAddresses { + s, err := freeAddressesUnmarshal(network, a, prefix) + if err != nil { + log.WithError(err).Warnf("Unmarshal free addresses for network %s", network) + continue + } + // TODO: Have to decide if we want to filter empty sets + sets = append(sets, s) + } + if len(sets) == 0 { + return resultingSet + } + + // Perform set intersection between all valid sets + resultingSet = sets[0] + for _, s := range sets[1:] { + resultingSet = resultingSet.Intersect(s) + } + return resultingSet +} + +// This is best effort validation. 
Therefore, validation will be done only if there are IPs in free list +func IpInFreeList(hosts []*models.Host, vipIPStr, network string, log logrus.FieldLogger) bool { + isFree := true + freeSet := MakeFreeAddressesSet(hosts, network, nil, log) + if len(freeSet) > 0 { + _, isFree = freeSet[strfmt.IPv4(vipIPStr)] + } + return isFree +} diff --git a/internal/network/machine_network_cidr_test.go b/internal/network/machine_network_cidr_test.go new file mode 100644 index 000000000..472b078f6 --- /dev/null +++ b/internal/network/machine_network_cidr_test.go @@ -0,0 +1,235 @@ +package network + +import ( + "encoding/json" + "testing" + + "github.com/go-openapi/swag" + + "github.com/filanov/bm-inventory/internal/common" + + "github.com/sirupsen/logrus" + + "github.com/filanov/bm-inventory/models" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("inventory", func() { + + createInterface := func(ipv4Addresses ...string) *models.Interface { + return &models.Interface{ + IPV4Addresses: append([]string{}, ipv4Addresses...), + } + } + + createInventory := func(interfaces ...*models.Interface) string { + inventory := models.Inventory{Interfaces: interfaces} + ret, _ := json.Marshal(&inventory) + return string(ret) + } + + createHosts := func(inventories ...string) []*models.Host { + ret := make([]*models.Host, 0) + for _, i := range inventories { + ret = append(ret, &models.Host{Inventory: i}) + } + return ret + } + + createDisabledHosts := func(inventories ...string) []*models.Host { + ret := make([]*models.Host, 0) + for _, i := range inventories { + ret = append(ret, &models.Host{Inventory: i, + Status: swag.String(models.HostStatusDisabled)}) + } + return ret + } + + createCluster := func(apiVip string, machineCidr string, inventories ...string) *common.Cluster { + return &common.Cluster{Cluster: models.Cluster{ + APIVip: apiVip, + MachineNetworkCidr: machineCidr, + Hosts: createHosts(inventories...), + }} + } + createDisabledCluster := 
func(apiVip string, machineCidr string, inventories ...string) *common.Cluster { + return &common.Cluster{Cluster: models.Cluster{ + APIVip: apiVip, + MachineNetworkCidr: machineCidr, + Hosts: createDisabledHosts(inventories...), + }} + } + Context("CalculateMachineNetworkCIDR", func() { + It("happpy flow", func() { + cluster := createCluster("1.2.5.6", "", + createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")), + createInventory(createInterface("127.0.0.1/17"))) + cidr, err := CalculateMachineNetworkCIDR(cluster.APIVip, cluster.IngressVip, cluster.Hosts) + Expect(err).To(Not(HaveOccurred())) + Expect(cidr).To(Equal("1.2.4.0/23")) + }) + + It("Disabled", func() { + cluster := createDisabledCluster("1.2.5.6", "", + createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")), + createInventory(createInterface("127.0.0.1/17"))) + _, err := CalculateMachineNetworkCIDR(cluster.APIVip, cluster.IngressVip, cluster.Hosts) + Expect(err).To(HaveOccurred()) + }) + + It("Illegal VIP", func() { + cluster := createCluster("1.2.5.257", "", + createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")), + createInventory(createInterface("127.0.0.1/17"))) + cidr, err := CalculateMachineNetworkCIDR(cluster.APIVip, cluster.IngressVip, cluster.Hosts) + Expect(err).To(HaveOccurred()) + Expect(cidr).To(Equal("")) + }) + + It("No Match", func() { + cluster := createCluster("1.2.5.200", "", + createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.6.7/23")), + createInventory(createInterface("127.0.0.1/17"))) + cidr, err := CalculateMachineNetworkCIDR(cluster.APIVip, cluster.IngressVip, cluster.Hosts) + Expect(err).To(HaveOccurred()) + Expect(cidr).To(Equal("")) + }) + It("Bad inventory", func() { + cluster := createCluster("1.2.5.6", "", + "Bad inventory", + createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")), + 
createInventory(createInterface("127.0.0.1/17"))) + cidr, err := CalculateMachineNetworkCIDR(cluster.APIVip, cluster.IngressVip, cluster.Hosts) + Expect(err).To(Not(HaveOccurred())) + Expect(cidr).To(Equal("1.2.4.0/23")) + }) + }) + Context("GetMachineCIDRHosts", func() { + It("No Machine CIDR", func() { + cluster := createCluster("1.2.5.6", "", + createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")), + createInventory(createInterface("127.0.0.1/17"))) + _, err := GetMachineCIDRHosts(logrus.New(), cluster) + Expect(err).To(HaveOccurred()) + }) + It("No matching Machine CIDR", func() { + cluster := createCluster("1.2.5.6", "1.1.0.0/16", + createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")), + createInventory(createInterface("127.0.0.1/17"))) + hosts, err := GetMachineCIDRHosts(logrus.New(), cluster) + Expect(err).To(Not(HaveOccurred())) + Expect(hosts).To(BeEmpty()) + }) + It("Some matched", func() { + cluster := createCluster("1.2.5.6", "1.2.4.0/23", + createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")), + createInventory(createInterface("127.0.0.1/17")), + createInventory(createInterface("1.2.4.79/23"))) + hosts, err := GetMachineCIDRHosts(logrus.New(), cluster) + Expect(err).To(Not(HaveOccurred())) + Expect(hosts).To(Equal([]*models.Host{ + cluster.Hosts[0], + cluster.Hosts[2], + })) + + }) + }) + Context("VerifyVips", func() { + var log logrus.FieldLogger + + BeforeEach(func() { + log = logrus.New() + }) + It("Same vips", func() { + cluster := createCluster("1.2.5.6", "1.2.4.0/23", + createInventory(createInterface("1.2.5.7/23"))) + cluster.Hosts = []*models.Host{ + { + FreeAddresses: "[{\"network\":\"1.2.4.0/23\",\"free_addresses\":[\"1.2.5.6\",\"1.2.5.8\"]}]", + }, + } + cluster.IngressVip = cluster.APIVip + err := VerifyVips(cluster.Hosts, cluster.MachineNetworkCidr, cluster.APIVip, cluster.IngressVip, false, log) + Expect(err).To(HaveOccurred()) 
+ err = VerifyVips(cluster.Hosts, cluster.MachineNetworkCidr, cluster.APIVip, cluster.IngressVip, true, log) + Expect(err).To(HaveOccurred()) + }) + It("Different vips", func() { + cluster := createCluster("1.2.5.6", "1.2.4.0/23", + createInventory(createInterface("1.2.5.7/23"))) + cluster.IngressVip = "1.2.5.8" + cluster.Hosts = []*models.Host{ + { + FreeAddresses: "[{\"network\":\"1.2.4.0/23\",\"free_addresses\":[\"1.2.5.6\",\"1.2.5.8\"]}]", + }, + } + err := VerifyVips(cluster.Hosts, cluster.MachineNetworkCidr, cluster.APIVip, cluster.IngressVip, false, log) + Expect(err).ToNot(HaveOccurred()) + err = VerifyVips(cluster.Hosts, cluster.MachineNetworkCidr, cluster.APIVip, cluster.IngressVip, true, log) + Expect(err).ToNot(HaveOccurred()) + }) + It("Not free", func() { + cluster := createCluster("1.2.5.6", "1.2.4.0/23", + createInventory(createInterface("1.2.5.7/23"))) + cluster.IngressVip = "1.2.5.8" + cluster.Hosts = []*models.Host{ + { + FreeAddresses: "[{\"network\":\"1.2.4.0/23\",\"free_addresses\":[\"1.2.5.9\"]}]", + }, + } + err := VerifyVips(cluster.Hosts, cluster.MachineNetworkCidr, cluster.APIVip, cluster.IngressVip, false, log) + Expect(err).To(HaveOccurred()) + err = VerifyVips(cluster.Hosts, cluster.MachineNetworkCidr, cluster.APIVip, cluster.IngressVip, true, log) + Expect(err).To(HaveOccurred()) + }) + It("Disabled", func() { + cluster := createCluster("1.2.5.6", "1.2.4.0/23", + createInventory(createInterface("1.2.5.7/23"))) + cluster.IngressVip = "1.2.5.8" + cluster.Hosts = []*models.Host{ + { + FreeAddresses: "[{\"network\":\"1.2.4.0/23\",\"free_addresses\":[\"1.2.5.9\"]}]", + Status: swag.String(models.HostStatusDisabled), + }, + } + err := VerifyVips(cluster.Hosts, cluster.MachineNetworkCidr, cluster.APIVip, cluster.IngressVip, false, log) + Expect(err).ToNot(HaveOccurred()) + err = VerifyVips(cluster.Hosts, cluster.MachineNetworkCidr, cluster.APIVip, cluster.IngressVip, true, log) + Expect(err).ToNot(HaveOccurred()) + }) + It("Empty", func() { 
+ cluster := createCluster("1.2.5.6", "1.2.4.0/23", + createInventory(createInterface("1.2.5.7/23"))) + cluster.IngressVip = "1.2.5.8" + cluster.Hosts = []*models.Host{ + { + FreeAddresses: "", + }, + } + err := VerifyVips(cluster.Hosts, cluster.MachineNetworkCidr, cluster.APIVip, cluster.IngressVip, false, log) + Expect(err).ToNot(HaveOccurred()) + err = VerifyVips(cluster.Hosts, cluster.MachineNetworkCidr, cluster.APIVip, cluster.IngressVip, true, log) + Expect(err).ToNot(HaveOccurred()) + }) + It("Free", func() { + cluster := createCluster("1.2.5.6", "1.2.4.0/23", + createInventory(createInterface("1.2.5.7/23"))) + cluster.IngressVip = "1.2.5.8" + cluster.Hosts = []*models.Host{ + { + FreeAddresses: "[{\"network\":\"1.2.4.0/23\",\"free_addresses\":[\"1.2.5.6\",\"1.2.5.8\",\"1.2.5.9\"]}]", + }, + } + err := VerifyVips(cluster.Hosts, cluster.MachineNetworkCidr, cluster.APIVip, cluster.IngressVip, false, log) + Expect(err).ToNot(HaveOccurred()) + err = VerifyVips(cluster.Hosts, cluster.MachineNetworkCidr, cluster.APIVip, cluster.IngressVip, true, log) + Expect(err).ToNot(HaveOccurred()) + }) + }) +}) + +func TestMachineNetworkCidr(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Machine network cider Suite") +} diff --git a/internal/versions/versions.go b/internal/versions/versions.go new file mode 100644 index 000000000..cc33ef19f --- /dev/null +++ b/internal/versions/versions.go @@ -0,0 +1,45 @@ +package versions + +import ( + "context" + + "github.com/filanov/bm-inventory/models" + "github.com/filanov/bm-inventory/restapi" + operations "github.com/filanov/bm-inventory/restapi/operations/versions" + "github.com/go-openapi/runtime/middleware" +) + +type Versions struct { + SelfVersion string `envconfig:"SELF_VERSION" default:"quay.io/ocpmetal/installer-image-build:latest"` + ImageBuilder string `envconfig:"IMAGE_BUILDER" default:"quay.io/ocpmetal/installer-image-build:latest"` + AgentDockerImg string `envconfig:"AGENT_DOCKER_IMAGE" 
default:"quay.io/ocpmetal/agent:latest"` + KubeconfigGenerator string `envconfig:"KUBECONFIG_GENERATE_IMAGE" default:"quay.io/ocpmetal/ignition-manifests-and-kubeconfig-generate:latest"` + InstallerImage string `envconfig:"INSTALLER_IMAGE" default:"quay.io/ocpmetal/assisted-installer:latest"` + ControllerImage string `envconfig:"CONTROLLER_IMAGE" default:"quay.io/ocpmetal/assisted-installer-controller:latest"` + ReleaseTag string `envconfig:"RELEASE_TAG" default:""` +} + +func NewHandler(versions Versions) *handler { + return &handler{versions: versions} +} + +var _ restapi.VersionsAPI = (*handler)(nil) + +type handler struct { + versions Versions +} + +func (h *handler) ListComponentVersions(ctx context.Context, params operations.ListComponentVersionsParams) middleware.Responder { + return operations.NewListComponentVersionsOK().WithPayload( + &models.ListVersions{ + Versions: models.Versions{ + "assisted-installer-service": h.versions.SelfVersion, + "image-builder": h.versions.ImageBuilder, + "discovery-agent": h.versions.AgentDockerImg, + "ignition-manifests-and-kubeconfig-generate": h.versions.KubeconfigGenerator, + "assisted-installer": h.versions.InstallerImage, + "assisted-installer-controller": h.versions.ControllerImage, + }, + ReleaseTag: h.versions.ReleaseTag, + }) +} diff --git a/internal/versions/versions_test.go b/internal/versions/versions_test.go new file mode 100644 index 000000000..6000cd9c6 --- /dev/null +++ b/internal/versions/versions_test.go @@ -0,0 +1,61 @@ +package versions + +import ( + "context" + "os" + "testing" + + "github.com/kelseyhightower/envconfig" + + operations "github.com/filanov/bm-inventory/restapi/operations/versions" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestHandler_ListComponentVersions(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "versions") +} + +var _ = Describe("list versions", func() { + var ( + h *handler + versions Versions + ) + It("default values", func() { + Expect(envconfig.Process("test", &versions)).ShouldNot(HaveOccurred()) + h = NewHandler(versions) + reply := h.ListComponentVersions(context.Background(), operations.ListComponentVersionsParams{}) + Expect(reply).Should(BeAssignableToTypeOf(operations.NewListComponentVersionsOK())) + val, _ := reply.(*operations.ListComponentVersionsOK) + Expect(val.Payload.Versions["assisted-installer-service"]). + Should(Equal("quay.io/ocpmetal/installer-image-build:latest")) + Expect(val.Payload.Versions["image-builder"]).Should(Equal("quay.io/ocpmetal/installer-image-build:latest")) + Expect(val.Payload.Versions["discovery-agent"]).Should(Equal("quay.io/ocpmetal/agent:latest")) + Expect(val.Payload.Versions["ignition-manifests-and-kubeconfig-generate"]). 
+ Should(Equal("quay.io/ocpmetal/ignition-manifests-and-kubeconfig-generate:latest")) + Expect(val.Payload.Versions["assisted-installer"]).Should(Equal("quay.io/ocpmetal/assisted-installer:latest")) + Expect(val.Payload.ReleaseTag).Should(Equal("")) + }) + + It("mix default and non default", func() { + os.Setenv("SELF_VERSION", "self-version") + os.Setenv("IMAGE_BUILDER", "image-builder") + os.Setenv("AGENT_DOCKER_IMAGE", "agent-image") + os.Setenv("INSTALLER_IMAGE", "installer-image") + os.Setenv("CONTROLLER_IMAGE", "controller-image") + Expect(envconfig.Process("test", &versions)).ShouldNot(HaveOccurred()) + h = NewHandler(versions) + reply := h.ListComponentVersions(context.Background(), operations.ListComponentVersionsParams{}) + Expect(reply).Should(BeAssignableToTypeOf(operations.NewListComponentVersionsOK())) + val, _ := reply.(*operations.ListComponentVersionsOK) + Expect(val.Payload.Versions["assisted-installer-service"]).Should(Equal("self-version")) + Expect(val.Payload.Versions["image-builder"]).Should(Equal("image-builder")) + Expect(val.Payload.Versions["discovery-agent"]).Should(Equal("agent-image")) + Expect(val.Payload.Versions["ignition-manifests-and-kubeconfig-generate"]). + Should(Equal("quay.io/ocpmetal/ignition-manifests-and-kubeconfig-generate:latest")) + Expect(val.Payload.Versions["assisted-installer"]).Should(Equal("installer-image")) + Expect(val.Payload.Versions["assisted-installer-controller"]).Should(Equal("controller-image")) + Expect(val.Payload.ReleaseTag).Should(Equal("")) + }) +}) diff --git a/models/block_device.go b/models/block_device.go deleted file mode 100644 index 696b25503..000000000 --- a/models/block_device.go +++ /dev/null @@ -1,67 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// BlockDevice block device -// -// swagger:model block-device -type BlockDevice struct { - - // device type - DeviceType string `json:"device_type,omitempty"` - - // fstype - Fstype string `json:"fstype,omitempty"` - - // major device number - MajorDeviceNumber int64 `json:"major_device_number,omitempty"` - - // minor device number - MinorDeviceNumber int64 `json:"minor_device_number,omitempty"` - - // mountpoint - Mountpoint string `json:"mountpoint,omitempty"` - - // name - Name string `json:"name,omitempty"` - - // read only - ReadOnly bool `json:"read_only,omitempty"` - - // removable device - RemovableDevice int64 `json:"removable_device,omitempty"` - - // size - Size int64 `json:"size,omitempty"` -} - -// Validate validates this block device -func (m *BlockDevice) Validate(formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *BlockDevice) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *BlockDevice) UnmarshalBinary(b []byte) error { - var res BlockDevice - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/models/cidr.go b/models/cidr.go deleted file mode 100644 index 7ec142443..000000000 --- a/models/cidr.go +++ /dev/null @@ -1,46 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// Cidr cidr -// -// swagger:model cidr -type Cidr struct { - - // ip address - IPAddress string `json:"ip_address,omitempty"` - - // mask - Mask int64 `json:"mask,omitempty"` -} - -// Validate validates this cidr -func (m *Cidr) Validate(formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *Cidr) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Cidr) UnmarshalBinary(b []byte) error { - var res Cidr - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/models/cluster.go b/models/cluster.go index 49f5fe18f..f0360add4 100644 --- a/models/cluster.go +++ b/models/cluster.go @@ -21,8 +21,8 @@ import ( type Cluster struct { // Virtual IP used to reach the OpenShift cluster API. - // Format: ipv4 - APIVip strfmt.IPv4 `json:"api_vip,omitempty"` + // Pattern: ^(([0-9]{1,3}\.){3}[0-9]{1,3})?$ + APIVip string `json:"api_vip,omitempty"` // Base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name. BaseDNSDomain string `json:"base_dns_domain,omitempty"` @@ -38,11 +38,10 @@ type Cluster struct { // The time that this cluster was created. // Format: date-time - CreatedAt strfmt.DateTime `json:"created_at,omitempty" gorm:"type:datetime"` + CreatedAt strfmt.DateTime `json:"created_at,omitempty" gorm:"type:timestamp with time zone"` - // Virtual IP used internally by the cluster for automating internal DNS requirements. - // Format: ipv4 - DNSVip strfmt.IPv4 `json:"dns_vip,omitempty"` + // List of host networks to be filled during query. + HostNetworks []*HostNetwork `json:"host_networks" gorm:"-"` // Hosts that are associated with this cluster. 
Hosts []*Host `json:"hosts" gorm:"foreignkey:ClusterID;association_foreignkey:ID"` @@ -56,36 +55,46 @@ type Cluster struct { // Format: uuid ID *strfmt.UUID `json:"id" gorm:"primary_key"` + // ignition generator version + IgnitionGeneratorVersion string `json:"ignition_generator_version,omitempty"` + // image info // Required: true ImageInfo *ImageInfo `json:"image_info" gorm:"embedded;embedded_prefix:image_"` // Virtual IP used for cluster ingress traffic. - // Format: ipv4 - IngressVip strfmt.IPv4 `json:"ingress_vip,omitempty"` + // Pattern: ^(([0-9]{1,3}\.){3}[0-9]{1,3})?$ + IngressVip string `json:"ingress_vip,omitempty"` // The time that this cluster completed installation. // Format: date-time - InstallCompletedAt strfmt.DateTime `json:"install_completed_at,omitempty" gorm:"type:datetime;default:0"` + InstallCompletedAt strfmt.DateTime `json:"install_completed_at,omitempty" gorm:"type:timestamp with time zone;default:'2000-01-01 00:00:00z'"` // The time that this cluster began installation. // Format: date-time - InstallStartedAt strfmt.DateTime `json:"install_started_at,omitempty" gorm:"type:datetime;default:0"` + InstallStartedAt strfmt.DateTime `json:"install_started_at,omitempty" gorm:"type:timestamp with time zone;default:'2000-01-01 00:00:00z'"` // Indicates the type of this object. Will be 'Cluster' if this is a complete object or 'ClusterLink' if it is just a link. // Required: true // Enum: [Cluster] Kind *string `json:"kind"` + // A CIDR that all hosts belonging to the cluster should have an interfaces with IP address that belongs to this CIDR. The api_vip belongs to this CIDR. + // Pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$ + MachineNetworkCidr string `json:"machine_network_cidr,omitempty"` + // Name of the OpenShift cluster. Name string `json:"name,omitempty"` // Version of the OpenShift cluster. 
- // Enum: [4.4 4.5] + // Enum: [4.5] OpenshiftVersion string `json:"openshift_version,omitempty"` - // The pull secret that obtained from the Pull Secret page on the Red Hat OpenShift Cluster Manager site. - PullSecret string `json:"pull_secret,omitempty" gorm:"type:varchar(4096)"` + // org id + OrgID string `json:"org_id,omitempty"` + + // True if the pull-secret has been added to the cluster + PullSecretSet bool `json:"pull_secret_set,omitempty"` // The IP address pool to use for service IP addresses. You can enter only one IP address pool. If you need to access the services from an external network, configure load balancers and routers to manage the traffic. // Pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$ @@ -96,16 +105,23 @@ type Cluster struct { // Status of the OpenShift cluster. // Required: true - // Enum: [insufficient ready error installing installed] + // Enum: [insufficient ready error preparing-for-installation installing finalizing installed] Status *string `json:"status"` // Additional information pertaining to the status of the OpenShift cluster. // Required: true StatusInfo *string `json:"status_info" gorm:"type:varchar(2048)"` + // The last time that the cluster status has been updated + // Format: date-time + StatusUpdatedAt strfmt.DateTime `json:"status_updated_at,omitempty" gorm:"type:timestamp with time zone"` + // The last time that this cluster was updated. 
// Format: date-time - UpdatedAt strfmt.DateTime `json:"updated_at,omitempty" gorm:"type:datetime"` + UpdatedAt strfmt.DateTime `json:"updated_at,omitempty" gorm:"type:timestamp with time zone"` + + // user id + UserID string `json:"user_id,omitempty"` } // Validate validates this cluster @@ -128,7 +144,7 @@ func (m *Cluster) Validate(formats strfmt.Registry) error { res = append(res, err) } - if err := m.validateDNSVip(formats); err != nil { + if err := m.validateHostNetworks(formats); err != nil { res = append(res, err) } @@ -164,6 +180,10 @@ func (m *Cluster) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateMachineNetworkCidr(formats); err != nil { + res = append(res, err) + } + if err := m.validateOpenshiftVersion(formats); err != nil { res = append(res, err) } @@ -180,6 +200,10 @@ func (m *Cluster) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateStatusUpdatedAt(formats); err != nil { + res = append(res, err) + } + if err := m.validateUpdatedAt(formats); err != nil { res = append(res, err) } @@ -196,7 +220,7 @@ func (m *Cluster) validateAPIVip(formats strfmt.Registry) error { return nil } - if err := validate.FormatOf("api_vip", "body", "ipv4", m.APIVip.String(), formats); err != nil { + if err := validate.Pattern("api_vip", "body", string(m.APIVip), `^(([0-9]{1,3}\.){3}[0-9]{1,3})?$`); err != nil { return err } @@ -246,14 +270,26 @@ func (m *Cluster) validateCreatedAt(formats strfmt.Registry) error { return nil } -func (m *Cluster) validateDNSVip(formats strfmt.Registry) error { +func (m *Cluster) validateHostNetworks(formats strfmt.Registry) error { - if swag.IsZero(m.DNSVip) { // not required + if swag.IsZero(m.HostNetworks) { // not required return nil } - if err := validate.FormatOf("dns_vip", "body", "ipv4", m.DNSVip.String(), formats); err != nil { - return err + for i := 0; i < len(m.HostNetworks); i++ { + if swag.IsZero(m.HostNetworks[i]) { // not required + continue + } + 
+ if m.HostNetworks[i] != nil { + if err := m.HostNetworks[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("host_networks" + "." + strconv.Itoa(i)) + } + return err + } + } + } return nil @@ -330,7 +366,7 @@ func (m *Cluster) validateIngressVip(formats strfmt.Registry) error { return nil } - if err := validate.FormatOf("ingress_vip", "body", "ipv4", m.IngressVip.String(), formats); err != nil { + if err := validate.Pattern("ingress_vip", "body", string(m.IngressVip), `^(([0-9]{1,3}\.){3}[0-9]{1,3})?$`); err != nil { return err } @@ -383,7 +419,7 @@ const ( // prop value enum func (m *Cluster) validateKindEnum(path, location string, value string) error { - if err := validate.Enum(path, location, value, clusterTypeKindPropEnum); err != nil { + if err := validate.EnumCase(path, location, value, clusterTypeKindPropEnum, true); err != nil { return err } return nil @@ -403,11 +439,24 @@ func (m *Cluster) validateKind(formats strfmt.Registry) error { return nil } +func (m *Cluster) validateMachineNetworkCidr(formats strfmt.Registry) error { + + if swag.IsZero(m.MachineNetworkCidr) { // not required + return nil + } + + if err := validate.Pattern("machine_network_cidr", "body", string(m.MachineNetworkCidr), `^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$`); err != nil { + return err + } + + return nil +} + var clusterTypeOpenshiftVersionPropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["4.4","4.5"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["4.5"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -417,16 +466,13 @@ func init() { const ( - // ClusterOpenshiftVersionNr44 captures enum value "4.4" - ClusterOpenshiftVersionNr44 string = "4.4" - // ClusterOpenshiftVersionNr45 captures enum value "4.5" ClusterOpenshiftVersionNr45 string = "4.5" ) // prop value enum func (m *Cluster) validateOpenshiftVersionEnum(path, location string, value 
string) error { - if err := validate.Enum(path, location, value, clusterTypeOpenshiftVersionPropEnum); err != nil { + if err := validate.EnumCase(path, location, value, clusterTypeOpenshiftVersionPropEnum, true); err != nil { return err } return nil @@ -463,7 +509,7 @@ var clusterTypeStatusPropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["insufficient","ready","error","installing","installed"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["insufficient","ready","error","preparing-for-installation","installing","finalizing","installed"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -482,16 +528,22 @@ const ( // ClusterStatusError captures enum value "error" ClusterStatusError string = "error" + // ClusterStatusPreparingForInstallation captures enum value "preparing-for-installation" + ClusterStatusPreparingForInstallation string = "preparing-for-installation" + // ClusterStatusInstalling captures enum value "installing" ClusterStatusInstalling string = "installing" + // ClusterStatusFinalizing captures enum value "finalizing" + ClusterStatusFinalizing string = "finalizing" + // ClusterStatusInstalled captures enum value "installed" ClusterStatusInstalled string = "installed" ) // prop value enum func (m *Cluster) validateStatusEnum(path, location string, value string) error { - if err := validate.Enum(path, location, value, clusterTypeStatusPropEnum); err != nil { + if err := validate.EnumCase(path, location, value, clusterTypeStatusPropEnum, true); err != nil { return err } return nil @@ -520,6 +572,19 @@ func (m *Cluster) validateStatusInfo(formats strfmt.Registry) error { return nil } +func (m *Cluster) validateStatusUpdatedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.StatusUpdatedAt) { // not required + return nil + } + + if err := validate.FormatOf("status_updated_at", "body", "date-time", m.StatusUpdatedAt.String(), formats); err != nil { + return err + } + + return nil +} + 
func (m *Cluster) validateUpdatedAt(formats strfmt.Registry) error { if swag.IsZero(m.UpdatedAt) { // not required diff --git a/models/cluster_create_params.go b/models/cluster_create_params.go index 954fd4cd2..a502aff81 100644 --- a/models/cluster_create_params.go +++ b/models/cluster_create_params.go @@ -19,29 +19,21 @@ import ( // swagger:model cluster-create-params type ClusterCreateParams struct { - // Virtual IP used to reach the OpenShift cluster API. - // Format: ipv4 - APIVip strfmt.IPv4 `json:"api_vip,omitempty"` - // Base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name. BaseDNSDomain string `json:"base_dns_domain,omitempty"` // IP address block from which Pod IPs are allocated This block must not overlap with existing physical networks. These IP addresses are used for the Pod network, and if you need to access the Pods from an external network, configure load balancers and routers to manage the traffic. // Pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$ - ClusterNetworkCidr string `json:"cluster_network_cidr,omitempty"` + ClusterNetworkCidr *string `json:"cluster_network_cidr,omitempty"` // The subnet prefix length to assign to each individual node. For example, if clusterNetworkHostPrefix is set to 23, then each node is assigned a /23 subnet out of the given cidr (clusterNetworkCIDR), which allows for 510 (2^(32 - 23) - 2) pod IPs addresses. If you are required to provide access to nodes from an external network, configure load balancers and routers to manage the traffic. // Maximum: 32 // Minimum: 1 ClusterNetworkHostPrefix int64 `json:"cluster_network_host_prefix,omitempty"` - // Virtual IP used internally by the cluster for automating internal DNS requirements. - // Format: ipv4 - DNSVip strfmt.IPv4 `json:"dns_vip,omitempty"` - // Virtual IP used for cluster ingress traffic. 
- // Format: ipv4 - IngressVip strfmt.IPv4 `json:"ingress_vip,omitempty"` + // Pattern: ^(([0-9]{1,3}\.){3}[0-9]{1,3})?$ + IngressVip string `json:"ingress_vip,omitempty"` // Name of the OpenShift cluster. // Required: true @@ -49,7 +41,7 @@ type ClusterCreateParams struct { // Version of the OpenShift cluster. // Required: true - // Enum: [4.4 4.5] + // Enum: [4.5] OpenshiftVersion *string `json:"openshift_version"` // The pull secret that obtained from the Pull Secret page on the Red Hat OpenShift Cluster Manager site. @@ -57,7 +49,7 @@ type ClusterCreateParams struct { // The IP address pool to use for service IP addresses. You can enter only one IP address pool. If you need to access the services from an external network, configure load balancers and routers to manage the traffic. // Pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$ - ServiceNetworkCidr string `json:"service_network_cidr,omitempty"` + ServiceNetworkCidr *string `json:"service_network_cidr,omitempty"` // SSH public key for debugging OpenShift nodes. 
SSHPublicKey string `json:"ssh_public_key,omitempty"` @@ -67,10 +59,6 @@ type ClusterCreateParams struct { func (m *ClusterCreateParams) Validate(formats strfmt.Registry) error { var res []error - if err := m.validateAPIVip(formats); err != nil { - res = append(res, err) - } - if err := m.validateClusterNetworkCidr(formats); err != nil { res = append(res, err) } @@ -79,10 +67,6 @@ func (m *ClusterCreateParams) Validate(formats strfmt.Registry) error { res = append(res, err) } - if err := m.validateDNSVip(formats); err != nil { - res = append(res, err) - } - if err := m.validateIngressVip(formats); err != nil { res = append(res, err) } @@ -105,26 +89,13 @@ func (m *ClusterCreateParams) Validate(formats strfmt.Registry) error { return nil } -func (m *ClusterCreateParams) validateAPIVip(formats strfmt.Registry) error { - - if swag.IsZero(m.APIVip) { // not required - return nil - } - - if err := validate.FormatOf("api_vip", "body", "ipv4", m.APIVip.String(), formats); err != nil { - return err - } - - return nil -} - func (m *ClusterCreateParams) validateClusterNetworkCidr(formats strfmt.Registry) error { if swag.IsZero(m.ClusterNetworkCidr) { // not required return nil } - if err := validate.Pattern("cluster_network_cidr", "body", string(m.ClusterNetworkCidr), `^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$`); err != nil { + if err := validate.Pattern("cluster_network_cidr", "body", string(*m.ClusterNetworkCidr), `^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$`); err != nil { return err } @@ -148,26 +119,13 @@ func (m *ClusterCreateParams) validateClusterNetworkHostPrefix(formats strfmt.Re return nil } -func (m *ClusterCreateParams) validateDNSVip(formats strfmt.Registry) error { - - if swag.IsZero(m.DNSVip) { // not required - return nil - } - - if err := validate.FormatOf("dns_vip", "body", "ipv4", m.DNSVip.String(), formats); err != nil { - return err - } - - return nil -} - func (m *ClusterCreateParams) validateIngressVip(formats 
strfmt.Registry) error { if swag.IsZero(m.IngressVip) { // not required return nil } - if err := validate.FormatOf("ingress_vip", "body", "ipv4", m.IngressVip.String(), formats); err != nil { + if err := validate.Pattern("ingress_vip", "body", string(m.IngressVip), `^(([0-9]{1,3}\.){3}[0-9]{1,3})?$`); err != nil { return err } @@ -187,7 +145,7 @@ var clusterCreateParamsTypeOpenshiftVersionPropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["4.4","4.5"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["4.5"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -197,16 +155,13 @@ func init() { const ( - // ClusterCreateParamsOpenshiftVersionNr44 captures enum value "4.4" - ClusterCreateParamsOpenshiftVersionNr44 string = "4.4" - // ClusterCreateParamsOpenshiftVersionNr45 captures enum value "4.5" ClusterCreateParamsOpenshiftVersionNr45 string = "4.5" ) // prop value enum func (m *ClusterCreateParams) validateOpenshiftVersionEnum(path, location string, value string) error { - if err := validate.Enum(path, location, value, clusterCreateParamsTypeOpenshiftVersionPropEnum); err != nil { + if err := validate.EnumCase(path, location, value, clusterCreateParamsTypeOpenshiftVersionPropEnum, true); err != nil { return err } return nil @@ -232,7 +187,7 @@ func (m *ClusterCreateParams) validateServiceNetworkCidr(formats strfmt.Registry return nil } - if err := validate.Pattern("service_network_cidr", "body", string(m.ServiceNetworkCidr), `^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$`); err != nil { + if err := validate.Pattern("service_network_cidr", "body", string(*m.ServiceNetworkCidr), `^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$`); err != nil { return err } diff --git a/models/cluster_update_params.go b/models/cluster_update_params.go index 92bd2a183..581bc9340 100644 --- a/models/cluster_update_params.go +++ b/models/cluster_update_params.go @@ -6,7 +6,6 @@ package models // Editing this file 
might prove futile when you re-run the swagger generate command import ( - "encoding/json" "strconv" "github.com/go-openapi/errors" @@ -21,44 +20,43 @@ import ( type ClusterUpdateParams struct { // Virtual IP used to reach the OpenShift cluster API. - // Format: ipv4 - APIVip strfmt.IPv4 `json:"api_vip,omitempty"` + // Pattern: ^(([0-9]{1,3}\.){3}[0-9]{1,3})?$ + APIVip *string `json:"api_vip,omitempty"` // Base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name. - BaseDNSDomain string `json:"base_dns_domain,omitempty"` + BaseDNSDomain *string `json:"base_dns_domain,omitempty"` // IP address block from which Pod IPs are allocated This block must not overlap with existing physical networks. These IP addresses are used for the Pod network, and if you need to access the Pods from an external network, configure load balancers and routers to manage the traffic. // Pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$ - ClusterNetworkCidr string `json:"cluster_network_cidr,omitempty"` + ClusterNetworkCidr *string `json:"cluster_network_cidr,omitempty"` // The subnet prefix length to assign to each individual node. For example, if clusterNetworkHostPrefix is set to 23, then each node is assigned a /23 subnet out of the given cidr (clusterNetworkCIDR), which allows for 510 (2^(32 - 23) - 2) pod IPs addresses. If you are required to provide access to nodes from an external network, configure load balancers and routers to manage the traffic. // Maximum: 32 // Minimum: 1 - ClusterNetworkHostPrefix int64 `json:"cluster_network_host_prefix,omitempty"` + ClusterNetworkHostPrefix *int64 `json:"cluster_network_host_prefix,omitempty"` - // Virtual IP used internally by the cluster for automating internal DNS requirements. - // Format: ipv4 - DNSVip strfmt.IPv4 `json:"dns_vip,omitempty"` + // The desired hostname for hosts associated with the cluster. 
+ HostsNames []*ClusterUpdateParamsHostsNamesItems0 `json:"hosts_names" gorm:"type:varchar(64)[]"` // The desired role for hosts associated with the cluster. HostsRoles []*ClusterUpdateParamsHostsRolesItems0 `json:"hosts_roles" gorm:"type:varchar(64)[]"` // Virtual IP used for cluster ingress traffic. - // Format: ipv4 - IngressVip strfmt.IPv4 `json:"ingress_vip,omitempty"` + // Pattern: ^(([0-9]{1,3}\.){3}[0-9]{1,3})?$ + IngressVip *string `json:"ingress_vip,omitempty"` // OpenShift cluster name - Name string `json:"name,omitempty"` + Name *string `json:"name,omitempty"` // The pull secret that obtained from the Pull Secret page on the Red Hat OpenShift Cluster Manager site. - PullSecret string `json:"pull_secret,omitempty"` + PullSecret *string `json:"pull_secret,omitempty"` // The IP address pool to use for service IP addresses. You can enter only one IP address pool. If you need to access the services from an external network, configure load balancers and routers to manage the traffic. // Pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$ - ServiceNetworkCidr string `json:"service_network_cidr,omitempty"` + ServiceNetworkCidr *string `json:"service_network_cidr,omitempty"` // SSH public key for debugging OpenShift nodes. 
- SSHPublicKey string `json:"ssh_public_key,omitempty"` + SSHPublicKey *string `json:"ssh_public_key,omitempty"` } // Validate validates this cluster update params @@ -77,7 +75,7 @@ func (m *ClusterUpdateParams) Validate(formats strfmt.Registry) error { res = append(res, err) } - if err := m.validateDNSVip(formats); err != nil { + if err := m.validateHostsNames(formats); err != nil { res = append(res, err) } @@ -105,7 +103,7 @@ func (m *ClusterUpdateParams) validateAPIVip(formats strfmt.Registry) error { return nil } - if err := validate.FormatOf("api_vip", "body", "ipv4", m.APIVip.String(), formats); err != nil { + if err := validate.Pattern("api_vip", "body", string(*m.APIVip), `^(([0-9]{1,3}\.){3}[0-9]{1,3})?$`); err != nil { return err } @@ -118,7 +116,7 @@ func (m *ClusterUpdateParams) validateClusterNetworkCidr(formats strfmt.Registry return nil } - if err := validate.Pattern("cluster_network_cidr", "body", string(m.ClusterNetworkCidr), `^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$`); err != nil { + if err := validate.Pattern("cluster_network_cidr", "body", string(*m.ClusterNetworkCidr), `^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$`); err != nil { return err } @@ -131,25 +129,37 @@ func (m *ClusterUpdateParams) validateClusterNetworkHostPrefix(formats strfmt.Re return nil } - if err := validate.MinimumInt("cluster_network_host_prefix", "body", int64(m.ClusterNetworkHostPrefix), 1, false); err != nil { + if err := validate.MinimumInt("cluster_network_host_prefix", "body", int64(*m.ClusterNetworkHostPrefix), 1, false); err != nil { return err } - if err := validate.MaximumInt("cluster_network_host_prefix", "body", int64(m.ClusterNetworkHostPrefix), 32, false); err != nil { + if err := validate.MaximumInt("cluster_network_host_prefix", "body", int64(*m.ClusterNetworkHostPrefix), 32, false); err != nil { return err } return nil } -func (m *ClusterUpdateParams) validateDNSVip(formats strfmt.Registry) error { +func (m *ClusterUpdateParams) 
validateHostsNames(formats strfmt.Registry) error { - if swag.IsZero(m.DNSVip) { // not required + if swag.IsZero(m.HostsNames) { // not required return nil } - if err := validate.FormatOf("dns_vip", "body", "ipv4", m.DNSVip.String(), formats); err != nil { - return err + for i := 0; i < len(m.HostsNames); i++ { + if swag.IsZero(m.HostsNames[i]) { // not required + continue + } + + if m.HostsNames[i] != nil { + if err := m.HostsNames[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("hosts_names" + "." + strconv.Itoa(i)) + } + return err + } + } + } return nil @@ -186,7 +196,7 @@ func (m *ClusterUpdateParams) validateIngressVip(formats strfmt.Registry) error return nil } - if err := validate.FormatOf("ingress_vip", "body", "ipv4", m.IngressVip.String(), formats); err != nil { + if err := validate.Pattern("ingress_vip", "body", string(*m.IngressVip), `^(([0-9]{1,3}\.){3}[0-9]{1,3})?$`); err != nil { return err } @@ -199,7 +209,7 @@ func (m *ClusterUpdateParams) validateServiceNetworkCidr(formats strfmt.Registry return nil } - if err := validate.Pattern("service_network_cidr", "body", string(m.ServiceNetworkCidr), `^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$`); err != nil { + if err := validate.Pattern("service_network_cidr", "body", string(*m.ServiceNetworkCidr), `^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$`); err != nil { return err } @@ -224,39 +234,34 @@ func (m *ClusterUpdateParams) UnmarshalBinary(b []byte) error { return nil } -// ClusterUpdateParamsHostsRolesItems0 cluster update params hosts roles items0 +// ClusterUpdateParamsHostsNamesItems0 cluster update params hosts names items0 // -// swagger:model ClusterUpdateParamsHostsRolesItems0 -type ClusterUpdateParamsHostsRolesItems0 struct { +// swagger:model ClusterUpdateParamsHostsNamesItems0 +type ClusterUpdateParamsHostsNamesItems0 struct { + + // hostname + Hostname string `json:"hostname,omitempty"` // id // Format: uuid ID 
strfmt.UUID `json:"id,omitempty"` - - // role - // Enum: [master worker] - Role string `json:"role,omitempty"` } -// Validate validates this cluster update params hosts roles items0 -func (m *ClusterUpdateParamsHostsRolesItems0) Validate(formats strfmt.Registry) error { +// Validate validates this cluster update params hosts names items0 +func (m *ClusterUpdateParamsHostsNamesItems0) Validate(formats strfmt.Registry) error { var res []error if err := m.validateID(formats); err != nil { res = append(res, err) } - if err := m.validateRole(formats); err != nil { - res = append(res, err) - } - if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil } -func (m *ClusterUpdateParamsHostsRolesItems0) validateID(formats strfmt.Registry) error { +func (m *ClusterUpdateParamsHostsNamesItems0) validateID(formats strfmt.Registry) error { if swag.IsZero(m.ID) { // not required return nil @@ -269,32 +274,65 @@ func (m *ClusterUpdateParamsHostsRolesItems0) validateID(formats strfmt.Registry return nil } -var clusterUpdateParamsHostsRolesItems0TypeRolePropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["master","worker"]`), &res); err != nil { - panic(err) +// MarshalBinary interface implementation +func (m *ClusterUpdateParamsHostsNamesItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil } - for _, v := range res { - clusterUpdateParamsHostsRolesItems0TypeRolePropEnum = append(clusterUpdateParamsHostsRolesItems0TypeRolePropEnum, v) + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ClusterUpdateParamsHostsNamesItems0) UnmarshalBinary(b []byte) error { + var res ClusterUpdateParamsHostsNamesItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err } + *m = res + return nil +} + +// ClusterUpdateParamsHostsRolesItems0 cluster update params hosts roles items0 +// +// swagger:model ClusterUpdateParamsHostsRolesItems0 +type 
ClusterUpdateParamsHostsRolesItems0 struct { + + // id + // Format: uuid + ID strfmt.UUID `json:"id,omitempty"` + + // role + Role HostRoleUpdateParams `json:"role,omitempty"` } -const ( +// Validate validates this cluster update params hosts roles items0 +func (m *ClusterUpdateParamsHostsRolesItems0) Validate(formats strfmt.Registry) error { + var res []error - // ClusterUpdateParamsHostsRolesItems0RoleMaster captures enum value "master" - ClusterUpdateParamsHostsRolesItems0RoleMaster string = "master" + if err := m.validateID(formats); err != nil { + res = append(res, err) + } - // ClusterUpdateParamsHostsRolesItems0RoleWorker captures enum value "worker" - ClusterUpdateParamsHostsRolesItems0RoleWorker string = "worker" -) + if err := m.validateRole(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} -// prop value enum -func (m *ClusterUpdateParamsHostsRolesItems0) validateRoleEnum(path, location string, value string) error { - if err := validate.Enum(path, location, value, clusterUpdateParamsHostsRolesItems0TypeRolePropEnum); err != nil { +func (m *ClusterUpdateParamsHostsRolesItems0) validateID(formats strfmt.Registry) error { + + if swag.IsZero(m.ID) { // not required + return nil + } + + if err := validate.FormatOf("id", "body", "uuid", m.ID.String(), formats); err != nil { return err } + return nil } @@ -304,8 +342,10 @@ func (m *ClusterUpdateParamsHostsRolesItems0) validateRole(formats strfmt.Regist return nil } - // value enum - if err := m.validateRoleEnum("role", "body", m.Role); err != nil { + if err := m.Role.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("role") + } return err } diff --git a/models/completion_params.go b/models/completion_params.go new file mode 100644 index 000000000..678942d7b --- /dev/null +++ b/models/completion_params.go @@ -0,0 +1,67 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CompletionParams completion params +// +// swagger:model completion-params +type CompletionParams struct { + + // error info + ErrorInfo string `json:"error_info,omitempty"` + + // is success + // Required: true + IsSuccess *bool `json:"is_success"` +} + +// Validate validates this completion params +func (m *CompletionParams) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateIsSuccess(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CompletionParams) validateIsSuccess(formats strfmt.Registry) error { + + if err := validate.Required("is_success", "body", m.IsSuccess); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CompletionParams) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CompletionParams) UnmarshalBinary(b []byte) error { + var res CompletionParams + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/models/cpu_details.go b/models/cpu_details.go deleted file mode 100644 index 6d4660a6e..000000000 --- a/models/cpu_details.go +++ /dev/null @@ -1,58 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// CPUDetails cpu details -// -// swagger:model cpu_details -type CPUDetails struct { - - // architecture - Architecture string `json:"architecture,omitempty"` - - // cpu mhz - CPUMhz float64 `json:"cpu_mhz,omitempty"` - - // cpus - Cpus int64 `json:"cpus,omitempty"` - - // model name - ModelName string `json:"model_name,omitempty"` - - // sockets - Sockets int64 `json:"sockets,omitempty"` - - // threads per core - ThreadsPerCore int64 `json:"threads_per_core,omitempty"` -} - -// Validate validates this cpu details -func (m *CPUDetails) Validate(formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *CPUDetails) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *CPUDetails) UnmarshalBinary(b []byte) error { - var res CPUDetails - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/models/credentials.go b/models/credentials.go index 151496162..865646d51 100644 --- a/models/credentials.go +++ b/models/credentials.go @@ -15,6 +15,9 @@ import ( // swagger:model credentials type Credentials struct { + // console url + ConsoleURL string `json:"console_url,omitempty"` + // password Password string `json:"password,omitempty"` diff --git a/models/error.go b/models/error.go index f00fa7a9b..d77bfb8f4 100644 --- a/models/error.go +++ b/models/error.go @@ -128,7 +128,7 @@ const ( // prop value enum func (m *Error) validateKindEnum(path, location string, value string) error { - if err := validate.Enum(path, location, value, errorTypeKindPropEnum); err != nil { + if err := validate.EnumCase(path, location, value, errorTypeKindPropEnum, true); err != nil { return err } return nil diff --git a/models/event.go 
b/models/event.go index 4c61a1f0c..3fc3bf5ae 100644 --- a/models/event.go +++ b/models/event.go @@ -6,6 +6,8 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -25,15 +27,20 @@ type Event struct { // event time // Required: true // Format: date-time - EventTime *strfmt.DateTime `json:"event_time" gorm:"type:datetime"` + EventTime *strfmt.DateTime `json:"event_time" gorm:"type:timestamp with time zone"` // message // Required: true - Message *string `json:"message"` + Message *string `json:"message" gorm:"type:varchar(4096)"` // Unique identifier for the request that caused this event to occure // Format: uuid RequestID strfmt.UUID `json:"request_id,omitempty"` + + // severity + // Required: true + // Enum: [info warning error critical] + Severity *string `json:"severity"` } // Validate validates this event @@ -56,6 +63,10 @@ func (m *Event) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateSeverity(formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} @@ -110,6 +121,55 @@ func (m *Event) validateRequestID(formats strfmt.Registry) error { return nil } +var eventTypeSeverityPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["info","warning","error","critical"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + eventTypeSeverityPropEnum = append(eventTypeSeverityPropEnum, v) + } +} + +const ( + + // EventSeverityInfo captures enum value "info" + EventSeverityInfo string = "info" + + // EventSeverityWarning captures enum value "warning" + EventSeverityWarning string = "warning" + + // EventSeverityError captures enum value "error" + EventSeverityError string = "error" + + // EventSeverityCritical captures enum value "critical" + EventSeverityCritical string = "critical" +) + +// prop value enum +func (m *Event) validateSeverityEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, eventTypeSeverityPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *Event) validateSeverity(formats strfmt.Registry) error { + + if err := validate.Required("severity", "body", m.Severity); err != nil { + return err + } + + // value enum + if err := m.validateSeverityEnum("severity", "body", *m.Severity); err != nil { + return err + } + + return nil +} + // MarshalBinary interface implementation func (m *Event) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/models/free_addresses_list.go b/models/free_addresses_list.go new file mode 100644 index 000000000..f2ee2ba3f --- /dev/null +++ b/models/free_addresses_list.go @@ -0,0 +1,37 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// FreeAddressesList free addresses list +// +// swagger:model free-addresses-list +type FreeAddressesList []strfmt.IPv4 + +// Validate validates this free addresses list +func (m FreeAddressesList) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if err := validate.FormatOf(strconv.Itoa(i), "body", "ipv4", m[i].String(), formats); err != nil { + return err + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/models/free_addresses_request.go b/models/free_addresses_request.go new file mode 100644 index 000000000..bf5cd7c0b --- /dev/null +++ b/models/free_addresses_request.go @@ -0,0 +1,37 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// FreeAddressesRequest free addresses request +// +// swagger:model free_addresses_request +type FreeAddressesRequest []string + +// Validate validates this free addresses request +func (m FreeAddressesRequest) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if err := validate.Pattern(strconv.Itoa(i), "body", string(m[i]), `^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$`); err != nil { + return err + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/models/free_network_addresses.go b/models/free_network_addresses.go new file mode 100644 index 000000000..0171ad616 --- /dev/null +++ b/models/free_network_addresses.go @@ -0,0 +1,94 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// FreeNetworkAddresses free network addresses +// +// swagger:model free_network_addresses +type FreeNetworkAddresses struct { + + // free addresses + FreeAddresses []strfmt.IPv4 `json:"free_addresses"` + + // network + // Pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$ + Network string `json:"network,omitempty"` +} + +// Validate validates this free network addresses +func (m *FreeNetworkAddresses) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateFreeAddresses(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNetwork(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *FreeNetworkAddresses) validateFreeAddresses(formats strfmt.Registry) error { + + if swag.IsZero(m.FreeAddresses) { // not required + return nil + } + + for i := 0; i < len(m.FreeAddresses); i++ { + + if err := validate.FormatOf("free_addresses"+"."+strconv.Itoa(i), "body", "ipv4", m.FreeAddresses[i].String(), formats); err != nil { + return err + } + + } + + return nil +} + +func (m *FreeNetworkAddresses) validateNetwork(formats strfmt.Registry) error { + + if swag.IsZero(m.Network) { // not required + return nil + } + + if err := validate.Pattern("network", "body", string(m.Network), `^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$`); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *FreeNetworkAddresses) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *FreeNetworkAddresses) UnmarshalBinary(b []byte) error { + var res FreeNetworkAddresses + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/models/free_networks_addresses.go b/models/free_networks_addresses.go new file mode 100644 index 000000000..f493074ef --- /dev/null +++ b/models/free_networks_addresses.go @@ -0,0 +1,45 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// FreeNetworksAddresses free networks addresses +// +// swagger:model free_networks_addresses +type FreeNetworksAddresses []*FreeNetworkAddresses + +// Validate validates this free networks addresses +func (m FreeNetworksAddresses) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/models/host.go b/models/host.go index 43e581de4..fbbeaeda0 100644 --- a/models/host.go +++ b/models/host.go @@ -7,6 +7,7 @@ package models import ( "encoding/json" + "strconv" "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" @@ -22,6 +23,10 @@ type Host struct { // bootstrap Bootstrap bool `json:"bootstrap,omitempty"` + // The last time the host's agent communicated with the service. + // Format: date-time + CheckedInAt strfmt.DateTime `json:"checked_in_at,omitempty" gorm:"type:timestamp with time zone"` + // The cluster that this host is associated with. 
// Format: uuid ClusterID strfmt.UUID `json:"cluster_id,omitempty" gorm:"primary_key;foreignkey:Cluster"` @@ -31,10 +36,13 @@ type Host struct { // created at // Format: date-time - CreatedAt strfmt.DateTime `json:"created_at,omitempty" gorm:"type:datetime"` + CreatedAt strfmt.DateTime `json:"created_at,omitempty" gorm:"type:timestamp with time zone"` + + // discovery agent version + DiscoveryAgentVersion string `json:"discovery_agent_version,omitempty"` - // hardware info - HardwareInfo string `json:"hardware_info,omitempty" gorm:"type:text"` + // free addresses + FreeAddresses string `json:"free_addresses,omitempty" gorm:"type:text"` // Self link. // Required: true @@ -45,6 +53,9 @@ type Host struct { // Format: uuid ID *strfmt.UUID `json:"id" gorm:"primary_key"` + // Installer version + InstallerVersion string `json:"installer_version,omitempty"` + // inventory Inventory string `json:"inventory,omitempty" gorm:"type:text"` @@ -53,28 +64,55 @@ type Host struct { // Enum: [Host] Kind *string `json:"kind"` + // progress + Progress *HostProgressInfo `json:"progress,omitempty" gorm:"embedded;embedded_prefix:progress_"` + + // progress stages + ProgressStages []HostStage `json:"progress_stages" gorm:"-"` + + // requested hostname + RequestedHostname string `json:"requested_hostname,omitempty"` + // role - // Enum: [undefined master worker] - Role string `json:"role,omitempty"` + Role HostRole `json:"role,omitempty"` + + // Time at which the current progress stage started + // Format: date-time + StageStartedAt strfmt.DateTime `json:"stage_started_at,omitempty" gorm:"type:timestamp with time zone"` + + // Time at which the current progress stage was last updated + // Format: date-time + StageUpdatedAt strfmt.DateTime `json:"stage_updated_at,omitempty" gorm:"type:timestamp with time zone"` // status // Required: true - // Enum: [discovering known disconnected insufficient disabled installing installing-in-progress installed error] + // Enum: [discovering known 
disconnected insufficient disabled preparing-for-installation pending-for-input installing installing-in-progress installing-pending-user-action resetting-pending-user-action installed error resetting] Status *string `json:"status"` // status info // Required: true StatusInfo *string `json:"status_info" gorm:"type:varchar(2048)"` + // The last time that the host status has been updated + // Format: date-time + StatusUpdatedAt strfmt.DateTime `json:"status_updated_at,omitempty" gorm:"type:timestamp with time zone"` + // updated at // Format: date-time - UpdatedAt strfmt.DateTime `json:"updated_at,omitempty" gorm:"type:datetime"` + UpdatedAt strfmt.DateTime `json:"updated_at,omitempty" gorm:"type:timestamp with time zone"` + + // Json formatted string containing the validations results for each validation id grouped by category (network, hardware, etc.) + ValidationsInfo string `json:"validations_info,omitempty" gorm:"type:varchar(2048)"` } // Validate validates this host func (m *Host) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateCheckedInAt(formats); err != nil { + res = append(res, err) + } + if err := m.validateClusterID(formats); err != nil { res = append(res, err) } @@ -95,10 +133,26 @@ func (m *Host) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateProgress(formats); err != nil { + res = append(res, err) + } + + if err := m.validateProgressStages(formats); err != nil { + res = append(res, err) + } + if err := m.validateRole(formats); err != nil { res = append(res, err) } + if err := m.validateStageStartedAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStageUpdatedAt(formats); err != nil { + res = append(res, err) + } + if err := m.validateStatus(formats); err != nil { res = append(res, err) } @@ -107,6 +161,10 @@ func (m *Host) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateStatusUpdatedAt(formats); err != nil { + 
res = append(res, err) + } + if err := m.validateUpdatedAt(formats); err != nil { res = append(res, err) } @@ -117,6 +175,19 @@ func (m *Host) Validate(formats strfmt.Registry) error { return nil } +func (m *Host) validateCheckedInAt(formats strfmt.Registry) error { + + if swag.IsZero(m.CheckedInAt) { // not required + return nil + } + + if err := validate.FormatOf("checked_in_at", "body", "date-time", m.CheckedInAt.String(), formats); err != nil { + return err + } + + return nil +} + func (m *Host) validateClusterID(formats strfmt.Registry) error { if swag.IsZero(m.ClusterID) { // not required @@ -185,7 +256,7 @@ const ( // prop value enum func (m *Host) validateKindEnum(path, location string, value string) error { - if err := validate.Enum(path, location, value, hostTypeKindPropEnum); err != nil { + if err := validate.EnumCase(path, location, value, hostTypeKindPropEnum, true); err != nil { return err } return nil @@ -205,35 +276,41 @@ func (m *Host) validateKind(formats strfmt.Registry) error { return nil } -var hostTypeRolePropEnum []interface{} +func (m *Host) validateProgress(formats strfmt.Registry) error { -func init() { - var res []string - if err := json.Unmarshal([]byte(`["undefined","master","worker"]`), &res); err != nil { - panic(err) + if swag.IsZero(m.Progress) { // not required + return nil } - for _, v := range res { - hostTypeRolePropEnum = append(hostTypeRolePropEnum, v) + + if m.Progress != nil { + if err := m.Progress.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("progress") + } + return err + } } + + return nil } -const ( +func (m *Host) validateProgressStages(formats strfmt.Registry) error { - // HostRoleUndefined captures enum value "undefined" - HostRoleUndefined string = "undefined" + if swag.IsZero(m.ProgressStages) { // not required + return nil + } - // HostRoleMaster captures enum value "master" - HostRoleMaster string = "master" + for i := 0; i < len(m.ProgressStages); i++ { - 
// HostRoleWorker captures enum value "worker" - HostRoleWorker string = "worker" -) + if err := m.ProgressStages[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("progress_stages" + "." + strconv.Itoa(i)) + } + return err + } -// prop value enum -func (m *Host) validateRoleEnum(path, location string, value string) error { - if err := validate.Enum(path, location, value, hostTypeRolePropEnum); err != nil { - return err } + return nil } @@ -243,8 +320,36 @@ func (m *Host) validateRole(formats strfmt.Registry) error { return nil } - // value enum - if err := m.validateRoleEnum("role", "body", m.Role); err != nil { + if err := m.Role.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("role") + } + return err + } + + return nil +} + +func (m *Host) validateStageStartedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.StageStartedAt) { // not required + return nil + } + + if err := validate.FormatOf("stage_started_at", "body", "date-time", m.StageStartedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Host) validateStageUpdatedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.StageUpdatedAt) { // not required + return nil + } + + if err := validate.FormatOf("stage_updated_at", "body", "date-time", m.StageUpdatedAt.String(), formats); err != nil { return err } @@ -255,7 +360,7 @@ var hostTypeStatusPropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["discovering","known","disconnected","insufficient","disabled","installing","installing-in-progress","installed","error"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["discovering","known","disconnected","insufficient","disabled","preparing-for-installation","pending-for-input","installing","installing-in-progress","installing-pending-user-action","resetting-pending-user-action","installed","error","resetting"]`), &res); 
err != nil { panic(err) } for _, v := range res { @@ -280,22 +385,37 @@ const ( // HostStatusDisabled captures enum value "disabled" HostStatusDisabled string = "disabled" + // HostStatusPreparingForInstallation captures enum value "preparing-for-installation" + HostStatusPreparingForInstallation string = "preparing-for-installation" + + // HostStatusPendingForInput captures enum value "pending-for-input" + HostStatusPendingForInput string = "pending-for-input" + // HostStatusInstalling captures enum value "installing" HostStatusInstalling string = "installing" // HostStatusInstallingInProgress captures enum value "installing-in-progress" HostStatusInstallingInProgress string = "installing-in-progress" + // HostStatusInstallingPendingUserAction captures enum value "installing-pending-user-action" + HostStatusInstallingPendingUserAction string = "installing-pending-user-action" + + // HostStatusResettingPendingUserAction captures enum value "resetting-pending-user-action" + HostStatusResettingPendingUserAction string = "resetting-pending-user-action" + // HostStatusInstalled captures enum value "installed" HostStatusInstalled string = "installed" // HostStatusError captures enum value "error" HostStatusError string = "error" + + // HostStatusResetting captures enum value "resetting" + HostStatusResetting string = "resetting" ) // prop value enum func (m *Host) validateStatusEnum(path, location string, value string) error { - if err := validate.Enum(path, location, value, hostTypeStatusPropEnum); err != nil { + if err := validate.EnumCase(path, location, value, hostTypeStatusPropEnum, true); err != nil { return err } return nil @@ -324,6 +444,19 @@ func (m *Host) validateStatusInfo(formats strfmt.Registry) error { return nil } +func (m *Host) validateStatusUpdatedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.StatusUpdatedAt) { // not required + return nil + } + + if err := validate.FormatOf("status_updated_at", "body", "date-time", 
m.StatusUpdatedAt.String(), formats); err != nil { + return err + } + + return nil +} + func (m *Host) validateUpdatedAt(formats strfmt.Registry) error { if swag.IsZero(m.UpdatedAt) { // not required diff --git a/models/host_create_params.go b/models/host_create_params.go index d5c131c52..e3f4c1fdf 100644 --- a/models/host_create_params.go +++ b/models/host_create_params.go @@ -17,6 +17,9 @@ import ( // swagger:model host-create-params type HostCreateParams struct { + // discovery agent version + DiscoveryAgentVersion string `json:"discovery_agent_version,omitempty"` + // host id // Required: true // Format: uuid diff --git a/models/host_install_progress_params.go b/models/host_install_progress_params.go deleted file mode 100644 index 19e759695..000000000 --- a/models/host_install_progress_params.go +++ /dev/null @@ -1,20 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "github.com/go-openapi/strfmt" -) - -// HostInstallProgressParams host install progress params -// -// swagger:model host-install-progress-params -type HostInstallProgressParams string - -// Validate validates this host install progress params -func (m HostInstallProgressParams) Validate(formats strfmt.Registry) error { - return nil -} diff --git a/models/host_network.go b/models/host_network.go new file mode 100644 index 000000000..29c39ede8 --- /dev/null +++ b/models/host_network.go @@ -0,0 +1,76 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// HostNetwork host network +// +// swagger:model host_network +type HostNetwork struct { + + // cidr + Cidr string `json:"cidr,omitempty"` + + // host ids + HostIds []strfmt.UUID `json:"host_ids"` +} + +// Validate validates this host network +func (m *HostNetwork) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHostIds(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HostNetwork) validateHostIds(formats strfmt.Registry) error { + + if swag.IsZero(m.HostIds) { // not required + return nil + } + + for i := 0; i < len(m.HostIds); i++ { + + if err := validate.FormatOf("host_ids"+"."+strconv.Itoa(i), "body", "uuid", m.HostIds[i].String(), formats); err != nil { + return err + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HostNetwork) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HostNetwork) UnmarshalBinary(b []byte) error { + var res HostNetwork + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/models/host_progress.go b/models/host_progress.go new file mode 100644 index 000000000..4bddd7dbd --- /dev/null +++ b/models/host_progress.go @@ -0,0 +1,69 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// HostProgress host progress +// +// swagger:model host-progress +type HostProgress struct { + + // current stage + // Required: true + CurrentStage HostStage `json:"current_stage"` + + // progress info + ProgressInfo string `json:"progress_info,omitempty" gorm:"type:varchar(2048)"` +} + +// Validate validates this host progress +func (m *HostProgress) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCurrentStage(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *HostProgress) validateCurrentStage(formats strfmt.Registry) error { + + if err := m.CurrentStage.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("current_stage") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HostProgress) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HostProgress) UnmarshalBinary(b []byte) error { + var res HostProgress + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/models/host_progress_info.go b/models/host_progress_info.go new file mode 100644 index 000000000..c1d119d32 --- /dev/null +++ b/models/host_progress_info.go @@ -0,0 +1,112 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// HostProgressInfo host progress info +// +// swagger:model host-progress-info +type HostProgressInfo struct { + + // current stage + // Required: true + CurrentStage HostStage `json:"current_stage"` + + // progress info + ProgressInfo string `json:"progress_info,omitempty" gorm:"type:varchar(2048)"` + + // Time at which the current progress stage started + // Format: date-time + StageStartedAt strfmt.DateTime `json:"stage_started_at,omitempty" gorm:"type:timestamp with time zone"` + + // Time at which the current progress stage was last updated + // Format: date-time + StageUpdatedAt strfmt.DateTime `json:"stage_updated_at,omitempty" gorm:"type:timestamp with time zone"` +} + +// Validate validates this host progress info +func (m *HostProgressInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCurrentStage(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStageStartedAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStageUpdatedAt(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *HostProgressInfo) validateCurrentStage(formats strfmt.Registry) error { + + if err := m.CurrentStage.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("current_stage") + } + return err + } + + return nil +} + +func (m *HostProgressInfo) validateStageStartedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.StageStartedAt) { // not required + return nil + } + + if err := validate.FormatOf("stage_started_at", "body", "date-time", m.StageStartedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *HostProgressInfo) validateStageUpdatedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.StageUpdatedAt) { // not required + return nil + } + + if err := validate.FormatOf("stage_updated_at", "body", "date-time", m.StageUpdatedAt.String(), formats); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *HostProgressInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HostProgressInfo) UnmarshalBinary(b []byte) error { + var res HostProgressInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/models/host_role.go b/models/host_role.go new file mode 100644 index 000000000..d5304e24f --- /dev/null +++ b/models/host_role.go @@ -0,0 +1,66 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// HostRole host role +// +// swagger:model host-role +type HostRole string + +const ( + + // HostRoleMaster captures enum value "master" + HostRoleMaster HostRole = "master" + + // HostRoleWorker captures enum value "worker" + HostRoleWorker HostRole = "worker" + + // HostRoleBootstrap captures enum value "bootstrap" + HostRoleBootstrap HostRole = "bootstrap" +) + +// for schema +var hostRoleEnum []interface{} + +func init() { + var res []HostRole + if err := json.Unmarshal([]byte(`["master","worker","bootstrap"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + hostRoleEnum = append(hostRoleEnum, v) + } +} + +func (m HostRole) validateHostRoleEnum(path, location string, value HostRole) error { + if err := validate.EnumCase(path, location, value, hostRoleEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this host role +func (m HostRole) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateHostRoleEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/models/host_role_update_params.go b/models/host_role_update_params.go new file mode 100644 index 000000000..c9621965f --- /dev/null +++ b/models/host_role_update_params.go @@ -0,0 +1,63 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// HostRoleUpdateParams host role update params +// +// swagger:model host-role-update-params +type HostRoleUpdateParams string + +const ( + + // HostRoleUpdateParamsMaster captures enum value "master" + HostRoleUpdateParamsMaster HostRoleUpdateParams = "master" + + // HostRoleUpdateParamsWorker captures enum value "worker" + HostRoleUpdateParamsWorker HostRoleUpdateParams = "worker" +) + +// for schema +var hostRoleUpdateParamsEnum []interface{} + +func init() { + var res []HostRoleUpdateParams + if err := json.Unmarshal([]byte(`["master","worker"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + hostRoleUpdateParamsEnum = append(hostRoleUpdateParamsEnum, v) + } +} + +func (m HostRoleUpdateParams) validateHostRoleUpdateParamsEnum(path, location string, value HostRoleUpdateParams) error { + if err := validate.EnumCase(path, location, value, hostRoleUpdateParamsEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this host role update params +func (m HostRoleUpdateParams) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateHostRoleUpdateParamsEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/models/host_stage.go b/models/host_stage.go new file mode 100644 index 000000000..4daedbe3f --- /dev/null +++ b/models/host_stage.go @@ -0,0 +1,90 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// HostStage host stage +// +// swagger:model host-stage +type HostStage string + +const ( + + // HostStageStartingInstallation captures enum value "Starting installation" + HostStageStartingInstallation HostStage = "Starting installation" + + // HostStageWaitingForControlPlane captures enum value "Waiting for control plane" + HostStageWaitingForControlPlane HostStage = "Waiting for control plane" + + // HostStageStartWaitingForControlPlane captures enum value "Start Waiting for control plane" + HostStageStartWaitingForControlPlane HostStage = "Start Waiting for control plane" + + // HostStageInstalling captures enum value "Installing" + HostStageInstalling HostStage = "Installing" + + // HostStageWritingImageToDisk captures enum value "Writing image to disk" + HostStageWritingImageToDisk HostStage = "Writing image to disk" + + // HostStageRebooting captures enum value "Rebooting" + HostStageRebooting HostStage = "Rebooting" + + // HostStageWaitingForIgnition captures enum value "Waiting for ignition" + HostStageWaitingForIgnition HostStage = "Waiting for ignition" + + // HostStageConfiguring captures enum value "Configuring" + HostStageConfiguring HostStage = "Configuring" + + // HostStageJoined captures enum value "Joined" + HostStageJoined HostStage = "Joined" + + // HostStageDone captures enum value "Done" + HostStageDone HostStage = "Done" + + // HostStageFailed captures enum value "Failed" + HostStageFailed HostStage = "Failed" +) + +// for schema +var hostStageEnum []interface{} + +func init() { + var res []HostStage + if err := json.Unmarshal([]byte(`["Starting installation","Waiting for control plane","Start Waiting for control plane","Installing","Writing image to disk","Rebooting","Waiting for 
ignition","Configuring","Joined","Done","Failed"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + hostStageEnum = append(hostStageEnum, v) + } +} + +func (m HostStage) validateHostStageEnum(path, location string, value HostStage) error { + if err := validate.EnumCase(path, location, value, hostStageEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this host stage +func (m HostStage) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateHostStageEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/models/host_validation_id.go b/models/host_validation_id.go new file mode 100644 index 000000000..6841c23ac --- /dev/null +++ b/models/host_validation_id.go @@ -0,0 +1,93 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// HostValidationID host validation id +// +// swagger:model host-validation-id +type HostValidationID string + +const ( + + // HostValidationIDConnected captures enum value "connected" + HostValidationIDConnected HostValidationID = "connected" + + // HostValidationIDHasInventory captures enum value "has-inventory" + HostValidationIDHasInventory HostValidationID = "has-inventory" + + // HostValidationIDHasMinCPUCores captures enum value "has-min-cpu-cores" + HostValidationIDHasMinCPUCores HostValidationID = "has-min-cpu-cores" + + // HostValidationIDHasMinValidDisks captures enum value "has-min-valid-disks" + HostValidationIDHasMinValidDisks HostValidationID = "has-min-valid-disks" + + // HostValidationIDHasMinMemory captures enum value "has-min-memory" + 
HostValidationIDHasMinMemory HostValidationID = "has-min-memory" + + // HostValidationIDMachineCidrDefined captures enum value "machine-cidr-defined" + HostValidationIDMachineCidrDefined HostValidationID = "machine-cidr-defined" + + // HostValidationIDRoleDefined captures enum value "role-defined" + HostValidationIDRoleDefined HostValidationID = "role-defined" + + // HostValidationIDHasCPUCoresForRole captures enum value "has-cpu-cores-for-role" + HostValidationIDHasCPUCoresForRole HostValidationID = "has-cpu-cores-for-role" + + // HostValidationIDHasMemoryForRole captures enum value "has-memory-for-role" + HostValidationIDHasMemoryForRole HostValidationID = "has-memory-for-role" + + // HostValidationIDHostnameUnique captures enum value "hostname-unique" + HostValidationIDHostnameUnique HostValidationID = "hostname-unique" + + // HostValidationIDHostnameValid captures enum value "hostname-valid" + HostValidationIDHostnameValid HostValidationID = "hostname-valid" + + // HostValidationIDBelongsToMachineCidr captures enum value "belongs-to-machine-cidr" + HostValidationIDBelongsToMachineCidr HostValidationID = "belongs-to-machine-cidr" +) + +// for schema +var hostValidationIdEnum []interface{} + +func init() { + var res []HostValidationID + if err := json.Unmarshal([]byte(`["connected","has-inventory","has-min-cpu-cores","has-min-valid-disks","has-min-memory","machine-cidr-defined","role-defined","has-cpu-cores-for-role","has-memory-for-role","hostname-unique","hostname-valid","belongs-to-machine-cidr"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + hostValidationIdEnum = append(hostValidationIdEnum, v) + } +} + +func (m HostValidationID) validateHostValidationIDEnum(path, location string, value HostValidationID) error { + if err := validate.EnumCase(path, location, value, hostValidationIdEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this host validation id +func (m HostValidationID) Validate(formats 
strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateHostValidationIDEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/models/image_info.go b/models/image_info.go index aefd34b0a..16cd66b7d 100644 --- a/models/image_info.go +++ b/models/image_info.go @@ -19,7 +19,10 @@ type ImageInfo struct { // created at // Format: date-time - CreatedAt strfmt.DateTime `json:"created_at,omitempty" gorm:"type:datetime"` + CreatedAt strfmt.DateTime `json:"created_at,omitempty" gorm:"type:timestamp with time zone"` + + // Image generator version + GeneratorVersion string `json:"generator_version,omitempty"` // The URL of the HTTP/S proxy that agents should use to access the discovery service // http://\:\@\:\/ diff --git a/models/ingress_cert_params.go b/models/ingress_cert_params.go new file mode 100644 index 000000000..3d1dc8a6a --- /dev/null +++ b/models/ingress_cert_params.go @@ -0,0 +1,20 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" +) + +// IngressCertParams ingress cert params +// +// swagger:model ingress-cert-params +type IngressCertParams string + +// Validate validates this ingress cert params +func (m IngressCertParams) Validate(formats strfmt.Registry) error { + return nil +} diff --git a/models/introspection.go b/models/introspection.go deleted file mode 100644 index 53ffb278e..000000000 --- a/models/introspection.go +++ /dev/null @@ -1,169 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// Introspection introspection -// -// swagger:model introspection -type Introspection struct { - - // block devices - BlockDevices []*BlockDevice `json:"block_devices"` - - // cpu - CPU *CPUDetails `json:"cpu,omitempty"` - - // memory - Memory []*MemoryDetails `json:"memory"` - - // nics - Nics []*Nic `json:"nics"` -} - -// Validate validates this introspection -func (m *Introspection) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateBlockDevices(formats); err != nil { - res = append(res, err) - } - - if err := m.validateCPU(formats); err != nil { - res = append(res, err) - } - - if err := m.validateMemory(formats); err != nil { - res = append(res, err) - } - - if err := m.validateNics(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Introspection) validateBlockDevices(formats strfmt.Registry) error { - - if swag.IsZero(m.BlockDevices) { // not required - return nil - } - - for i := 0; i < len(m.BlockDevices); i++ { - if swag.IsZero(m.BlockDevices[i]) { // not required - continue - } - - if m.BlockDevices[i] != nil { - if err := m.BlockDevices[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("block_devices" + "." 
+ strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -func (m *Introspection) validateCPU(formats strfmt.Registry) error { - - if swag.IsZero(m.CPU) { // not required - return nil - } - - if m.CPU != nil { - if err := m.CPU.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("cpu") - } - return err - } - } - - return nil -} - -func (m *Introspection) validateMemory(formats strfmt.Registry) error { - - if swag.IsZero(m.Memory) { // not required - return nil - } - - for i := 0; i < len(m.Memory); i++ { - if swag.IsZero(m.Memory[i]) { // not required - continue - } - - if m.Memory[i] != nil { - if err := m.Memory[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("memory" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -func (m *Introspection) validateNics(formats strfmt.Registry) error { - - if swag.IsZero(m.Nics) { // not required - return nil - } - - for i := 0; i < len(m.Nics); i++ { - if swag.IsZero(m.Nics[i]) { // not required - continue - } - - if m.Nics[i] != nil { - if err := m.Nics[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("nics" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *Introspection) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Introspection) UnmarshalBinary(b []byte) error { - var res Introspection - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/models/list_managed_domains.go b/models/list_managed_domains.go new file mode 100644 index 000000000..05a52d0e2 --- /dev/null +++ b/models/list_managed_domains.go @@ -0,0 +1,45 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ListManagedDomains list managed domains +// +// swagger:model list-managed-domains +type ListManagedDomains []*ManagedDomain + +// Validate validates this list managed domains +func (m ListManagedDomains) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/models/list_versions.go b/models/list_versions.go new file mode 100644 index 000000000..94f357cd0 --- /dev/null +++ b/models/list_versions.go @@ -0,0 +1,72 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ListVersions list versions +// +// swagger:model list-versions +type ListVersions struct { + + // release tag + ReleaseTag string `json:"release_tag,omitempty"` + + // versions + Versions Versions `json:"versions,omitempty"` +} + +// Validate validates this list versions +func (m *ListVersions) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateVersions(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ListVersions) validateVersions(formats strfmt.Registry) error { + + if swag.IsZero(m.Versions) { // not required + return nil + } + + if err := m.Versions.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("versions") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ListVersions) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ListVersions) UnmarshalBinary(b []byte) error { + var res ListVersions + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/models/managed_domain.go b/models/managed_domain.go new file mode 100644 index 000000000..4c051ee7a --- /dev/null +++ b/models/managed_domain.go @@ -0,0 +1,100 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ManagedDomain managed domain +// +// swagger:model managed-domain +type ManagedDomain struct { + + // domain + Domain string `json:"domain,omitempty"` + + // provider + // Enum: [route53] + Provider string `json:"provider,omitempty"` +} + +// Validate validates this managed domain +func (m *ManagedDomain) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateProvider(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var managedDomainTypeProviderPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["route53"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + managedDomainTypeProviderPropEnum = append(managedDomainTypeProviderPropEnum, v) + } +} + +const ( + + // ManagedDomainProviderRoute53 captures enum value "route53" + ManagedDomainProviderRoute53 string = "route53" +) + +// prop value enum +func (m *ManagedDomain) validateProviderEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, managedDomainTypeProviderPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *ManagedDomain) validateProvider(formats strfmt.Registry) error { + + if swag.IsZero(m.Provider) { // not required + return nil + } + + // value enum + if err := m.validateProviderEnum("provider", "body", m.Provider); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ManagedDomain) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ManagedDomain) UnmarshalBinary(b []byte) error { + var res ManagedDomain + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/models/memory_details.go b/models/memory_details.go deleted file mode 100644 index f39bae546..000000000 --- a/models/memory_details.go +++ /dev/null @@ -1,61 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// MemoryDetails memory details -// -// swagger:model memory_details -type MemoryDetails struct { - - // available - Available int64 `json:"available,omitempty"` - - // buff cached - BuffCached int64 `json:"buff_cached,omitempty"` - - // free - Free int64 `json:"free,omitempty"` - - // name - Name string `json:"name,omitempty"` - - // shared - Shared int64 `json:"shared,omitempty"` - - // total - Total int64 `json:"total,omitempty"` - - // used - Used int64 `json:"used,omitempty"` -} - -// Validate validates this memory details -func (m *MemoryDetails) Validate(formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *MemoryDetails) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *MemoryDetails) UnmarshalBinary(b []byte) error { - var res MemoryDetails - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/models/nic.go b/models/nic.go deleted file mode 100644 index 899b32ae9..000000000 --- a/models/nic.go +++ /dev/null @@ -1,92 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// Nic nic -// -// swagger:model nic -type Nic struct { - - // cidrs - Cidrs []*Cidr `json:"cidrs"` - - // mac - Mac string `json:"mac,omitempty"` - - // mtu - Mtu int64 `json:"mtu,omitempty"` - - // name - Name string `json:"name,omitempty"` - - // state - State string `json:"state,omitempty"` -} - -// Validate validates this nic -func (m *Nic) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateCidrs(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Nic) validateCidrs(formats strfmt.Registry) error { - - if swag.IsZero(m.Cidrs) { // not required - return nil - } - - for i := 0; i < len(m.Cidrs); i++ { - if swag.IsZero(m.Cidrs[i]) { // not required - continue - } - - if m.Cidrs[i] != nil { - if err := m.Cidrs[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("cidrs" + "." 
+ strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *Nic) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Nic) UnmarshalBinary(b []byte) error { - var res Nic - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/models/step_reply.go b/models/step_reply.go index d1ea315ce..1e364a754 100644 --- a/models/step_reply.go +++ b/models/step_reply.go @@ -6,6 +6,7 @@ package models // Editing this file might prove futile when you re-run the swagger generate command import ( + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" ) @@ -26,10 +27,38 @@ type StepReply struct { // step id StepID string `json:"step_id,omitempty"` + + // step type + StepType StepType `json:"step_type,omitempty"` } // Validate validates this step reply func (m *StepReply) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateStepType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *StepReply) validateStepType(formats strfmt.Registry) error { + + if swag.IsZero(m.StepType) { // not required + return nil + } + + if err := m.StepType.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("step_type") + } + return err + } + return nil } diff --git a/models/step_type.go b/models/step_type.go index 10e3588a7..ccfcbc33a 100644 --- a/models/step_type.go +++ b/models/step_type.go @@ -20,9 +20,6 @@ type StepType string const ( - // StepTypeHardwareInfo captures enum value "hardware-info" - StepTypeHardwareInfo StepType = "hardware-info" - // StepTypeConnectivityCheck captures enum value "connectivity-check" StepTypeConnectivityCheck StepType = "connectivity-check" @@ -31,6 +28,15 @@ const ( // StepTypeInventory captures enum value "inventory" StepTypeInventory StepType = "inventory" + + // StepTypeInstall captures enum value "install" + StepTypeInstall StepType = "install" + + // StepTypeFreeNetworkAddresses captures enum value "free-network-addresses" + StepTypeFreeNetworkAddresses StepType = "free-network-addresses" + + // StepTypeResetInstallation captures enum value "reset-installation" + StepTypeResetInstallation StepType = "reset-installation" ) // for schema @@ -38,7 +44,7 @@ var stepTypeEnum []interface{} func init() { var res []StepType - if err := json.Unmarshal([]byte(`["hardware-info","connectivity-check","execute","inventory"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["connectivity-check","execute","inventory","install","free-network-addresses","reset-installation"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -47,7 +53,7 @@ func init() { } func (m StepType) validateStepTypeEnum(path, location string, value StepType) error { - if err := validate.Enum(path, location, value, stepTypeEnum); err != nil { + if err := validate.EnumCase(path, location, value, stepTypeEnum, true); err != nil { return err } return nil diff --git 
a/models/steps.go b/models/steps.go index b8a7a0c22..6876b72f7 100644 --- a/models/steps.go +++ b/models/steps.go @@ -16,21 +16,44 @@ import ( // Steps steps // // swagger:model steps -type Steps []*Step +type Steps struct { + + // instructions + Instructions []*Step `json:"instructions"` + + // next instruction seconds + NextInstructionSeconds int64 `json:"next_instruction_seconds,omitempty"` +} // Validate validates this steps -func (m Steps) Validate(formats strfmt.Registry) error { +func (m *Steps) Validate(formats strfmt.Registry) error { var res []error - for i := 0; i < len(m); i++ { - if swag.IsZero(m[i]) { // not required + if err := m.validateInstructions(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Steps) validateInstructions(formats strfmt.Registry) error { + + if swag.IsZero(m.Instructions) { // not required + return nil + } + + for i := 0; i < len(m.Instructions); i++ { + if swag.IsZero(m.Instructions[i]) { // not required continue } - if m[i] != nil { - if err := m[i].Validate(formats); err != nil { + if m.Instructions[i] != nil { + if err := m.Instructions[i].Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName(strconv.Itoa(i)) + return ve.ValidateName("instructions" + "." + strconv.Itoa(i)) } return err } @@ -38,8 +61,23 @@ func (m Steps) Validate(formats strfmt.Registry) error { } - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
+ return nil +} + +// MarshalBinary interface implementation +func (m *Steps) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Steps) UnmarshalBinary(b []byte) error { + var res Steps + if err := swag.ReadJSON(b, &res); err != nil { + return err } + *m = res return nil } diff --git a/models/versions.go b/models/versions.go new file mode 100644 index 000000000..549e543c3 --- /dev/null +++ b/models/versions.go @@ -0,0 +1,20 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" +) + +// Versions versions +// +// swagger:model versions +type Versions map[string]string + +// Validate validates this versions +func (m Versions) Validate(formats strfmt.Registry) error { + return nil +} diff --git a/openshift/template.yaml b/openshift/template.yaml new file mode 100644 index 000000000..3291499aa --- /dev/null +++ b/openshift/template.yaml @@ -0,0 +1,119 @@ +--- +parameters: +- name: BM_INVENTORY_IMAGE + value: quay.io/app-sre/bm-inventory +- name: IMAGE_TAG + value: '' + required: true +- name: OBJ_EXPIRER_IMAGE + value: quay.io/ocpmetal/s3-object-expirer +- name: OBJ_EXPIRER_IMAGE_TAG + value: latest +apiVersion: v1 +kind: Template +metadata: + name: assisted-installer +objects: +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: assisted-service + namespace: assisted-installer + spec: + selector: + matchLabels: + app: assisted-service + replicas: 1 + template: + metadata: + labels: + app: assisted-service + spec: + serviceAccountName: assisted-service + containers: + - name: assisted-service + image: ${BM_INVENTORY_IMAGE}:${IMAGE_TAG} + imagePullPolicy: Always + resources: + limits: + cpu: 200m + memory: 500Mi + requests: + cpu: 100m + memory: 400Mi + ports: + - 
name: assisted-svc + containerPort: 8090 + env: + - name: ROUTE53_CREDS + valueFrom: + secretKeyRef: + key: creds + name: route53-creds + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: aws_secret_access_key + name: assisted-installer-s3 + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: aws_access_key_id + name: assisted-installer-s3 + - name: S3_REGION + valueFrom: + secretKeyRef: + key: aws_region + name: assisted-installer-s3 + - name: S3_BUCKET + valueFrom: + secretKeyRef: + key: bucket + name: assisted-installer-s3 + - name: S3_ENDPOINT_URL + valueFrom: + secretKeyRef: + key: endpoint + name: assisted-installer-s3 + - name: DB_HOST + valueFrom: + secretKeyRef: + key: db.host + name: assisted-installer-rds + - name: DB_NAME + valueFrom: + secretKeyRef: + key: db.name + name: assisted-installer-rds + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + key: db.password + name: assisted-installer-rds + - name: DB_PORT + valueFrom: + secretKeyRef: + key: db.port + name: assisted-installer-rds + - name: DB_USER + valueFrom: + secretKeyRef: + key: db.user + name: assisted-installer-rds + - name: IMAGE_BUILDER_CMD + value: "" +- apiVersion: v1 + kind: Service + metadata: + labels: + app: assisted-service + name: assisted-service + namespace: assisted-installer + spec: + ports: + - name: assisted-svc + port: 8090 + protocol: TCP + targetPort: 8090 + selector: + app: assisted-service diff --git a/pkg/app/middleware.go b/pkg/app/middleware.go new file mode 100644 index 000000000..de915398c --- /dev/null +++ b/pkg/app/middleware.go @@ -0,0 +1,30 @@ +package app + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// WithMetricsResponderMiddleware Returns middleware which responds to /metrics endpoint with the prometheus metrics +// of the service +func WithMetricsResponderMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method 
== http.MethodGet && r.URL.Path == "/metrics" { + promhttp.Handler().ServeHTTP(w, r) + return + } + next.ServeHTTP(w, r) + }) +} + +// WithHealthMiddleware returns middleware which responds to the /health endpoint +func WithHealthMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodGet && r.URL.Path == "/health" { + w.WriteHeader(http.StatusOK) + return + } + next.ServeHTTP(w, r) + }) +} diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go new file mode 100644 index 000000000..6b034671c --- /dev/null +++ b/pkg/auth/auth.go @@ -0,0 +1,65 @@ +package auth + +import ( + "context" + "net/http" +) + +type contextKey string + +const contextUserIDKey = contextKey("user_id") +const contextOrgIDKey = contextKey("org_id") +const contextRoleKey = contextKey("role") +const AdminUserRole = "admin" +const DefaultUserID = "0000000" +const DefaultOrgID = "0000000" + +// Fake auth Middleware handler to add username from headers to request context +func GetUserInfoMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Update the context, as the jwt middleware will update it + ctx := r.Context() + ctx = UserIDToContext(ctx, DefaultUserID) + ctx = OrgIDToContext(ctx, DefaultOrgID) + ctx = UserRoleToContext(ctx, AdminUserRole) + *r = *r.WithContext(ctx) + + next.ServeHTTP(w, r) + }) +} + +func UserIDFromContext(ctx context.Context) string { + userID := ctx.Value(contextUserIDKey) + if userID == nil { + userID = "" + } + return userID.(string) +} + +func UserIDToContext(ctx context.Context, userID string) context.Context { + return context.WithValue(ctx, contextUserIDKey, userID) +} + +func OrgIDFromContext(ctx context.Context) string { + orgID := ctx.Value(contextOrgIDKey) + if orgID == nil { + orgID = "" + } + return orgID.(string) +} + +func OrgIDToContext(ctx context.Context, orgID string) context.Context { + return 
context.WithValue(ctx, contextOrgIDKey, orgID) +} + +func UserRoleFromContext(ctx context.Context) string { + role := ctx.Value(contextRoleKey) + if role == nil { + role = "" + } + return role.(string) +} + +func UserRoleToContext(ctx context.Context, roleID string) context.Context { + return context.WithValue(ctx, contextRoleKey, roleID) +} diff --git a/pkg/db/db.go b/pkg/db/db.go new file mode 100644 index 000000000..6b55779c5 --- /dev/null +++ b/pkg/db/db.go @@ -0,0 +1,9 @@ +package db + +type Config struct { + Host string `envconfig:"DB_HOST"` + Port string `envconfig:"DB_PORT"` + User string `envconfig:"DB_USER"` + Pass string `envconfig:"DB_PASS"` + Name string `envconfig:"DB_NAME"` +} diff --git a/pkg/externalmocks/mock_kube_client.go b/pkg/externalmocks/mock_kube_client.go index 0a11e6dc3..e622486c9 100644 --- a/pkg/externalmocks/mock_kube_client.go +++ b/pkg/externalmocks/mock_kube_client.go @@ -6,10 +6,11 @@ package externalmocks import ( context "context" + reflect "reflect" + gomock "github.com/golang/mock/gomock" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" - reflect "reflect" client "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/pkg/filemiddleware/middleware.go b/pkg/filemiddleware/middleware.go index 99b6028c6..3a437c3b3 100644 --- a/pkg/filemiddleware/middleware.go +++ b/pkg/filemiddleware/middleware.go @@ -3,24 +3,30 @@ package filemiddleware import ( "fmt" "net/http" + "strconv" "github.com/go-openapi/runtime" "github.com/go-openapi/runtime/middleware" ) -func NewResponder(next middleware.Responder, fname string) middleware.Responder { +func NewResponder(next middleware.Responder, fname string, length int64) middleware.Responder { return &fileMiddlewareResponder{ next: next, fileName: fname, + length: length, } } type fileMiddlewareResponder struct { next middleware.Responder fileName string + length int64 } func (f *fileMiddlewareResponder) WriteResponse(rw http.ResponseWriter, r runtime.Producer) { 
rw.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", f.fileName)) + if f.length != 0 { + rw.Header().Set("Content-Length", strconv.FormatInt(f.length, 10)) + } f.next.WriteResponse(rw, r) } diff --git a/pkg/job/mock_job.go b/pkg/job/mock_job.go index c9cfddff2..b752a8def 100644 --- a/pkg/job/mock_job.go +++ b/pkg/job/mock_job.go @@ -12,30 +12,30 @@ import ( client "sigs.k8s.io/controller-runtime/pkg/client" ) -// MockAPI is a mock of API interface. +// MockAPI is a mock of API interface type MockAPI struct { ctrl *gomock.Controller recorder *MockAPIMockRecorder } -// MockAPIMockRecorder is the mock recorder for MockAPI. +// MockAPIMockRecorder is the mock recorder for MockAPI type MockAPIMockRecorder struct { mock *MockAPI } -// NewMockAPI creates a new mock instance. +// NewMockAPI creates a new mock instance func NewMockAPI(ctrl *gomock.Controller) *MockAPI { mock := &MockAPI{ctrl: ctrl} mock.recorder = &MockAPIMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use. +// EXPECT returns an object that allows the caller to indicate expected use func (m *MockAPI) EXPECT() *MockAPIMockRecorder { return m.recorder } -// Create mocks base method. +// Create mocks base method func (m *MockAPI) Create(ctx context.Context, obj runtime.Object, opts ...client.CreateOption) error { m.ctrl.T.Helper() varargs := []interface{}{ctx, obj} @@ -47,14 +47,14 @@ func (m *MockAPI) Create(ctx context.Context, obj runtime.Object, opts ...client return ret0 } -// Create indicates an expected call of Create. +// Create indicates an expected call of Create func (mr *MockAPIMockRecorder) Create(ctx, obj interface{}, opts ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]interface{}{ctx, obj}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockAPI)(nil).Create), varargs...) } -// Monitor mocks base method. 
+// Monitor mocks base method func (m *MockAPI) Monitor(ctx context.Context, name, namespace string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Monitor", ctx, name, namespace) @@ -62,13 +62,13 @@ func (m *MockAPI) Monitor(ctx context.Context, name, namespace string) error { return ret0 } -// Monitor indicates an expected call of Monitor. +// Monitor indicates an expected call of Monitor func (mr *MockAPIMockRecorder) Monitor(ctx, name, namespace interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Monitor", reflect.TypeOf((*MockAPI)(nil).Monitor), ctx, name, namespace) } -// Delete mocks base method. +// Delete mocks base method func (m *MockAPI) Delete(ctx context.Context, name, namespace string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Delete", ctx, name, namespace) @@ -76,8 +76,8 @@ func (m *MockAPI) Delete(ctx context.Context, name, namespace string) error { return ret0 } -// Delete indicates an expected call of Monitor. 
+// Delete indicates an expected call of Delete func (mr *MockAPIMockRecorder) Delete(ctx, name, namespace interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockAPI)(nil).Monitor), ctx, name, namespace) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockAPI)(nil).Delete), ctx, name, namespace) } diff --git a/pkg/requestid/requestid_test.go b/pkg/requestid/requestid_test.go index cdabf8fa2..1bc869ae7 100644 --- a/pkg/requestid/requestid_test.go +++ b/pkg/requestid/requestid_test.go @@ -6,6 +6,10 @@ import ( "net/http/httptest" "testing" + "github.com/go-openapi/swag" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" ) @@ -65,3 +69,60 @@ func TestTransport(t *testing.T) { }) } } + +func TestMiddleware(t *testing.T) { + t.Parallel() + tests := []struct { + name string + requestID *string + }{ + { + name: "RequestID exist", + requestID: swag.String("1234"), + }, + { + name: "no request in context", + requestID: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // create a handler to use as "next" which will verify the request + nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + val := r.Context().Value(ctxKey) + assert.NotNil(t, val) + valStr, ok := val.(string) + if !ok { + t.Error("not string") + } + + if tt.requestID != nil { + // if request-id passed in header, that should be in the context + assert.Equal(t, valStr, *tt.requestID) + } else { + // if no request-id passed in header, valid uuid should be generated by middleware + assert.True(t, IsValidUUID(valStr)) + } + + }) + + // create the handler to test, using our custom "next" handler + h := Middleware(nextHandler) + + // create a mock request to use + req := httptest.NewRequest("GET", "http://testing", nil) + if tt.requestID != nil { + req.Header.Set(headerKey, *tt.requestID) 
+ } + + // call the handler using a mock response recorder (we'll not use that anyway) + h.ServeHTTP(httptest.NewRecorder(), req) + }) + } +} + +func IsValidUUID(u string) bool { + _, err := uuid.Parse(u) + return err == nil +} diff --git a/pkg/s3Client/mock_s3client.go b/pkg/s3Client/mock_s3client.go new file mode 100644 index 000000000..164eec130 --- /dev/null +++ b/pkg/s3Client/mock_s3client.go @@ -0,0 +1,109 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: s3Client.go + +// Package s3Client is a generated GoMock package. +package s3Client + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + io "io" + reflect "reflect" +) + +// MockS3Client is a mock of S3Client interface +type MockS3Client struct { + ctrl *gomock.Controller + recorder *MockS3ClientMockRecorder +} + +// MockS3ClientMockRecorder is the mock recorder for MockS3Client +type MockS3ClientMockRecorder struct { + mock *MockS3Client +} + +// NewMockS3Client creates a new mock instance +func NewMockS3Client(ctrl *gomock.Controller) *MockS3Client { + mock := &MockS3Client{ctrl: ctrl} + mock.recorder = &MockS3ClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockS3Client) EXPECT() *MockS3ClientMockRecorder { + return m.recorder +} + +// PushDataToS3 mocks base method +func (m *MockS3Client) PushDataToS3(ctx context.Context, data []byte, fileName, s3Bucket string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PushDataToS3", ctx, data, fileName, s3Bucket) + ret0, _ := ret[0].(error) + return ret0 +} + +// PushDataToS3 indicates an expected call of PushDataToS3 +func (mr *MockS3ClientMockRecorder) PushDataToS3(ctx, data, fileName, s3Bucket interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushDataToS3", reflect.TypeOf((*MockS3Client)(nil).PushDataToS3), ctx, data, fileName, s3Bucket) +} + +// DownloadFileFromS3 mocks base method 
+func (m *MockS3Client) DownloadFileFromS3(ctx context.Context, fileName, s3Bucket string) (io.ReadCloser, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DownloadFileFromS3", ctx, fileName, s3Bucket) + ret0, _ := ret[0].(io.ReadCloser) + ret1, _ := ret[1].(int64) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// DownloadFileFromS3 indicates an expected call of DownloadFileFromS3 +func (mr *MockS3ClientMockRecorder) DownloadFileFromS3(ctx, fileName, s3Bucket interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadFileFromS3", reflect.TypeOf((*MockS3Client)(nil).DownloadFileFromS3), ctx, fileName, s3Bucket) +} + +// DoesObjectExist mocks base method +func (m *MockS3Client) DoesObjectExist(ctx context.Context, fileName, s3Bucket string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DoesObjectExist", ctx, fileName, s3Bucket) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DoesObjectExist indicates an expected call of DoesObjectExist +func (mr *MockS3ClientMockRecorder) DoesObjectExist(ctx, fileName, s3Bucket interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoesObjectExist", reflect.TypeOf((*MockS3Client)(nil).DoesObjectExist), ctx, fileName, s3Bucket) +} + +// UpdateObjectTag mocks base method +func (m *MockS3Client) UpdateObjectTag(ctx context.Context, objectName, s3Bucket, key, value string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateObjectTag", ctx, objectName, s3Bucket, key, value) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateObjectTag indicates an expected call of UpdateObjectTag +func (mr *MockS3ClientMockRecorder) UpdateObjectTag(ctx, objectName, s3Bucket, key, value interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateObjectTag", 
reflect.TypeOf((*MockS3Client)(nil).UpdateObjectTag), ctx, objectName, s3Bucket, key, value) +} + +// DeleteFileFromS3 mocks base method +func (m *MockS3Client) DeleteFileFromS3(ctx context.Context, fileName, s3Bucket string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteFileFromS3", ctx, fileName, s3Bucket) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteFileFromS3 indicates an expected call of DeleteFileFromS3 +func (mr *MockS3ClientMockRecorder) DeleteFileFromS3(ctx, fileName, s3Bucket interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteFileFromS3", reflect.TypeOf((*MockS3Client)(nil).DeleteFileFromS3), ctx, fileName, s3Bucket) +} diff --git a/pkg/s3Client/s3Client.go b/pkg/s3Client/s3Client.go new file mode 100644 index 000000000..9d10ec884 --- /dev/null +++ b/pkg/s3Client/s3Client.go @@ -0,0 +1,120 @@ +package s3Client + +import ( + "bytes" + "context" + "fmt" + "io" + "strings" + + logutil "github.com/filanov/bm-inventory/pkg/log" + + "github.com/minio/minio-go/v6" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +//go:generate mockgen -source=s3Client.go -package=s3Client -destination=mock_s3client.go +type S3Client interface { + PushDataToS3(ctx context.Context, data []byte, fileName string, s3Bucket string) error + DownloadFileFromS3(ctx context.Context, fileName string, s3Bucket string) (io.ReadCloser, int64, error) + DoesObjectExist(ctx context.Context, fileName string, s3Bucket string) (bool, error) + UpdateObjectTag(ctx context.Context, objectName, s3Bucket, key, value string) (bool, error) + DeleteFileFromS3(ctx context.Context, fileName string, s3Bucket string) error +} + +type s3Client struct { + log *logrus.Logger + client *minio.Client +} + +func NewS3Client(s3EndpointURL string, awsAccessKeyID string, awsSecretAccessKey string, logger *logrus.Logger) (S3Client, error) { + client, err := minio.New(strings.Replace(s3EndpointURL, "http://", "", 1), 
awsAccessKeyID, awsSecretAccessKey, false) + if err != nil { + return nil, errors.Wrapf(err, "Unable to create aws client to %s", s3EndpointURL) + } + return &s3Client{logger, client}, nil +} + +func (s s3Client) PushDataToS3(ctx context.Context, data []byte, fileName string, s3Bucket string) error { + log := logutil.FromContext(ctx, s.log) + // create a reader from data in memory + reader := bytes.NewReader(data) + _, err := s.client.PutObject(s3Bucket, fileName, reader, reader.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + err = errors.Wrapf(err, "Unable to upload %s to %s", fileName, s3Bucket) + log.Error(err) + return err + } + s.log.Infof("Successfully uploaded %s to %s", fileName, s3Bucket) + return nil +} + +func (s s3Client) DownloadFileFromS3(ctx context.Context, fileName string, s3Bucket string) (io.ReadCloser, int64, error) { + log := logutil.FromContext(ctx, s.log) + log.Infof("Downloading %s from bucket %s", fileName, s3Bucket) + stat, err := s.client.StatObject(s3Bucket, fileName, minio.StatObjectOptions{}) + if err != nil { + errResponse := minio.ToErrorResponse(err) + if errResponse.Code == "NoSuchKey" { + log.Warnf("%s doesn't exists in bucket %s", fileName, s3Bucket) + return nil, 0, errors.Errorf("%s doesn't exist", fileName) + } + return nil, 0, err + } + contentLength := stat.Size + + resp, err := s.client.GetObject(s3Bucket, fileName, minio.GetObjectOptions{}) + if err != nil { + log.WithError(err).Errorf("Failed to get %s file", fileName) + return nil, 0, err + } + + return resp, contentLength, nil +} + +func (s s3Client) DoesObjectExist(ctx context.Context, objectName string, s3Bucket string) (bool, error) { + log := logutil.FromContext(ctx, s.log) + log.Infof("Verifying if %s exists in %s", objectName, s3Bucket) + _, err := s.client.StatObject(s3Bucket, objectName, minio.StatObjectOptions{}) + if err != nil { + errResponse := minio.ToErrorResponse(err) + if errResponse.Code == "NoSuchKey" { + 
return false, nil + } + return false, errors.Wrap(err, fmt.Sprintf("failed to get %s from %s", objectName, s3Bucket)) + } + return true, nil +} + +func (s s3Client) DeleteFileFromS3(ctx context.Context, fileName string, s3Bucket string) error { + log := logutil.FromContext(ctx, s.log) + log.Infof("Deleting file if %s exists in %s", fileName, s3Bucket) + err := s.client.RemoveObject(s3Bucket, fileName) + if err != nil { + errResponse := minio.ToErrorResponse(err) + if errResponse.Code == "NoSuchKey" { + log.Warnf("File %s does not exists in %s", fileName, s3Bucket) + return nil + } + return errors.Wrap(err, fmt.Sprintf("failed to delete %s from %s", fileName, s3Bucket)) + } + log.Infof("Deleted file %s from %s", fileName, s3Bucket) + return nil +} + +func (s s3Client) UpdateObjectTag(ctx context.Context, objectName, s3Bucket, key, value string) (bool, error) { + log := logutil.FromContext(ctx, s.log) + log.Infof("Adding tag: %s - %s", key, value) + tags := map[string]string{key: value} + err := s.client.PutObjectTagging(s3Bucket, objectName, tags) + if err != nil { + errResponse := minio.ToErrorResponse(err) + if errResponse.Code == "NoSuchKey" { + return false, nil + } + log.Errorf("Updating object tag failed: %s", errResponse.Code) + return false, errors.Wrap(err, fmt.Sprintf("failed to update tags on %s/%s", s3Bucket, objectName)) + } + return true, nil +} diff --git a/pkg/s3wrapper/client.go b/pkg/s3wrapper/client.go new file mode 100644 index 000000000..8fcb58057 --- /dev/null +++ b/pkg/s3wrapper/client.go @@ -0,0 +1,83 @@ +package s3wrapper + +import ( + "crypto/tls" + "net" + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/go-openapi/swag" + "github.com/pkg/errors" +) + +type Config struct { + S3EndpointURL string `envconfig:"S3_ENDPOINT_URL"` + Region string `envconfig:"S3_REGION"` + S3Bucket string 
`envconfig:"S3_BUCKET"` + AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"` + AwsSecretAccessKey string `envconfig:"AWS_SECRET_ACCESS_KEY"` +} + +func CreateBucket(cfg *Config) error { + client, err := NewS3Client(cfg) + if err != nil { + return err + } + if _, err = client.CreateBucket(&s3.CreateBucketInput{ + Bucket: swag.String(cfg.S3Bucket), + }); err != nil { + return errors.Wrapf(err, "failed to create s3 bucket %s", cfg.S3Bucket) + } + return nil +} + +func NewS3Session(cfg *Config) (*session.Session, error) { + HTTPTransport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 0, + MaxIdleConnsPerHost: 4096, + MaxIdleConns: 0, + IdleConnTimeout: time.Minute, + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // true to enable use s3 with ip address (scality) + } + creds := credentials.NewStaticCredentials(cfg.AwsAccessKeyID, cfg.AwsSecretAccessKey, "") + + awsConfig := &aws.Config{ + Region: aws.String(cfg.Region), + Endpoint: aws.String(cfg.S3EndpointURL), + Credentials: creds, + DisableSSL: aws.Bool(true), + S3ForcePathStyle: aws.Bool(true), + S3Disable100Continue: aws.Bool(true), + HTTPClient: &http.Client{Transport: HTTPTransport}, + } + awsSession, err := session.NewSession(awsConfig) + if err != nil { + return nil, errors.Wrap(err, "failed to create s3 session") + } + + return awsSession, nil +} + +// NewS3Client creates new s3 client using default config along with defined env variables +func NewS3Client(cfg *Config) (*s3.S3, error) { + awsSession, err := NewS3Session(cfg) + if err != nil { + return nil, err + } + + client := s3.New(awsSession) + if client == nil { + return nil, errors.Errorf("failed to create s3 client") + } + return client, nil +} diff --git a/pkg/transaction/transaction.go b/pkg/transaction/transaction.go new file mode 100644 index 000000000..76086bf23 --- 
/dev/null +++ b/pkg/transaction/transaction.go @@ -0,0 +1,9 @@ +package transaction + +import "github.com/jinzhu/gorm" + +func AddForUpdateQueryOption(db *gorm.DB) { + if db.Dialect().GetName() != "sqlite3" { + *db = *db.Set("gorm:query_option", "FOR UPDATE") + } +} diff --git a/pr_check.sh b/pr_check.sh new file mode 100755 index 000000000..1e206a212 --- /dev/null +++ b/pr_check.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +IMAGE_TEST=bm-inventory-test + +docker build -t ${IMAGE_TEST} -f Dockerfile.test . +docker run --rm ${IMAGE_TEST} diff --git a/restapi/configure_assisted_install.go b/restapi/configure_assisted_install.go index ac2c77516..6306eaecf 100644 --- a/restapi/configure_assisted_install.go +++ b/restapi/configure_assisted_install.go @@ -16,6 +16,8 @@ import ( "github.com/filanov/bm-inventory/restapi/operations" "github.com/filanov/bm-inventory/restapi/operations/events" "github.com/filanov/bm-inventory/restapi/operations/installer" + "github.com/filanov/bm-inventory/restapi/operations/managed_domains" + "github.com/filanov/bm-inventory/restapi/operations/versions" ) type contextKey string @@ -24,41 +26,117 @@ const AuthKey contextKey = "Auth" //go:generate mockery -name EventsAPI -inpkg -// EventsAPI +/* EventsAPI */ type EventsAPI interface { + /* ListEvents Lists events for an entity_id */ ListEvents(ctx context.Context, params events.ListEventsParams) middleware.Responder } //go:generate mockery -name InstallerAPI -inpkg -// InstallerAPI +/* InstallerAPI */ type InstallerAPI interface { + /* CancelInstallation Cancels an ongoing installation. */ + CancelInstallation(ctx context.Context, params installer.CancelInstallationParams) middleware.Responder + + /* CompleteInstallation Agent API to mark a finalizing installation as complete. */ + CompleteInstallation(ctx context.Context, params installer.CompleteInstallationParams) middleware.Responder + + /* DeregisterCluster Deletes an OpenShift bare metal cluster definition. 
*/ DeregisterCluster(ctx context.Context, params installer.DeregisterClusterParams) middleware.Responder + + /* DeregisterHost Deregisters an OpenShift bare metal host. */ DeregisterHost(ctx context.Context, params installer.DeregisterHostParams) middleware.Responder + + /* DisableHost Disables a host for inclusion in the cluster. */ DisableHost(ctx context.Context, params installer.DisableHostParams) middleware.Responder + + /* DownloadClusterFiles Downloads files relating to the installed/installing cluster. */ DownloadClusterFiles(ctx context.Context, params installer.DownloadClusterFilesParams) middleware.Responder + + /* DownloadClusterISO Downloads the OpenShift per-cluster discovery ISO. */ DownloadClusterISO(ctx context.Context, params installer.DownloadClusterISOParams) middleware.Responder + + /* DownloadClusterKubeconfig Downloads the kubeconfig file for this cluster. */ + DownloadClusterKubeconfig(ctx context.Context, params installer.DownloadClusterKubeconfigParams) middleware.Responder + + /* EnableHost Enables a host for inclusion in the cluster. */ EnableHost(ctx context.Context, params installer.EnableHostParams) middleware.Responder + + /* GenerateClusterISO Creates a new OpenShift per-cluster discovery ISO. */ GenerateClusterISO(ctx context.Context, params installer.GenerateClusterISOParams) middleware.Responder + + /* GetCluster Retrieves the details of the OpenShift bare metal cluster. */ GetCluster(ctx context.Context, params installer.GetClusterParams) middleware.Responder + + /* GetCredentials Get the the cluster admin credentials. */ GetCredentials(ctx context.Context, params installer.GetCredentialsParams) middleware.Responder + + /* GetFreeAddresses Retrieves the free address list for a network. */ + GetFreeAddresses(ctx context.Context, params installer.GetFreeAddressesParams) middleware.Responder + + /* GetHost Retrieves the details of the OpenShift bare metal host. 
*/ GetHost(ctx context.Context, params installer.GetHostParams) middleware.Responder + + /* GetNextSteps Retrieves the next operations that the host agent needs to perform. */ GetNextSteps(ctx context.Context, params installer.GetNextStepsParams) middleware.Responder + + /* InstallCluster Installs the OpenShift bare metal cluster. */ InstallCluster(ctx context.Context, params installer.InstallClusterParams) middleware.Responder + + /* ListClusters Retrieves the list of OpenShift bare metal clusters. */ ListClusters(ctx context.Context, params installer.ListClustersParams) middleware.Responder + + /* ListHosts Retrieves the list of OpenShift bare metal hosts. */ ListHosts(ctx context.Context, params installer.ListHostsParams) middleware.Responder + + /* PostStepReply Posts the result of the operations from the host agent. */ PostStepReply(ctx context.Context, params installer.PostStepReplyParams) middleware.Responder + + /* RegisterCluster Creates a new OpenShift bare metal cluster definition. */ RegisterCluster(ctx context.Context, params installer.RegisterClusterParams) middleware.Responder + + /* RegisterHost Registers a new OpenShift bare metal host. */ RegisterHost(ctx context.Context, params installer.RegisterHostParams) middleware.Responder + + /* ResetCluster Resets a failed installation. */ + ResetCluster(ctx context.Context, params installer.ResetClusterParams) middleware.Responder + + /* SetDebugStep Sets a single shot debug step that will be sent next time the host agent will ask for a command. */ SetDebugStep(ctx context.Context, params installer.SetDebugStepParams) middleware.Responder + + /* UpdateCluster Updates an OpenShift bare metal cluster definition. 
*/ UpdateCluster(ctx context.Context, params installer.UpdateClusterParams) middleware.Responder + + /* UpdateHostInstallProgress Update installation progress */ UpdateHostInstallProgress(ctx context.Context, params installer.UpdateHostInstallProgressParams) middleware.Responder + + /* UploadClusterIngressCert Transfer the ingress certificate for the cluster. */ + UploadClusterIngressCert(ctx context.Context, params installer.UploadClusterIngressCertParams) middleware.Responder +} + +//go:generate mockery -name ManagedDomainsAPI -inpkg + +/* ManagedDomainsAPI */ +type ManagedDomainsAPI interface { + /* ListManagedDomains List of managed DNS domains */ + ListManagedDomains(ctx context.Context, params managed_domains.ListManagedDomainsParams) middleware.Responder +} + +//go:generate mockery -name VersionsAPI -inpkg + +/* VersionsAPI */ +type VersionsAPI interface { + /* ListComponentVersions List of componenets versions */ + ListComponentVersions(ctx context.Context, params versions.ListComponentVersionsParams) middleware.Responder } // Config is configuration for Handler type Config struct { EventsAPI InstallerAPI + ManagedDomainsAPI + VersionsAPI Logger func(string, ...interface{}) // InnerMiddleware is for the handler executors. These do not apply to the swagger.json document. 
// The middleware executes after routing but before authentication, binding and validation @@ -91,6 +169,14 @@ func HandlerAPI(c Config) (http.Handler, *operations.AssistedInstallAPI, error) api.JSONConsumer = runtime.JSONConsumer() api.BinProducer = runtime.ByteStreamProducer() api.JSONProducer = runtime.JSONProducer() + api.InstallerCancelInstallationHandler = installer.CancelInstallationHandlerFunc(func(params installer.CancelInstallationParams) middleware.Responder { + ctx := params.HTTPRequest.Context() + return c.InstallerAPI.CancelInstallation(ctx, params) + }) + api.InstallerCompleteInstallationHandler = installer.CompleteInstallationHandlerFunc(func(params installer.CompleteInstallationParams) middleware.Responder { + ctx := params.HTTPRequest.Context() + return c.InstallerAPI.CompleteInstallation(ctx, params) + }) api.InstallerDeregisterClusterHandler = installer.DeregisterClusterHandlerFunc(func(params installer.DeregisterClusterParams) middleware.Responder { ctx := params.HTTPRequest.Context() return c.InstallerAPI.DeregisterCluster(ctx, params) @@ -111,6 +197,10 @@ func HandlerAPI(c Config) (http.Handler, *operations.AssistedInstallAPI, error) ctx := params.HTTPRequest.Context() return c.InstallerAPI.DownloadClusterISO(ctx, params) }) + api.InstallerDownloadClusterKubeconfigHandler = installer.DownloadClusterKubeconfigHandlerFunc(func(params installer.DownloadClusterKubeconfigParams) middleware.Responder { + ctx := params.HTTPRequest.Context() + return c.InstallerAPI.DownloadClusterKubeconfig(ctx, params) + }) api.InstallerEnableHostHandler = installer.EnableHostHandlerFunc(func(params installer.EnableHostParams) middleware.Responder { ctx := params.HTTPRequest.Context() return c.InstallerAPI.EnableHost(ctx, params) @@ -127,6 +217,10 @@ func HandlerAPI(c Config) (http.Handler, *operations.AssistedInstallAPI, error) ctx := params.HTTPRequest.Context() return c.InstallerAPI.GetCredentials(ctx, params) }) + api.InstallerGetFreeAddressesHandler = 
installer.GetFreeAddressesHandlerFunc(func(params installer.GetFreeAddressesParams) middleware.Responder { + ctx := params.HTTPRequest.Context() + return c.InstallerAPI.GetFreeAddresses(ctx, params) + }) api.InstallerGetHostHandler = installer.GetHostHandlerFunc(func(params installer.GetHostParams) middleware.Responder { ctx := params.HTTPRequest.Context() return c.InstallerAPI.GetHost(ctx, params) @@ -143,6 +237,10 @@ func HandlerAPI(c Config) (http.Handler, *operations.AssistedInstallAPI, error) ctx := params.HTTPRequest.Context() return c.InstallerAPI.ListClusters(ctx, params) }) + api.VersionsListComponentVersionsHandler = versions.ListComponentVersionsHandlerFunc(func(params versions.ListComponentVersionsParams) middleware.Responder { + ctx := params.HTTPRequest.Context() + return c.VersionsAPI.ListComponentVersions(ctx, params) + }) api.EventsListEventsHandler = events.ListEventsHandlerFunc(func(params events.ListEventsParams) middleware.Responder { ctx := params.HTTPRequest.Context() return c.EventsAPI.ListEvents(ctx, params) @@ -151,6 +249,10 @@ func HandlerAPI(c Config) (http.Handler, *operations.AssistedInstallAPI, error) ctx := params.HTTPRequest.Context() return c.InstallerAPI.ListHosts(ctx, params) }) + api.ManagedDomainsListManagedDomainsHandler = managed_domains.ListManagedDomainsHandlerFunc(func(params managed_domains.ListManagedDomainsParams) middleware.Responder { + ctx := params.HTTPRequest.Context() + return c.ManagedDomainsAPI.ListManagedDomains(ctx, params) + }) api.InstallerPostStepReplyHandler = installer.PostStepReplyHandlerFunc(func(params installer.PostStepReplyParams) middleware.Responder { ctx := params.HTTPRequest.Context() return c.InstallerAPI.PostStepReply(ctx, params) @@ -163,6 +265,10 @@ func HandlerAPI(c Config) (http.Handler, *operations.AssistedInstallAPI, error) ctx := params.HTTPRequest.Context() return c.InstallerAPI.RegisterHost(ctx, params) }) + api.InstallerResetClusterHandler = 
installer.ResetClusterHandlerFunc(func(params installer.ResetClusterParams) middleware.Responder { + ctx := params.HTTPRequest.Context() + return c.InstallerAPI.ResetCluster(ctx, params) + }) api.InstallerSetDebugStepHandler = installer.SetDebugStepHandlerFunc(func(params installer.SetDebugStepParams) middleware.Responder { ctx := params.HTTPRequest.Context() return c.InstallerAPI.SetDebugStep(ctx, params) @@ -175,6 +281,10 @@ func HandlerAPI(c Config) (http.Handler, *operations.AssistedInstallAPI, error) ctx := params.HTTPRequest.Context() return c.InstallerAPI.UpdateHostInstallProgress(ctx, params) }) + api.InstallerUploadClusterIngressCertHandler = installer.UploadClusterIngressCertHandlerFunc(func(params installer.UploadClusterIngressCertParams) middleware.Responder { + ctx := params.HTTPRequest.Context() + return c.InstallerAPI.UploadClusterIngressCert(ctx, params) + }) api.ServerShutdown = func() {} return api.Serve(c.InnerMiddleware), api, nil } diff --git a/restapi/embedded_spec.go b/restapi/embedded_spec.go index fa5a343b7..6d1e2230c 100644 --- a/restapi/embedded_spec.go +++ b/restapi/embedded_spec.go @@ -96,47 +96,6 @@ func init() { } } }, - "/clusters/{clusterId}/hosts/{hostId}/progress": { - "put": { - "tags": [ - "installer" - ], - "summary": "Update installation progress", - "operationId": "UpdateHostInstallProgress", - "parameters": [ - { - "type": "string", - "format": "uuid", - "description": "The ID of the cluster to retrieve", - "name": "clusterId", - "in": "path", - "required": true - }, - { - "type": "string", - "format": "uuid", - "description": "The ID of the host to retrieve", - "name": "hostId", - "in": "path", - "required": true - }, - { - "description": "New progress value", - "name": "host-install-progress-params", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/host-install-progress-params" - } - } - ], - "responses": { - "200": { - "description": "Update install progress" - } - } - } - }, 
"/clusters/{cluster_id}": { "get": { "tags": [ @@ -270,6 +229,102 @@ func init() { } } }, + "/clusters/{cluster_id}/actions/cancel": { + "post": { + "tags": [ + "installer" + ], + "summary": "Cancels an ongoing installation.", + "operationId": "CancelInstallation", + "parameters": [ + { + "type": "string", + "format": "uuid", + "name": "cluster_id", + "in": "path", + "required": true + } + ], + "responses": { + "202": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/cluster" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "409": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, + "/clusters/{cluster_id}/actions/complete_installation": { + "post": { + "tags": [ + "installer" + ], + "summary": "Agent API to mark a finalizing installation as complete.", + "operationId": "CompleteInstallation", + "parameters": [ + { + "type": "string", + "format": "uuid", + "name": "cluster_id", + "in": "path", + "required": true + }, + { + "name": "completion-params", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/completion-params" + } + } + ], + "responses": { + "202": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/cluster" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "409": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, "/clusters/{cluster_id}/actions/install": { "post": { "tags": [ @@ -320,6 +375,50 @@ func init() { } } }, + "/clusters/{cluster_id}/actions/reset": { + "post": { + "tags": [ + "installer" + ], + "summary": "Resets a failed installation.", + "operationId": "ResetCluster", + "parameters": [ + { + "type": 
"string", + "format": "uuid", + "name": "cluster_id", + "in": "path", + "required": true + } + ], + "responses": { + "202": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/cluster" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "409": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, "/clusters/{cluster_id}/credentials": { "get": { "tags": [ @@ -389,7 +488,9 @@ func init() { "metadata.json", "worker.ign", "kubeadmin-password", - "kubeconfig" + "kubeconfig", + "kubeconfig-noingress", + "install-config.yaml" ], "type": "string", "name": "file_name", @@ -515,7 +616,10 @@ func init() { } }, "409": { - "description": "Error." + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } }, "500": { "description": "Error.", @@ -526,13 +630,16 @@ func init() { } } }, - "/clusters/{cluster_id}/hosts": { + "/clusters/{cluster_id}/downloads/kubeconfig": { "get": { + "produces": [ + "application/octet-stream" + ], "tags": [ "installer" ], - "summary": "Retrieves the list of OpenShift bare metal hosts.", - "operationId": "ListHosts", + "summary": "Downloads the kubeconfig file for this cluster.", + "operationId": "DownloadClusterKubeconfig", "parameters": [ { "type": "string", @@ -546,7 +653,20 @@ func init() { "200": { "description": "Success.", "schema": { - "$ref": "#/definitions/host-list" + "type": "string", + "format": "binary" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "409": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" } }, "500": { @@ -556,13 +676,15 @@ func init() { } } } - }, - "post": { + } + }, + "/clusters/{cluster_id}/free_addresses": { + "get": { "tags": [ "installer" ], - "summary": "Registers a new OpenShift bare metal host.", - 
"operationId": "RegisterHost", + "summary": "Retrieves the free address list for a network.", + "operationId": "GetFreeAddresses", "parameters": [ { "type": "string", @@ -572,22 +694,34 @@ func init() { "required": true }, { - "name": "new-host-params", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/host-create-params" - } + "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$", + "type": "string", + "name": "network", + "in": "query", + "required": true + }, + { + "maximum": 8000, + "minimum": 1, + "type": "integer", + "default": 8000, + "name": "limit", + "in": "query" + }, + { + "type": "string", + "name": "prefix", + "in": "query" } ], "responses": { - "201": { - "description": "Success.", + "200": { + "description": "Success", "schema": { - "$ref": "#/definitions/host" + "$ref": "#/definitions/free-addresses-list" } }, - "400": { + "404": { "description": "Error.", "schema": { "$ref": "#/definitions/error" @@ -602,13 +736,13 @@ func init() { } } }, - "/clusters/{cluster_id}/hosts/{host_id}": { + "/clusters/{cluster_id}/hosts": { "get": { "tags": [ "installer" ], - "summary": "Retrieves the details of the OpenShift bare metal host.", - "operationId": "GetHost", + "summary": "Retrieves the list of OpenShift bare metal hosts.", + "operationId": "ListHosts", "parameters": [ { "type": "string", @@ -616,26 +750,13 @@ func init() { "name": "cluster_id", "in": "path", "required": true - }, - { - "type": "string", - "format": "uuid", - "name": "host_id", - "in": "path", - "required": true } ], "responses": { "200": { "description": "Success.", "schema": { - "$ref": "#/definitions/host" - } - }, - "404": { - "description": "Error.", - "schema": { - "$ref": "#/definitions/error" + "$ref": "#/definitions/host-list" } }, "500": { @@ -646,12 +767,12 @@ func init() { } } }, - "delete": { + "post": { "tags": [ "installer" ], - "summary": "Deregisters an OpenShift bare metal host.", - "operationId": "DeregisterHost", + "summary": 
"Registers a new OpenShift bare metal host.", + "operationId": "RegisterHost", "parameters": [ { "type": "string", @@ -661,15 +782,116 @@ func init() { "required": true }, { - "type": "string", - "format": "uuid", - "name": "host_id", - "in": "path", - "required": true + "name": "new-host-params", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/host-create-params" + } } ], "responses": { - "204": { + "201": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/host" + } + }, + "400": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "403": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, + "/clusters/{cluster_id}/hosts/{host_id}": { + "get": { + "tags": [ + "installer" + ], + "summary": "Retrieves the details of the OpenShift bare metal host.", + "operationId": "GetHost", + "parameters": [ + { + "type": "string", + "format": "uuid", + "name": "cluster_id", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "name": "host_id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/host" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + }, + "delete": { + "tags": [ + "installer" + ], + "summary": "Deregisters an OpenShift bare metal host.", + "operationId": "DeregisterHost", + "parameters": [ + { + "type": "string", + "format": "uuid", + "name": "cluster_id", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "name": "host_id", + "in": "path", + "required": true 
+ } + ], + "responses": { + "204": { "description": "Success." }, "400": { @@ -767,8 +989,11 @@ func init() { } ], "responses": { - "204": { - "description": "Success." + "200": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/host" + } }, "404": { "description": "Error.", @@ -813,8 +1038,11 @@ func init() { } ], "responses": { - "204": { - "description": "Success." + "200": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/host" + } }, "404": { "description": "Error.", @@ -935,6 +1163,148 @@ func init() { } } }, + "/clusters/{cluster_id}/hosts/{host_id}/progress": { + "put": { + "tags": [ + "installer" + ], + "summary": "Update installation progress", + "operationId": "UpdateHostInstallProgress", + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "The ID of the cluster to retrieve", + "name": "cluster_id", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "The ID of the host to retrieve", + "name": "host_id", + "in": "path", + "required": true + }, + { + "description": "New progress value", + "name": "host-progress", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/host-progress" + } + } + ], + "responses": { + "200": { + "description": "Update install progress" + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, + "/clusters/{cluster_id}/uploads/ingress-cert": { + "post": { + "tags": [ + "installer" + ], + "summary": "Transfer the ingress certificate for the cluster.", + "operationId": "UploadClusterIngressCert", + "parameters": [ + { + "type": "string", + "format": "uuid", + "name": "cluster_id", + "in": "path", + "required": true + }, + { + "name": "ingress-cert-params", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ingress-cert-params" + } 
+ } + ], + "responses": { + "201": { + "description": "Success." + }, + "400": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, + "/component_versions": { + "get": { + "tags": [ + "versions" + ], + "summary": "List of componenets versions", + "operationId": "ListComponentVersions", + "responses": { + "200": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/list-versions" + } + } + } + } + }, + "/domains": { + "get": { + "tags": [ + "managed_domains" + ], + "summary": "List of managed DNS domains", + "operationId": "ListManagedDomains", + "responses": { + "200": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/list-managed-domains" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, "/events/{entity_id}": { "get": { "tags": [ @@ -969,57 +1339,14 @@ func init() { } }, "definitions": { - "block-device": { + "boot": { "type": "object", "properties": { - "device_type": { - "type": "string" - }, - "fstype": { - "type": "string" - }, - "major_device_number": { - "type": "integer" - }, - "minor_device_number": { - "type": "integer" - }, - "mountpoint": { + "current_boot_mode": { "type": "string" }, - "name": { + "pxe_interface": { "type": "string" - }, - "read_only": { - "type": "boolean" - }, - "removable_device": { - "type": "integer" - }, - "size": { - "type": "integer" - } - } - }, - "boot": { - "type": "object", - "properties": { - "current_boot_mode": { - "type": "string" - }, - "pxe_interface": { - "type": "string" - } - } - }, - "cidr": { - "type": "object", - "properties": { - "ip_address": { - "type": "string" - }, - "mask": { - "type": "integer" } } }, @@ -1037,7 +1364,7 @@ func init() { "api_vip": { "description": "Virtual IP used 
to reach the OpenShift cluster API.", "type": "string", - "format": "ipv4" + "pattern": "^(([0-9]{1,3}\\.){3}[0-9]{1,3})?$" }, "base_dns_domain": { "description": "Base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name.", @@ -1058,12 +1385,15 @@ func init() { "description": "The time that this cluster was created.", "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" }, - "dns_vip": { - "description": "Virtual IP used internally by the cluster for automating internal DNS requirements.", - "type": "string", - "format": "ipv4" + "host_networks": { + "description": "List of host networks to be filled during query.", + "type": "array", + "items": { + "$ref": "#/definitions/host_network" + }, + "x-go-custom-tag": "gorm:\"-\"" }, "hosts": { "description": "Hosts that are associated with this cluster.", @@ -1084,6 +1414,9 @@ func init() { "format": "uuid", "x-go-custom-tag": "gorm:\"primary_key\"" }, + "ignition_generator_version": { + "type": "string" + }, "image_info": { "x-go-custom-tag": "gorm:\"embedded;embedded_prefix:image_\"", "$ref": "#/definitions/image_info" @@ -1091,19 +1424,19 @@ func init() { "ingress_vip": { "description": "Virtual IP used for cluster ingress traffic.", "type": "string", - "format": "ipv4" + "pattern": "^(([0-9]{1,3}\\.){3}[0-9]{1,3})?$" }, "install_completed_at": { "description": "The time that this cluster completed installation.", "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime;default:0\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone;default:'2000-01-01 00:00:00z'\"" }, "install_started_at": { "description": "The time that this cluster began installation.", "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime;default:0\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone;default:'2000-01-01 
00:00:00z'\"" }, "kind": { "description": "Indicates the type of this object. Will be 'Cluster' if this is a complete object or 'ClusterLink' if it is just a link.", @@ -1112,6 +1445,11 @@ func init() { "Cluster" ] }, + "machine_network_cidr": { + "description": "A CIDR that all hosts belonging to the cluster should have an interfaces with IP address that belongs to this CIDR. The api_vip belongs to this CIDR.", + "type": "string", + "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" + }, "name": { "description": "Name of the OpenShift cluster.", "type": "string" @@ -1120,14 +1458,15 @@ func init() { "description": "Version of the OpenShift cluster.", "type": "string", "enum": [ - "4.4", "4.5" ] }, - "pull_secret": { - "description": "The pull secret that obtained from the Pull Secret page on the Red Hat OpenShift Cluster Manager site.", - "type": "string", - "x-go-custom-tag": "gorm:\"type:varchar(4096)\"" + "org_id": { + "type": "string" + }, + "pull_secret_set": { + "description": "True if the pull-secret has been added to the cluster", + "type": "boolean" }, "service_network_cidr": { "description": "The IP address pool to use for service IP addresses. You can enter only one IP address pool. 
If you need to access the services from an external network, configure load balancers and routers to manage the traffic.", @@ -1146,7 +1485,9 @@ func init() { "insufficient", "ready", "error", + "preparing-for-installation", "installing", + "finalizing", "installed" ] }, @@ -1155,11 +1496,20 @@ func init() { "type": "string", "x-go-custom-tag": "gorm:\"type:varchar(2048)\"" }, + "status_updated_at": { + "description": "The last time that the cluster status has been updated", + "type": "string", + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, "updated_at": { "description": "The last time that this cluster was updated.", "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, + "user_id": { + "type": "string" } } }, @@ -1170,11 +1520,6 @@ func init() { "openshift_version" ], "properties": { - "api_vip": { - "description": "Virtual IP used to reach the OpenShift cluster API.", - "type": "string", - "format": "ipv4" - }, "base_dns_domain": { "description": "Base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name.", "type": "string" @@ -1182,23 +1527,20 @@ func init() { "cluster_network_cidr": { "description": "IP address block from which Pod IPs are allocated This block must not overlap with existing physical networks. These IP addresses are used for the Pod network, and if you need to access the Pods from an external network, configure load balancers and routers to manage the traffic.", "type": "string", + "default": "10.128.0.0/14", "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" }, "cluster_network_host_prefix": { "description": "The subnet prefix length to assign to each individual node. 
For example, if clusterNetworkHostPrefix is set to 23, then each node is assigned a /23 subnet out of the given cidr (clusterNetworkCIDR), which allows for 510 (2^(32 - 23) - 2) pod IPs addresses. If you are required to provide access to nodes from an external network, configure load balancers and routers to manage the traffic.", "type": "integer", + "default": 23, "maximum": 32, "minimum": 1 }, - "dns_vip": { - "description": "Virtual IP used internally by the cluster for automating internal DNS requirements.", - "type": "string", - "format": "ipv4" - }, "ingress_vip": { "description": "Virtual IP used for cluster ingress traffic.", "type": "string", - "format": "ipv4" + "pattern": "^(([0-9]{1,3}\\.){3}[0-9]{1,3})?$" }, "name": { "description": "Name of the OpenShift cluster.", @@ -1208,7 +1550,6 @@ func init() { "description": "Version of the OpenShift cluster.", "type": "string", "enum": [ - "4.4", "4.5" ] }, @@ -1219,6 +1560,7 @@ func init() { "service_network_cidr": { "description": "The IP address pool to use for service IP addresses. You can enter only one IP address pool. If you need to access the services from an external network, configure load balancers and routers to manage the traffic.", "type": "string", + "default": "172.30.0.0/16", "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" }, "ssh_public_key": { @@ -1239,27 +1581,44 @@ func init() { "api_vip": { "description": "Virtual IP used to reach the OpenShift cluster API.", "type": "string", - "format": "ipv4" + "pattern": "^(([0-9]{1,3}\\.){3}[0-9]{1,3})?$", + "x-nullable": true }, "base_dns_domain": { "description": "Base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name.", - "type": "string" + "type": "string", + "x-nullable": true }, "cluster_network_cidr": { "description": "IP address block from which Pod IPs are allocated This block must not overlap with existing physical networks. 
These IP addresses are used for the Pod network, and if you need to access the Pods from an external network, configure load balancers and routers to manage the traffic.", "type": "string", - "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" + "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$", + "x-nullable": true }, "cluster_network_host_prefix": { "description": "The subnet prefix length to assign to each individual node. For example, if clusterNetworkHostPrefix is set to 23, then each node is assigned a /23 subnet out of the given cidr (clusterNetworkCIDR), which allows for 510 (2^(32 - 23) - 2) pod IPs addresses. If you are required to provide access to nodes from an external network, configure load balancers and routers to manage the traffic.", "type": "integer", "maximum": 32, - "minimum": 1 + "minimum": 1, + "x-nullable": true }, - "dns_vip": { - "description": "Virtual IP used internally by the cluster for automating internal DNS requirements.", - "type": "string", - "format": "ipv4" + "hosts_names": { + "description": "The desired hostname for hosts associated with the cluster.", + "type": "array", + "items": { + "type": "object", + "properties": { + "hostname": { + "type": "string" + }, + "id": { + "type": "string", + "format": "uuid" + } + } + }, + "x-go-custom-tag": "gorm:\"type:varchar(64)[]\"", + "x-nullable": true }, "hosts_roles": { "description": "The desired role for hosts associated with the cluster.", @@ -1272,37 +1631,53 @@ func init() { "format": "uuid" }, "role": { - "type": "string", - "enum": [ - "master", - "worker" - ] + "$ref": "#/definitions/host-role-update-params" } } }, - "x-go-custom-tag": "gorm:\"type:varchar(64)[]\"" + "x-go-custom-tag": "gorm:\"type:varchar(64)[]\"", + "x-nullable": true }, "ingress_vip": { "description": "Virtual IP used for cluster ingress traffic.", "type": "string", - "format": "ipv4" + "pattern": "^(([0-9]{1,3}\\.){3}[0-9]{1,3})?$", + "x-nullable": true }, "name": { 
"description": "OpenShift cluster name", - "type": "string" + "type": "string", + "x-nullable": true }, "pull_secret": { "description": "The pull secret that obtained from the Pull Secret page on the Red Hat OpenShift Cluster Manager site.", - "type": "string" + "type": "string", + "x-nullable": true }, "service_network_cidr": { "description": "The IP address pool to use for service IP addresses. You can enter only one IP address pool. If you need to access the services from an external network, configure load balancers and routers to manage the traffic.", "type": "string", - "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" + "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$", + "x-nullable": true }, "ssh_public_key": { "description": "SSH public key for debugging OpenShift nodes.", + "type": "string", + "x-nullable": true + } + } + }, + "completion-params": { + "type": "object", + "required": [ + "is_success" + ], + "properties": { + "error_info": { "type": "string" + }, + "is_success": { + "type": "boolean" } } }, @@ -1399,32 +1774,12 @@ func init() { } } }, - "cpu_details": { + "credentials": { "type": "object", "properties": { - "architecture": { + "console_url": { "type": "string" }, - "cpu_mhz": { - "type": "number" - }, - "cpus": { - "type": "integer" - }, - "model_name": { - "type": "string" - }, - "sockets": { - "type": "integer" - }, - "threads_per_core": { - "type": "integer" - } - } - }, - "credentials": { - "type": "object", - "properties": { "password": { "type": "string" }, @@ -1521,6 +1876,7 @@ func init() { "type": "object", "required": [ "entity_id", + "severity", "message", "event_time" ], @@ -1534,15 +1890,25 @@ func init() { "event_time": { "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" }, "message": { - "type": "string" + "type": "string", + "x-go-custom-tag": "gorm:\"type:varchar(4096)\"" }, 
"request_id": { "description": "Unique identifier for the request that caused this event to occure", "type": "string", "format": "uuid" + }, + "severity": { + "type": "string", + "enum": [ + "info", + "warning", + "error", + "critical" + ] } } }, @@ -1552,6 +1918,42 @@ func init() { "$ref": "#/definitions/event" } }, + "free-addresses-list": { + "type": "array", + "items": { + "type": "string", + "format": "ipv4" + } + }, + "free_addresses_request": { + "type": "array", + "items": { + "type": "string", + "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" + } + }, + "free_network_addresses": { + "type": "object", + "properties": { + "free_addresses": { + "type": "array", + "items": { + "type": "string", + "format": "ipv4" + } + }, + "network": { + "type": "string", + "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" + } + } + }, + "free_networks_addresses": { + "type": "array", + "items": { + "$ref": "#/definitions/free_network_addresses" + } + }, "host": { "type": "object", "required": [ @@ -1565,6 +1967,12 @@ func init() { "bootstrap": { "type": "boolean" }, + "checked_in_at": { + "description": "The last time the host's agent communicated with the service.", + "type": "string", + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, "cluster_id": { "description": "The cluster that this host is associated with.", "type": "string", @@ -1578,9 +1986,12 @@ func init() { "created_at": { "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, + "discovery_agent_version": { + "type": "string" }, - "hardware_info": { + "free_addresses": { "type": "string", "x-go-custom-tag": "gorm:\"type:text\"" }, @@ -1594,6 +2005,10 @@ func init() { "format": "uuid", "x-go-custom-tag": "gorm:\"primary_key\"" }, + "installer_version": { + "description": "Installer version", + "type": "string" + }, "inventory": { 
"type": "string", "x-go-custom-tag": "gorm:\"type:text\"" @@ -1605,13 +2020,34 @@ func init() { "Host" ] }, + "progress": { + "x-go-custom-tag": "gorm:\"embedded;embedded_prefix:progress_\"", + "$ref": "#/definitions/host-progress-info" + }, + "progress_stages": { + "type": "array", + "items": { + "$ref": "#/definitions/host-stage" + }, + "x-go-custom-tag": "gorm:\"-\"" + }, + "requested_hostname": { + "type": "string" + }, "role": { + "$ref": "#/definitions/host-role" + }, + "stage_started_at": { + "description": "Time at which the current progress stage started", "type": "string", - "enum": [ - "undefined", - "master", - "worker" - ] + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, + "stage_updated_at": { + "description": "Time at which the current progress stage was last updated", + "type": "string", + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" }, "status": { "type": "string", @@ -1621,20 +2057,36 @@ func init() { "disconnected", "insufficient", "disabled", + "preparing-for-installation", + "pending-for-input", "installing", "installing-in-progress", + "installing-pending-user-action", + "resetting-pending-user-action", "installed", - "error" + "error", + "resetting" ] }, "status_info": { "type": "string", "x-go-custom-tag": "gorm:\"type:varchar(2048)\"" }, + "status_updated_at": { + "description": "The last time that the host status has been updated", + "type": "string", + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, "updated_at": { "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, + "validations_info": { + "description": "Json formatted string containing the validations results for each validation id grouped by category (network, hardware, etc.)", + "type": "string", + "x-go-custom-tag": "gorm:\"type:varchar(2048)\"" } } }, @@ 
-1644,21 +2096,128 @@ func init() { "host_id" ], "properties": { + "discovery_agent_version": { + "type": "string" + }, "host_id": { "type": "string", "format": "uuid" } } }, - "host-install-progress-params": { - "type": "string" - }, "host-list": { "type": "array", "items": { "$ref": "#/definitions/host" } }, + "host-progress": { + "type": "object", + "required": [ + "current_stage" + ], + "properties": { + "current_stage": { + "type": "string", + "$ref": "#/definitions/host-stage" + }, + "progress_info": { + "type": "string", + "x-go-custom-tag": "gorm:\"type:varchar(2048)\"" + } + } + }, + "host-progress-info": { + "type": "object", + "required": [ + "current_stage" + ], + "properties": { + "current_stage": { + "type": "string", + "$ref": "#/definitions/host-stage" + }, + "progress_info": { + "type": "string", + "x-go-custom-tag": "gorm:\"type:varchar(2048)\"" + }, + "stage_started_at": { + "description": "Time at which the current progress stage started", + "type": "string", + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, + "stage_updated_at": { + "description": "Time at which the current progress stage was last updated", + "type": "string", + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + } + } + }, + "host-role": { + "type": "string", + "enum": [ + "master", + "worker", + "bootstrap" + ] + }, + "host-role-update-params": { + "type": "string", + "enum": [ + "master", + "worker" + ] + }, + "host-stage": { + "type": "string", + "enum": [ + "Starting installation", + "Waiting for control plane", + "Start Waiting for control plane", + "Installing", + "Writing image to disk", + "Rebooting", + "Waiting for ignition", + "Configuring", + "Joined", + "Done", + "Failed" + ] + }, + "host-validation-id": { + "type": "string", + "enum": [ + "connected", + "has-inventory", + "has-min-cpu-cores", + "has-min-valid-disks", + "has-min-memory", + "machine-cidr-defined", + "role-defined", + 
"has-cpu-cores-for-role", + "has-memory-for-role", + "hostname-unique", + "hostname-valid", + "belongs-to-machine-cidr" + ] + }, + "host_network": { + "type": "object", + "properties": { + "cidr": { + "type": "string" + }, + "host_ids": { + "type": "array", + "items": { + "type": "string", + "format": "uuid" + } + } + } + }, "image-create-params": { "type": "object", "properties": { @@ -1678,7 +2237,11 @@ func init() { "created_at": { "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, + "generator_version": { + "description": "Image generator version", + "type": "string" }, "proxy_url": { "description": "The URL of the HTTP/S proxy that agents should use to access the discovery service\nhttp://\\\u003cuser\\\u003e:\\\u003cpassword\\\u003e@\\\u003cserver\\\u003e:\\\u003cport\\\u003e/\n", @@ -1691,6 +2254,9 @@ func init() { } } }, + "ingress-cert-params": { + "type": "string" + }, "interface": { "type": "object", "properties": { @@ -1741,32 +2307,6 @@ func init() { } } }, - "introspection": { - "type": "object", - "properties": { - "block_devices": { - "type": "array", - "items": { - "$ref": "#/definitions/block-device" - } - }, - "cpu": { - "$ref": "#/definitions/cpu_details" - }, - "memory": { - "type": "array", - "items": { - "$ref": "#/definitions/memory_details" - } - }, - "nics": { - "type": "array", - "items": { - "$ref": "#/definitions/nic" - } - } - } - }, "inventory": { "type": "object", "properties": { @@ -1839,63 +2379,45 @@ func init() { } } }, - "memory": { + "list-managed-domains": { + "type": "array", + "items": { + "$ref": "#/definitions/managed-domain" + } + }, + "list-versions": { "type": "object", "properties": { - "physical_bytes": { - "type": "integer" + "release_tag": { + "type": "string" }, - "usable_bytes": { - "type": "integer" + "versions": { + "$ref": "#/definitions/versions" } } }, - "memory_details": { + "managed-domain": { "type": 
"object", "properties": { - "available": { - "type": "integer" - }, - "buff_cached": { - "type": "integer" - }, - "free": { - "type": "integer" - }, - "name": { + "domain": { "type": "string" }, - "shared": { - "type": "integer" - }, - "total": { - "type": "integer" - }, - "used": { - "type": "integer" + "provider": { + "type": "string", + "enum": [ + "route53" + ] } } }, - "nic": { + "memory": { "type": "object", "properties": { - "cidrs": { - "type": "array", - "items": { - "$ref": "#/definitions/cidr" - } - }, - "mac": { - "type": "string" - }, - "mtu": { + "physical_bytes": { "type": "integer" }, - "name": { - "type": "string" - }, - "state": { - "type": "string" + "usable_bytes": { + "type": "integer" } } }, @@ -1933,22 +2455,35 @@ func init() { }, "step_id": { "type": "string" + }, + "step_type": { + "$ref": "#/definitions/step-type" } } }, "step-type": { "type": "string", "enum": [ - "hardware-info", "connectivity-check", "execute", - "inventory" + "inventory", + "install", + "free-network-addresses", + "reset-installation" ] }, "steps": { - "type": "array", - "items": { - "$ref": "#/definitions/step" + "type": "object", + "properties": { + "instructions": { + "type": "array", + "items": { + "$ref": "#/definitions/step" + } + }, + "next_instruction_seconds": { + "type": "integer" + } } }, "steps-reply": { @@ -1970,6 +2505,12 @@ func init() { "type": "string" } } + }, + "versions": { + "type": "object", + "additionalProperties": { + "type": "string" + } } }, "tags": [ @@ -2058,47 +2599,6 @@ func init() { } } }, - "/clusters/{clusterId}/hosts/{hostId}/progress": { - "put": { - "tags": [ - "installer" - ], - "summary": "Update installation progress", - "operationId": "UpdateHostInstallProgress", - "parameters": [ - { - "type": "string", - "format": "uuid", - "description": "The ID of the cluster to retrieve", - "name": "clusterId", - "in": "path", - "required": true - }, - { - "type": "string", - "format": "uuid", - "description": "The ID of the host to 
retrieve", - "name": "hostId", - "in": "path", - "required": true - }, - { - "description": "New progress value", - "name": "host-install-progress-params", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/host-install-progress-params" - } - } - ], - "responses": { - "200": { - "description": "Update install progress" - } - } - } - }, "/clusters/{cluster_id}": { "get": { "tags": [ @@ -2232,6 +2732,102 @@ func init() { } } }, + "/clusters/{cluster_id}/actions/cancel": { + "post": { + "tags": [ + "installer" + ], + "summary": "Cancels an ongoing installation.", + "operationId": "CancelInstallation", + "parameters": [ + { + "type": "string", + "format": "uuid", + "name": "cluster_id", + "in": "path", + "required": true + } + ], + "responses": { + "202": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/cluster" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "409": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, + "/clusters/{cluster_id}/actions/complete_installation": { + "post": { + "tags": [ + "installer" + ], + "summary": "Agent API to mark a finalizing installation as complete.", + "operationId": "CompleteInstallation", + "parameters": [ + { + "type": "string", + "format": "uuid", + "name": "cluster_id", + "in": "path", + "required": true + }, + { + "name": "completion-params", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/completion-params" + } + } + ], + "responses": { + "202": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/cluster" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "409": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": 
{ + "$ref": "#/definitions/error" + } + } + } + } + }, "/clusters/{cluster_id}/actions/install": { "post": { "tags": [ @@ -2282,6 +2878,50 @@ func init() { } } }, + "/clusters/{cluster_id}/actions/reset": { + "post": { + "tags": [ + "installer" + ], + "summary": "Resets a failed installation.", + "operationId": "ResetCluster", + "parameters": [ + { + "type": "string", + "format": "uuid", + "name": "cluster_id", + "in": "path", + "required": true + } + ], + "responses": { + "202": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/cluster" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "409": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, "/clusters/{cluster_id}/credentials": { "get": { "tags": [ @@ -2351,7 +2991,9 @@ func init() { "metadata.json", "worker.ign", "kubeadmin-password", - "kubeconfig" + "kubeconfig", + "kubeconfig-noingress", + "install-config.yaml" ], "type": "string", "name": "file_name", @@ -2360,10 +3002,114 @@ func init() { } ], "responses": { - "200": { + "200": { + "description": "Success.", + "schema": { + "type": "file" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "409": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, + "/clusters/{cluster_id}/downloads/image": { + "get": { + "produces": [ + "application/octet-stream" + ], + "tags": [ + "installer" + ], + "summary": "Downloads the OpenShift per-cluster discovery ISO.", + "operationId": "DownloadClusterISO", + "parameters": [ + { + "type": "string", + "format": "uuid", + "name": "cluster_id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": 
"Success.", + "schema": { + "type": "string", + "format": "binary" + } + }, + "400": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + }, + "post": { + "tags": [ + "installer" + ], + "summary": "Creates a new OpenShift per-cluster discovery ISO.", + "operationId": "GenerateClusterISO", + "parameters": [ + { + "type": "string", + "format": "uuid", + "name": "cluster_id", + "in": "path", + "required": true + }, + { + "name": "image-create-params", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/image-create-params" + } + } + ], + "responses": { + "201": { "description": "Success.", "schema": { - "type": "file" + "$ref": "#/definitions/cluster" + } + }, + "400": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" } }, "404": { @@ -2387,7 +3133,7 @@ func init() { } } }, - "/clusters/{cluster_id}/downloads/image": { + "/clusters/{cluster_id}/downloads/kubeconfig": { "get": { "produces": [ "application/octet-stream" @@ -2395,8 +3141,8 @@ func init() { "tags": [ "installer" ], - "summary": "Downloads the OpenShift per-cluster discovery ISO.", - "operationId": "DownloadClusterISO", + "summary": "Downloads the kubeconfig file for this cluster.", + "operationId": "DownloadClusterKubeconfig", "parameters": [ { "type": "string", @@ -2414,13 +3160,13 @@ func init() { "format": "binary" } }, - "400": { + "404": { "description": "Error.", "schema": { "$ref": "#/definitions/error" } }, - "404": { + "409": { "description": "Error.", "schema": { "$ref": "#/definitions/error" @@ -2433,13 +3179,15 @@ func init() { } } } - }, - "post": { + } + }, + "/clusters/{cluster_id}/free_addresses": { + "get": { "tags": [ "installer" ], - "summary": "Creates a new OpenShift per-cluster discovery ISO.", - "operationId": 
"GenerateClusterISO", + "summary": "Retrieves the free address list for a network.", + "operationId": "GetFreeAddresses", "parameters": [ { "type": "string", @@ -2449,25 +3197,31 @@ func init() { "required": true }, { - "name": "image-create-params", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/image-create-params" - } + "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$", + "type": "string", + "name": "network", + "in": "query", + "required": true + }, + { + "maximum": 8000, + "minimum": 1, + "type": "integer", + "default": 8000, + "name": "limit", + "in": "query" + }, + { + "type": "string", + "name": "prefix", + "in": "query" } ], "responses": { - "201": { - "description": "Success.", - "schema": { - "$ref": "#/definitions/cluster" - } - }, - "400": { - "description": "Error.", + "200": { + "description": "Success", "schema": { - "$ref": "#/definitions/error" + "$ref": "#/definitions/free-addresses-list" } }, "404": { @@ -2476,9 +3230,6 @@ func init() { "$ref": "#/definitions/error" } }, - "409": { - "description": "Error." - }, "500": { "description": "Error.", "schema": { @@ -2555,6 +3306,18 @@ func init() { "$ref": "#/definitions/error" } }, + "403": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, "500": { "description": "Error.", "schema": { @@ -2729,8 +3492,11 @@ func init() { } ], "responses": { - "204": { - "description": "Success." + "200": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/host" + } }, "404": { "description": "Error.", @@ -2775,8 +3541,11 @@ func init() { } ], "responses": { - "204": { - "description": "Success." 
+ "200": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/host" + } }, "404": { "description": "Error.", @@ -2897,6 +3666,148 @@ func init() { } } }, + "/clusters/{cluster_id}/hosts/{host_id}/progress": { + "put": { + "tags": [ + "installer" + ], + "summary": "Update installation progress", + "operationId": "UpdateHostInstallProgress", + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "The ID of the cluster to retrieve", + "name": "cluster_id", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "uuid", + "description": "The ID of the host to retrieve", + "name": "host_id", + "in": "path", + "required": true + }, + { + "description": "New progress value", + "name": "host-progress", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/host-progress" + } + } + ], + "responses": { + "200": { + "description": "Update install progress" + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, + "/clusters/{cluster_id}/uploads/ingress-cert": { + "post": { + "tags": [ + "installer" + ], + "summary": "Transfer the ingress certificate for the cluster.", + "operationId": "UploadClusterIngressCert", + "parameters": [ + { + "type": "string", + "format": "uuid", + "name": "cluster_id", + "in": "path", + "required": true + }, + { + "name": "ingress-cert-params", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/ingress-cert-params" + } + } + ], + "responses": { + "201": { + "description": "Success." 
+ }, + "400": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "404": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, + "/component_versions": { + "get": { + "tags": [ + "versions" + ], + "summary": "List of componenets versions", + "operationId": "ListComponentVersions", + "responses": { + "200": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/list-versions" + } + } + } + } + }, + "/domains": { + "get": { + "tags": [ + "managed_domains" + ], + "summary": "List of managed DNS domains", + "operationId": "ListManagedDomains", + "responses": { + "200": { + "description": "Success.", + "schema": { + "$ref": "#/definitions/list-managed-domains" + } + }, + "500": { + "description": "Error.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, "/events/{entity_id}": { "get": { "tags": [ @@ -2931,51 +3842,27 @@ func init() { } }, "definitions": { - "ClusterUpdateParamsHostsRolesItems0": { + "ClusterUpdateParamsHostsNamesItems0": { "type": "object", "properties": { + "hostname": { + "type": "string" + }, "id": { "type": "string", "format": "uuid" - }, - "role": { - "type": "string", - "enum": [ - "master", - "worker" - ] } } }, - "block-device": { + "ClusterUpdateParamsHostsRolesItems0": { "type": "object", "properties": { - "device_type": { - "type": "string" - }, - "fstype": { - "type": "string" - }, - "major_device_number": { - "type": "integer" - }, - "minor_device_number": { - "type": "integer" - }, - "mountpoint": { - "type": "string" - }, - "name": { - "type": "string" - }, - "read_only": { - "type": "boolean" - }, - "removable_device": { - "type": "integer" + "id": { + "type": "string", + "format": "uuid" }, - "size": { - "type": "integer" + "role": { + "$ref": "#/definitions/host-role-update-params" } } }, @@ -2990,17 +3877,6 @@ func init() { } } }, - 
"cidr": { - "type": "object", - "properties": { - "ip_address": { - "type": "string" - }, - "mask": { - "type": "integer" - } - } - }, "cluster": { "type": "object", "required": [ @@ -3015,7 +3891,7 @@ func init() { "api_vip": { "description": "Virtual IP used to reach the OpenShift cluster API.", "type": "string", - "format": "ipv4" + "pattern": "^(([0-9]{1,3}\\.){3}[0-9]{1,3})?$" }, "base_dns_domain": { "description": "Base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name.", @@ -3036,12 +3912,15 @@ func init() { "description": "The time that this cluster was created.", "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" }, - "dns_vip": { - "description": "Virtual IP used internally by the cluster for automating internal DNS requirements.", - "type": "string", - "format": "ipv4" + "host_networks": { + "description": "List of host networks to be filled during query.", + "type": "array", + "items": { + "$ref": "#/definitions/host_network" + }, + "x-go-custom-tag": "gorm:\"-\"" }, "hosts": { "description": "Hosts that are associated with this cluster.", @@ -3062,6 +3941,9 @@ func init() { "format": "uuid", "x-go-custom-tag": "gorm:\"primary_key\"" }, + "ignition_generator_version": { + "type": "string" + }, "image_info": { "x-go-custom-tag": "gorm:\"embedded;embedded_prefix:image_\"", "$ref": "#/definitions/image_info" @@ -3069,19 +3951,19 @@ func init() { "ingress_vip": { "description": "Virtual IP used for cluster ingress traffic.", "type": "string", - "format": "ipv4" + "pattern": "^(([0-9]{1,3}\\.){3}[0-9]{1,3})?$" }, "install_completed_at": { "description": "The time that this cluster completed installation.", "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime;default:0\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone;default:'2000-01-01 00:00:00z'\"" }, 
"install_started_at": { "description": "The time that this cluster began installation.", "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime;default:0\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone;default:'2000-01-01 00:00:00z'\"" }, "kind": { "description": "Indicates the type of this object. Will be 'Cluster' if this is a complete object or 'ClusterLink' if it is just a link.", @@ -3090,6 +3972,11 @@ func init() { "Cluster" ] }, + "machine_network_cidr": { + "description": "A CIDR that all hosts belonging to the cluster should have an interfaces with IP address that belongs to this CIDR. The api_vip belongs to this CIDR.", + "type": "string", + "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" + }, "name": { "description": "Name of the OpenShift cluster.", "type": "string" @@ -3098,15 +3985,16 @@ func init() { "description": "Version of the OpenShift cluster.", "type": "string", "enum": [ - "4.4", "4.5" ] }, - "pull_secret": { - "description": "The pull secret that obtained from the Pull Secret page on the Red Hat OpenShift Cluster Manager site.", - "type": "string", - "x-go-custom-tag": "gorm:\"type:varchar(4096)\"" - }, + "org_id": { + "type": "string" + }, + "pull_secret_set": { + "description": "True if the pull-secret has been added to the cluster", + "type": "boolean" + }, "service_network_cidr": { "description": "The IP address pool to use for service IP addresses. You can enter only one IP address pool. 
If you need to access the services from an external network, configure load balancers and routers to manage the traffic.", "type": "string", @@ -3124,7 +4012,9 @@ func init() { "insufficient", "ready", "error", + "preparing-for-installation", "installing", + "finalizing", "installed" ] }, @@ -3133,11 +4023,20 @@ func init() { "type": "string", "x-go-custom-tag": "gorm:\"type:varchar(2048)\"" }, + "status_updated_at": { + "description": "The last time that the cluster status has been updated", + "type": "string", + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, "updated_at": { "description": "The last time that this cluster was updated.", "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, + "user_id": { + "type": "string" } } }, @@ -3148,11 +4047,6 @@ func init() { "openshift_version" ], "properties": { - "api_vip": { - "description": "Virtual IP used to reach the OpenShift cluster API.", - "type": "string", - "format": "ipv4" - }, "base_dns_domain": { "description": "Base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name.", "type": "string" @@ -3160,23 +4054,20 @@ func init() { "cluster_network_cidr": { "description": "IP address block from which Pod IPs are allocated This block must not overlap with existing physical networks. These IP addresses are used for the Pod network, and if you need to access the Pods from an external network, configure load balancers and routers to manage the traffic.", "type": "string", + "default": "10.128.0.0/14", "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" }, "cluster_network_host_prefix": { "description": "The subnet prefix length to assign to each individual node. 
For example, if clusterNetworkHostPrefix is set to 23, then each node is assigned a /23 subnet out of the given cidr (clusterNetworkCIDR), which allows for 510 (2^(32 - 23) - 2) pod IPs addresses. If you are required to provide access to nodes from an external network, configure load balancers and routers to manage the traffic.", "type": "integer", + "default": 23, "maximum": 32, "minimum": 1 }, - "dns_vip": { - "description": "Virtual IP used internally by the cluster for automating internal DNS requirements.", - "type": "string", - "format": "ipv4" - }, "ingress_vip": { "description": "Virtual IP used for cluster ingress traffic.", "type": "string", - "format": "ipv4" + "pattern": "^(([0-9]{1,3}\\.){3}[0-9]{1,3})?$" }, "name": { "description": "Name of the OpenShift cluster.", @@ -3186,7 +4077,6 @@ func init() { "description": "Version of the OpenShift cluster.", "type": "string", "enum": [ - "4.4", "4.5" ] }, @@ -3197,6 +4087,7 @@ func init() { "service_network_cidr": { "description": "The IP address pool to use for service IP addresses. You can enter only one IP address pool. If you need to access the services from an external network, configure load balancers and routers to manage the traffic.", "type": "string", + "default": "172.30.0.0/16", "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" }, "ssh_public_key": { @@ -3217,27 +4108,35 @@ func init() { "api_vip": { "description": "Virtual IP used to reach the OpenShift cluster API.", "type": "string", - "format": "ipv4" + "pattern": "^(([0-9]{1,3}\\.){3}[0-9]{1,3})?$", + "x-nullable": true }, "base_dns_domain": { "description": "Base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name.", - "type": "string" + "type": "string", + "x-nullable": true }, "cluster_network_cidr": { "description": "IP address block from which Pod IPs are allocated This block must not overlap with existing physical networks. 
These IP addresses are used for the Pod network, and if you need to access the Pods from an external network, configure load balancers and routers to manage the traffic.", "type": "string", - "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" + "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$", + "x-nullable": true }, "cluster_network_host_prefix": { "description": "The subnet prefix length to assign to each individual node. For example, if clusterNetworkHostPrefix is set to 23, then each node is assigned a /23 subnet out of the given cidr (clusterNetworkCIDR), which allows for 510 (2^(32 - 23) - 2) pod IPs addresses. If you are required to provide access to nodes from an external network, configure load balancers and routers to manage the traffic.", "type": "integer", "maximum": 32, - "minimum": 1 + "minimum": 1, + "x-nullable": true }, - "dns_vip": { - "description": "Virtual IP used internally by the cluster for automating internal DNS requirements.", - "type": "string", - "format": "ipv4" + "hosts_names": { + "description": "The desired hostname for hosts associated with the cluster.", + "type": "array", + "items": { + "$ref": "#/definitions/ClusterUpdateParamsHostsNamesItems0" + }, + "x-go-custom-tag": "gorm:\"type:varchar(64)[]\"", + "x-nullable": true }, "hosts_roles": { "description": "The desired role for hosts associated with the cluster.", @@ -3245,29 +4144,49 @@ func init() { "items": { "$ref": "#/definitions/ClusterUpdateParamsHostsRolesItems0" }, - "x-go-custom-tag": "gorm:\"type:varchar(64)[]\"" + "x-go-custom-tag": "gorm:\"type:varchar(64)[]\"", + "x-nullable": true }, "ingress_vip": { "description": "Virtual IP used for cluster ingress traffic.", "type": "string", - "format": "ipv4" + "pattern": "^(([0-9]{1,3}\\.){3}[0-9]{1,3})?$", + "x-nullable": true }, "name": { "description": "OpenShift cluster name", - "type": "string" + "type": "string", + "x-nullable": true }, "pull_secret": { "description": "The pull 
secret that obtained from the Pull Secret page on the Red Hat OpenShift Cluster Manager site.", - "type": "string" + "type": "string", + "x-nullable": true }, "service_network_cidr": { "description": "The IP address pool to use for service IP addresses. You can enter only one IP address pool. If you need to access the services from an external network, configure load balancers and routers to manage the traffic.", "type": "string", - "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" + "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$", + "x-nullable": true }, "ssh_public_key": { "description": "SSH public key for debugging OpenShift nodes.", + "type": "string", + "x-nullable": true + } + } + }, + "completion-params": { + "type": "object", + "required": [ + "is_success" + ], + "properties": { + "error_info": { "type": "string" + }, + "is_success": { + "type": "boolean" } } }, @@ -3364,32 +4283,12 @@ func init() { } } }, - "cpu_details": { + "credentials": { "type": "object", "properties": { - "architecture": { + "console_url": { "type": "string" }, - "cpu_mhz": { - "type": "number" - }, - "cpus": { - "type": "integer" - }, - "model_name": { - "type": "string" - }, - "sockets": { - "type": "integer" - }, - "threads_per_core": { - "type": "integer" - } - } - }, - "credentials": { - "type": "object", - "properties": { "password": { "type": "string" }, @@ -3486,6 +4385,7 @@ func init() { "type": "object", "required": [ "entity_id", + "severity", "message", "event_time" ], @@ -3499,15 +4399,25 @@ func init() { "event_time": { "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" }, "message": { - "type": "string" + "type": "string", + "x-go-custom-tag": "gorm:\"type:varchar(4096)\"" }, "request_id": { "description": "Unique identifier for the request that caused this event to occure", "type": "string", "format": "uuid" + }, + "severity": 
{ + "type": "string", + "enum": [ + "info", + "warning", + "error", + "critical" + ] } } }, @@ -3517,6 +4427,42 @@ func init() { "$ref": "#/definitions/event" } }, + "free-addresses-list": { + "type": "array", + "items": { + "type": "string", + "format": "ipv4" + } + }, + "free_addresses_request": { + "type": "array", + "items": { + "type": "string", + "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" + } + }, + "free_network_addresses": { + "type": "object", + "properties": { + "free_addresses": { + "type": "array", + "items": { + "type": "string", + "format": "ipv4" + } + }, + "network": { + "type": "string", + "pattern": "^([0-9]{1,3}\\.){3}[0-9]{1,3}\\/[0-9]|[1-2][0-9]|3[0-2]?$" + } + } + }, + "free_networks_addresses": { + "type": "array", + "items": { + "$ref": "#/definitions/free_network_addresses" + } + }, "host": { "type": "object", "required": [ @@ -3530,6 +4476,12 @@ func init() { "bootstrap": { "type": "boolean" }, + "checked_in_at": { + "description": "The last time the host's agent communicated with the service.", + "type": "string", + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, "cluster_id": { "description": "The cluster that this host is associated with.", "type": "string", @@ -3543,9 +4495,12 @@ func init() { "created_at": { "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, + "discovery_agent_version": { + "type": "string" }, - "hardware_info": { + "free_addresses": { "type": "string", "x-go-custom-tag": "gorm:\"type:text\"" }, @@ -3559,6 +4514,10 @@ func init() { "format": "uuid", "x-go-custom-tag": "gorm:\"primary_key\"" }, + "installer_version": { + "description": "Installer version", + "type": "string" + }, "inventory": { "type": "string", "x-go-custom-tag": "gorm:\"type:text\"" @@ -3570,13 +4529,34 @@ func init() { "Host" ] }, + "progress": { + "x-go-custom-tag": 
"gorm:\"embedded;embedded_prefix:progress_\"", + "$ref": "#/definitions/host-progress-info" + }, + "progress_stages": { + "type": "array", + "items": { + "$ref": "#/definitions/host-stage" + }, + "x-go-custom-tag": "gorm:\"-\"" + }, + "requested_hostname": { + "type": "string" + }, "role": { + "$ref": "#/definitions/host-role" + }, + "stage_started_at": { + "description": "Time at which the current progress stage started", "type": "string", - "enum": [ - "undefined", - "master", - "worker" - ] + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, + "stage_updated_at": { + "description": "Time at which the current progress stage was last updated", + "type": "string", + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" }, "status": { "type": "string", @@ -3586,20 +4566,36 @@ func init() { "disconnected", "insufficient", "disabled", + "preparing-for-installation", + "pending-for-input", "installing", "installing-in-progress", + "installing-pending-user-action", + "resetting-pending-user-action", "installed", - "error" + "error", + "resetting" ] }, "status_info": { "type": "string", "x-go-custom-tag": "gorm:\"type:varchar(2048)\"" }, + "status_updated_at": { + "description": "The last time that the host status has been updated", + "type": "string", + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, "updated_at": { "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, + "validations_info": { + "description": "Json formatted string containing the validations results for each validation id grouped by category (network, hardware, etc.)", + "type": "string", + "x-go-custom-tag": "gorm:\"type:varchar(2048)\"" } } }, @@ -3609,21 +4605,128 @@ func init() { "host_id" ], "properties": { + "discovery_agent_version": { + "type": "string" + }, "host_id": { "type": 
"string", "format": "uuid" } } }, - "host-install-progress-params": { - "type": "string" - }, "host-list": { "type": "array", "items": { "$ref": "#/definitions/host" } }, + "host-progress": { + "type": "object", + "required": [ + "current_stage" + ], + "properties": { + "current_stage": { + "type": "string", + "$ref": "#/definitions/host-stage" + }, + "progress_info": { + "type": "string", + "x-go-custom-tag": "gorm:\"type:varchar(2048)\"" + } + } + }, + "host-progress-info": { + "type": "object", + "required": [ + "current_stage" + ], + "properties": { + "current_stage": { + "type": "string", + "$ref": "#/definitions/host-stage" + }, + "progress_info": { + "type": "string", + "x-go-custom-tag": "gorm:\"type:varchar(2048)\"" + }, + "stage_started_at": { + "description": "Time at which the current progress stage started", + "type": "string", + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, + "stage_updated_at": { + "description": "Time at which the current progress stage was last updated", + "type": "string", + "format": "date-time", + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + } + } + }, + "host-role": { + "type": "string", + "enum": [ + "master", + "worker", + "bootstrap" + ] + }, + "host-role-update-params": { + "type": "string", + "enum": [ + "master", + "worker" + ] + }, + "host-stage": { + "type": "string", + "enum": [ + "Starting installation", + "Waiting for control plane", + "Start Waiting for control plane", + "Installing", + "Writing image to disk", + "Rebooting", + "Waiting for ignition", + "Configuring", + "Joined", + "Done", + "Failed" + ] + }, + "host-validation-id": { + "type": "string", + "enum": [ + "connected", + "has-inventory", + "has-min-cpu-cores", + "has-min-valid-disks", + "has-min-memory", + "machine-cidr-defined", + "role-defined", + "has-cpu-cores-for-role", + "has-memory-for-role", + "hostname-unique", + "hostname-valid", + "belongs-to-machine-cidr" + ] + }, + "host_network": 
{ + "type": "object", + "properties": { + "cidr": { + "type": "string" + }, + "host_ids": { + "type": "array", + "items": { + "type": "string", + "format": "uuid" + } + } + } + }, "image-create-params": { "type": "object", "properties": { @@ -3643,7 +4746,11 @@ func init() { "created_at": { "type": "string", "format": "date-time", - "x-go-custom-tag": "gorm:\"type:datetime\"" + "x-go-custom-tag": "gorm:\"type:timestamp with time zone\"" + }, + "generator_version": { + "description": "Image generator version", + "type": "string" }, "proxy_url": { "description": "The URL of the HTTP/S proxy that agents should use to access the discovery service\nhttp://\\\u003cuser\\\u003e:\\\u003cpassword\\\u003e@\\\u003cserver\\\u003e:\\\u003cport\\\u003e/\n", @@ -3656,6 +4763,9 @@ func init() { } } }, + "ingress-cert-params": { + "type": "string" + }, "interface": { "type": "object", "properties": { @@ -3706,32 +4816,6 @@ func init() { } } }, - "introspection": { - "type": "object", - "properties": { - "block_devices": { - "type": "array", - "items": { - "$ref": "#/definitions/block-device" - } - }, - "cpu": { - "$ref": "#/definitions/cpu_details" - }, - "memory": { - "type": "array", - "items": { - "$ref": "#/definitions/memory_details" - } - }, - "nics": { - "type": "array", - "items": { - "$ref": "#/definitions/nic" - } - } - } - }, "inventory": { "type": "object", "properties": { @@ -3804,63 +4888,45 @@ func init() { } } }, - "memory": { + "list-managed-domains": { + "type": "array", + "items": { + "$ref": "#/definitions/managed-domain" + } + }, + "list-versions": { "type": "object", "properties": { - "physical_bytes": { - "type": "integer" + "release_tag": { + "type": "string" }, - "usable_bytes": { - "type": "integer" + "versions": { + "$ref": "#/definitions/versions" } } }, - "memory_details": { + "managed-domain": { "type": "object", "properties": { - "available": { - "type": "integer" - }, - "buff_cached": { - "type": "integer" - }, - "free": { - "type": "integer" - }, - 
"name": { + "domain": { "type": "string" }, - "shared": { - "type": "integer" - }, - "total": { - "type": "integer" - }, - "used": { - "type": "integer" + "provider": { + "type": "string", + "enum": [ + "route53" + ] } } }, - "nic": { + "memory": { "type": "object", "properties": { - "cidrs": { - "type": "array", - "items": { - "$ref": "#/definitions/cidr" - } - }, - "mac": { - "type": "string" - }, - "mtu": { + "physical_bytes": { "type": "integer" }, - "name": { - "type": "string" - }, - "state": { - "type": "string" + "usable_bytes": { + "type": "integer" } } }, @@ -3898,22 +4964,35 @@ func init() { }, "step_id": { "type": "string" + }, + "step_type": { + "$ref": "#/definitions/step-type" } } }, "step-type": { "type": "string", "enum": [ - "hardware-info", "connectivity-check", "execute", - "inventory" + "inventory", + "install", + "free-network-addresses", + "reset-installation" ] }, "steps": { - "type": "array", - "items": { - "$ref": "#/definitions/step" + "type": "object", + "properties": { + "instructions": { + "type": "array", + "items": { + "$ref": "#/definitions/step" + } + }, + "next_instruction_seconds": { + "type": "integer" + } } }, "steps-reply": { @@ -3935,6 +5014,12 @@ func init() { "type": "string" } } + }, + "versions": { + "type": "object", + "additionalProperties": { + "type": "string" + } } }, "tags": [ diff --git a/restapi/mock_EventsAPI.go b/restapi/mock_EventsAPI.go deleted file mode 100644 index 030890b30..000000000 --- a/restapi/mock_EventsAPI.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package restapi - -import ( - context "context" - - events "github.com/filanov/bm-inventory/restapi/operations/events" - middleware "github.com/go-openapi/runtime/middleware" - - mock "github.com/stretchr/testify/mock" -) - -// MockEventsAPI is an autogenerated mock type for the EventsAPI type -type MockEventsAPI struct { - mock.Mock -} - -// ListEvents provides a mock function with given fields: ctx, params -func (_m *MockEventsAPI) ListEvents(ctx context.Context, params events.ListEventsParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, events.ListEventsParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} diff --git a/restapi/mock_InstallerAPI.go b/restapi/mock_InstallerAPI.go deleted file mode 100644 index b73c753dd..000000000 --- a/restapi/mock_InstallerAPI.go +++ /dev/null @@ -1,337 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package restapi - -import ( - context "context" - - installer "github.com/filanov/bm-inventory/restapi/operations/installer" - middleware "github.com/go-openapi/runtime/middleware" - - mock "github.com/stretchr/testify/mock" -) - -// MockInstallerAPI is an autogenerated mock type for the InstallerAPI type -type MockInstallerAPI struct { - mock.Mock -} - -// DeregisterCluster provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) DeregisterCluster(ctx context.Context, params installer.DeregisterClusterParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.DeregisterClusterParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// DeregisterHost provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) DeregisterHost(ctx context.Context, params installer.DeregisterHostParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.DeregisterHostParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// DisableHost provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) DisableHost(ctx context.Context, params installer.DisableHostParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.DisableHostParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// DownloadClusterFiles provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) DownloadClusterFiles(ctx 
context.Context, params installer.DownloadClusterFilesParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.DownloadClusterFilesParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// DownloadClusterISO provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) DownloadClusterISO(ctx context.Context, params installer.DownloadClusterISOParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.DownloadClusterISOParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// EnableHost provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) EnableHost(ctx context.Context, params installer.EnableHostParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.EnableHostParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// GenerateClusterISO provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) GenerateClusterISO(ctx context.Context, params installer.GenerateClusterISOParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.GenerateClusterISOParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// GetCluster provides a mock function with given fields: ctx, params -func (_m 
*MockInstallerAPI) GetCluster(ctx context.Context, params installer.GetClusterParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.GetClusterParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// GetCredentials provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) GetCredentials(ctx context.Context, params installer.GetCredentialsParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.GetCredentialsParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// GetHost provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) GetHost(ctx context.Context, params installer.GetHostParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.GetHostParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// GetNextSteps provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) GetNextSteps(ctx context.Context, params installer.GetNextStepsParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.GetNextStepsParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// InstallCluster provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) InstallCluster(ctx 
context.Context, params installer.InstallClusterParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.InstallClusterParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// ListClusters provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) ListClusters(ctx context.Context, params installer.ListClustersParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.ListClustersParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// ListHosts provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) ListHosts(ctx context.Context, params installer.ListHostsParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.ListHostsParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// PostStepReply provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) PostStepReply(ctx context.Context, params installer.PostStepReplyParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.PostStepReplyParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// RegisterCluster provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) RegisterCluster(ctx context.Context, 
params installer.RegisterClusterParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.RegisterClusterParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// RegisterHost provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) RegisterHost(ctx context.Context, params installer.RegisterHostParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.RegisterHostParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// SetDebugStep provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) SetDebugStep(ctx context.Context, params installer.SetDebugStepParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.SetDebugStepParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// UpdateCluster provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) UpdateCluster(ctx context.Context, params installer.UpdateClusterParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.UpdateClusterParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} - -// UpdateHostInstallProgress provides a mock function with given fields: ctx, params -func (_m *MockInstallerAPI) UpdateHostInstallProgress(ctx 
context.Context, params installer.UpdateHostInstallProgressParams) middleware.Responder { - ret := _m.Called(ctx, params) - - var r0 middleware.Responder - if rf, ok := ret.Get(0).(func(context.Context, installer.UpdateHostInstallProgressParams) middleware.Responder); ok { - r0 = rf(ctx, params) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(middleware.Responder) - } - } - - return r0 -} diff --git a/restapi/operations/assisted_install_api.go b/restapi/operations/assisted_install_api.go index 34cc27b7c..007694242 100644 --- a/restapi/operations/assisted_install_api.go +++ b/restapi/operations/assisted_install_api.go @@ -21,6 +21,8 @@ import ( "github.com/filanov/bm-inventory/restapi/operations/events" "github.com/filanov/bm-inventory/restapi/operations/installer" + "github.com/filanov/bm-inventory/restapi/operations/managed_domains" + "github.com/filanov/bm-inventory/restapi/operations/versions" ) // NewAssistedInstallAPI creates a new AssistedInstall instance @@ -45,6 +47,12 @@ func NewAssistedInstallAPI(spec *loads.Document) *AssistedInstallAPI { BinProducer: runtime.ByteStreamProducer(), JSONProducer: runtime.JSONProducer(), + InstallerCancelInstallationHandler: installer.CancelInstallationHandlerFunc(func(params installer.CancelInstallationParams) middleware.Responder { + return middleware.NotImplemented("operation installer.CancelInstallation has not yet been implemented") + }), + InstallerCompleteInstallationHandler: installer.CompleteInstallationHandlerFunc(func(params installer.CompleteInstallationParams) middleware.Responder { + return middleware.NotImplemented("operation installer.CompleteInstallation has not yet been implemented") + }), InstallerDeregisterClusterHandler: installer.DeregisterClusterHandlerFunc(func(params installer.DeregisterClusterParams) middleware.Responder { return middleware.NotImplemented("operation installer.DeregisterCluster has not yet been implemented") }), @@ -60,6 +68,9 @@ func NewAssistedInstallAPI(spec 
*loads.Document) *AssistedInstallAPI { InstallerDownloadClusterISOHandler: installer.DownloadClusterISOHandlerFunc(func(params installer.DownloadClusterISOParams) middleware.Responder { return middleware.NotImplemented("operation installer.DownloadClusterISO has not yet been implemented") }), + InstallerDownloadClusterKubeconfigHandler: installer.DownloadClusterKubeconfigHandlerFunc(func(params installer.DownloadClusterKubeconfigParams) middleware.Responder { + return middleware.NotImplemented("operation installer.DownloadClusterKubeconfig has not yet been implemented") + }), InstallerEnableHostHandler: installer.EnableHostHandlerFunc(func(params installer.EnableHostParams) middleware.Responder { return middleware.NotImplemented("operation installer.EnableHost has not yet been implemented") }), @@ -72,6 +83,9 @@ func NewAssistedInstallAPI(spec *loads.Document) *AssistedInstallAPI { InstallerGetCredentialsHandler: installer.GetCredentialsHandlerFunc(func(params installer.GetCredentialsParams) middleware.Responder { return middleware.NotImplemented("operation installer.GetCredentials has not yet been implemented") }), + InstallerGetFreeAddressesHandler: installer.GetFreeAddressesHandlerFunc(func(params installer.GetFreeAddressesParams) middleware.Responder { + return middleware.NotImplemented("operation installer.GetFreeAddresses has not yet been implemented") + }), InstallerGetHostHandler: installer.GetHostHandlerFunc(func(params installer.GetHostParams) middleware.Responder { return middleware.NotImplemented("operation installer.GetHost has not yet been implemented") }), @@ -84,12 +98,18 @@ func NewAssistedInstallAPI(spec *loads.Document) *AssistedInstallAPI { InstallerListClustersHandler: installer.ListClustersHandlerFunc(func(params installer.ListClustersParams) middleware.Responder { return middleware.NotImplemented("operation installer.ListClusters has not yet been implemented") }), + VersionsListComponentVersionsHandler: 
versions.ListComponentVersionsHandlerFunc(func(params versions.ListComponentVersionsParams) middleware.Responder { + return middleware.NotImplemented("operation versions.ListComponentVersions has not yet been implemented") + }), EventsListEventsHandler: events.ListEventsHandlerFunc(func(params events.ListEventsParams) middleware.Responder { return middleware.NotImplemented("operation events.ListEvents has not yet been implemented") }), InstallerListHostsHandler: installer.ListHostsHandlerFunc(func(params installer.ListHostsParams) middleware.Responder { return middleware.NotImplemented("operation installer.ListHosts has not yet been implemented") }), + ManagedDomainsListManagedDomainsHandler: managed_domains.ListManagedDomainsHandlerFunc(func(params managed_domains.ListManagedDomainsParams) middleware.Responder { + return middleware.NotImplemented("operation managed_domains.ListManagedDomains has not yet been implemented") + }), InstallerPostStepReplyHandler: installer.PostStepReplyHandlerFunc(func(params installer.PostStepReplyParams) middleware.Responder { return middleware.NotImplemented("operation installer.PostStepReply has not yet been implemented") }), @@ -99,6 +119,9 @@ func NewAssistedInstallAPI(spec *loads.Document) *AssistedInstallAPI { InstallerRegisterHostHandler: installer.RegisterHostHandlerFunc(func(params installer.RegisterHostParams) middleware.Responder { return middleware.NotImplemented("operation installer.RegisterHost has not yet been implemented") }), + InstallerResetClusterHandler: installer.ResetClusterHandlerFunc(func(params installer.ResetClusterParams) middleware.Responder { + return middleware.NotImplemented("operation installer.ResetCluster has not yet been implemented") + }), InstallerSetDebugStepHandler: installer.SetDebugStepHandlerFunc(func(params installer.SetDebugStepParams) middleware.Responder { return middleware.NotImplemented("operation installer.SetDebugStep has not yet been implemented") }), @@ -108,6 +131,9 @@ func 
NewAssistedInstallAPI(spec *loads.Document) *AssistedInstallAPI { InstallerUpdateHostInstallProgressHandler: installer.UpdateHostInstallProgressHandlerFunc(func(params installer.UpdateHostInstallProgressParams) middleware.Responder { return middleware.NotImplemented("operation installer.UpdateHostInstallProgress has not yet been implemented") }), + InstallerUploadClusterIngressCertHandler: installer.UploadClusterIngressCertHandlerFunc(func(params installer.UploadClusterIngressCertParams) middleware.Responder { + return middleware.NotImplemented("operation installer.UploadClusterIngressCert has not yet been implemented") + }), } } @@ -144,6 +170,10 @@ type AssistedInstallAPI struct { // - application/json JSONProducer runtime.Producer + // InstallerCancelInstallationHandler sets the operation handler for the cancel installation operation + InstallerCancelInstallationHandler installer.CancelInstallationHandler + // InstallerCompleteInstallationHandler sets the operation handler for the complete installation operation + InstallerCompleteInstallationHandler installer.CompleteInstallationHandler // InstallerDeregisterClusterHandler sets the operation handler for the deregister cluster operation InstallerDeregisterClusterHandler installer.DeregisterClusterHandler // InstallerDeregisterHostHandler sets the operation handler for the deregister host operation @@ -154,6 +184,8 @@ type AssistedInstallAPI struct { InstallerDownloadClusterFilesHandler installer.DownloadClusterFilesHandler // InstallerDownloadClusterISOHandler sets the operation handler for the download cluster i s o operation InstallerDownloadClusterISOHandler installer.DownloadClusterISOHandler + // InstallerDownloadClusterKubeconfigHandler sets the operation handler for the download cluster kubeconfig operation + InstallerDownloadClusterKubeconfigHandler installer.DownloadClusterKubeconfigHandler // InstallerEnableHostHandler sets the operation handler for the enable host operation InstallerEnableHostHandler 
installer.EnableHostHandler // InstallerGenerateClusterISOHandler sets the operation handler for the generate cluster i s o operation @@ -162,6 +194,8 @@ type AssistedInstallAPI struct { InstallerGetClusterHandler installer.GetClusterHandler // InstallerGetCredentialsHandler sets the operation handler for the get credentials operation InstallerGetCredentialsHandler installer.GetCredentialsHandler + // InstallerGetFreeAddressesHandler sets the operation handler for the get free addresses operation + InstallerGetFreeAddressesHandler installer.GetFreeAddressesHandler // InstallerGetHostHandler sets the operation handler for the get host operation InstallerGetHostHandler installer.GetHostHandler // InstallerGetNextStepsHandler sets the operation handler for the get next steps operation @@ -170,22 +204,30 @@ type AssistedInstallAPI struct { InstallerInstallClusterHandler installer.InstallClusterHandler // InstallerListClustersHandler sets the operation handler for the list clusters operation InstallerListClustersHandler installer.ListClustersHandler + // VersionsListComponentVersionsHandler sets the operation handler for the list component versions operation + VersionsListComponentVersionsHandler versions.ListComponentVersionsHandler // EventsListEventsHandler sets the operation handler for the list events operation EventsListEventsHandler events.ListEventsHandler // InstallerListHostsHandler sets the operation handler for the list hosts operation InstallerListHostsHandler installer.ListHostsHandler + // ManagedDomainsListManagedDomainsHandler sets the operation handler for the list managed domains operation + ManagedDomainsListManagedDomainsHandler managed_domains.ListManagedDomainsHandler // InstallerPostStepReplyHandler sets the operation handler for the post step reply operation InstallerPostStepReplyHandler installer.PostStepReplyHandler // InstallerRegisterClusterHandler sets the operation handler for the register cluster operation InstallerRegisterClusterHandler 
installer.RegisterClusterHandler // InstallerRegisterHostHandler sets the operation handler for the register host operation InstallerRegisterHostHandler installer.RegisterHostHandler + // InstallerResetClusterHandler sets the operation handler for the reset cluster operation + InstallerResetClusterHandler installer.ResetClusterHandler // InstallerSetDebugStepHandler sets the operation handler for the set debug step operation InstallerSetDebugStepHandler installer.SetDebugStepHandler // InstallerUpdateClusterHandler sets the operation handler for the update cluster operation InstallerUpdateClusterHandler installer.UpdateClusterHandler // InstallerUpdateHostInstallProgressHandler sets the operation handler for the update host install progress operation InstallerUpdateHostInstallProgressHandler installer.UpdateHostInstallProgressHandler + // InstallerUploadClusterIngressCertHandler sets the operation handler for the upload cluster ingress cert operation + InstallerUploadClusterIngressCertHandler installer.UploadClusterIngressCertHandler // ServeError is called when an error is received, there is a default handler // but you can set your own with this ServeError func(http.ResponseWriter, *http.Request, error) @@ -255,6 +297,12 @@ func (o *AssistedInstallAPI) Validate() error { unregistered = append(unregistered, "JSONProducer") } + if o.InstallerCancelInstallationHandler == nil { + unregistered = append(unregistered, "installer.CancelInstallationHandler") + } + if o.InstallerCompleteInstallationHandler == nil { + unregistered = append(unregistered, "installer.CompleteInstallationHandler") + } if o.InstallerDeregisterClusterHandler == nil { unregistered = append(unregistered, "installer.DeregisterClusterHandler") } @@ -270,6 +318,9 @@ func (o *AssistedInstallAPI) Validate() error { if o.InstallerDownloadClusterISOHandler == nil { unregistered = append(unregistered, "installer.DownloadClusterISOHandler") } + if o.InstallerDownloadClusterKubeconfigHandler == nil { + 
unregistered = append(unregistered, "installer.DownloadClusterKubeconfigHandler") + } if o.InstallerEnableHostHandler == nil { unregistered = append(unregistered, "installer.EnableHostHandler") } @@ -282,6 +333,9 @@ func (o *AssistedInstallAPI) Validate() error { if o.InstallerGetCredentialsHandler == nil { unregistered = append(unregistered, "installer.GetCredentialsHandler") } + if o.InstallerGetFreeAddressesHandler == nil { + unregistered = append(unregistered, "installer.GetFreeAddressesHandler") + } if o.InstallerGetHostHandler == nil { unregistered = append(unregistered, "installer.GetHostHandler") } @@ -294,12 +348,18 @@ func (o *AssistedInstallAPI) Validate() error { if o.InstallerListClustersHandler == nil { unregistered = append(unregistered, "installer.ListClustersHandler") } + if o.VersionsListComponentVersionsHandler == nil { + unregistered = append(unregistered, "versions.ListComponentVersionsHandler") + } if o.EventsListEventsHandler == nil { unregistered = append(unregistered, "events.ListEventsHandler") } if o.InstallerListHostsHandler == nil { unregistered = append(unregistered, "installer.ListHostsHandler") } + if o.ManagedDomainsListManagedDomainsHandler == nil { + unregistered = append(unregistered, "managed_domains.ListManagedDomainsHandler") + } if o.InstallerPostStepReplyHandler == nil { unregistered = append(unregistered, "installer.PostStepReplyHandler") } @@ -309,6 +369,9 @@ func (o *AssistedInstallAPI) Validate() error { if o.InstallerRegisterHostHandler == nil { unregistered = append(unregistered, "installer.RegisterHostHandler") } + if o.InstallerResetClusterHandler == nil { + unregistered = append(unregistered, "installer.ResetClusterHandler") + } if o.InstallerSetDebugStepHandler == nil { unregistered = append(unregistered, "installer.SetDebugStepHandler") } @@ -318,6 +381,9 @@ func (o *AssistedInstallAPI) Validate() error { if o.InstallerUpdateHostInstallProgressHandler == nil { unregistered = append(unregistered, 
"installer.UpdateHostInstallProgressHandler") } + if o.InstallerUploadClusterIngressCertHandler == nil { + unregistered = append(unregistered, "installer.UploadClusterIngressCertHandler") + } if len(unregistered) > 0 { return fmt.Errorf("missing registration: %s", strings.Join(unregistered, ", ")) @@ -408,6 +474,14 @@ func (o *AssistedInstallAPI) initHandlerCache() { o.handlers = make(map[string]map[string]http.Handler) } + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/clusters/{cluster_id}/actions/cancel"] = installer.NewCancelInstallation(o.context, o.InstallerCancelInstallationHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/clusters/{cluster_id}/actions/complete_installation"] = installer.NewCompleteInstallation(o.context, o.InstallerCompleteInstallationHandler) if o.handlers["DELETE"] == nil { o.handlers["DELETE"] = make(map[string]http.Handler) } @@ -428,6 +502,10 @@ func (o *AssistedInstallAPI) initHandlerCache() { o.handlers["GET"] = make(map[string]http.Handler) } o.handlers["GET"]["/clusters/{cluster_id}/downloads/image"] = installer.NewDownloadClusterISO(o.context, o.InstallerDownloadClusterISOHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/clusters/{cluster_id}/downloads/kubeconfig"] = installer.NewDownloadClusterKubeconfig(o.context, o.InstallerDownloadClusterKubeconfigHandler) if o.handlers["POST"] == nil { o.handlers["POST"] = make(map[string]http.Handler) } @@ -447,6 +525,10 @@ func (o *AssistedInstallAPI) initHandlerCache() { if o.handlers["GET"] == nil { o.handlers["GET"] = make(map[string]http.Handler) } + o.handlers["GET"]["/clusters/{cluster_id}/free_addresses"] = installer.NewGetFreeAddresses(o.context, o.InstallerGetFreeAddressesHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } 
o.handlers["GET"]["/clusters/{cluster_id}/hosts/{host_id}"] = installer.NewGetHost(o.context, o.InstallerGetHostHandler) if o.handlers["GET"] == nil { o.handlers["GET"] = make(map[string]http.Handler) @@ -463,11 +545,19 @@ func (o *AssistedInstallAPI) initHandlerCache() { if o.handlers["GET"] == nil { o.handlers["GET"] = make(map[string]http.Handler) } + o.handlers["GET"]["/component_versions"] = versions.NewListComponentVersions(o.context, o.VersionsListComponentVersionsHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } o.handlers["GET"]["/events/{entity_id}"] = events.NewListEvents(o.context, o.EventsListEventsHandler) if o.handlers["GET"] == nil { o.handlers["GET"] = make(map[string]http.Handler) } o.handlers["GET"]["/clusters/{cluster_id}/hosts"] = installer.NewListHosts(o.context, o.InstallerListHostsHandler) + if o.handlers["GET"] == nil { + o.handlers["GET"] = make(map[string]http.Handler) + } + o.handlers["GET"]["/domains"] = managed_domains.NewListManagedDomains(o.context, o.ManagedDomainsListManagedDomainsHandler) if o.handlers["POST"] == nil { o.handlers["POST"] = make(map[string]http.Handler) } @@ -483,6 +573,10 @@ func (o *AssistedInstallAPI) initHandlerCache() { if o.handlers["POST"] == nil { o.handlers["POST"] = make(map[string]http.Handler) } + o.handlers["POST"]["/clusters/{cluster_id}/actions/reset"] = installer.NewResetCluster(o.context, o.InstallerResetClusterHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } o.handlers["POST"]["/clusters/{cluster_id}/hosts/{host_id}/actions/debug"] = installer.NewSetDebugStep(o.context, o.InstallerSetDebugStepHandler) if o.handlers["PATCH"] == nil { o.handlers["PATCH"] = make(map[string]http.Handler) @@ -491,7 +585,11 @@ func (o *AssistedInstallAPI) initHandlerCache() { if o.handlers["PUT"] == nil { o.handlers["PUT"] = make(map[string]http.Handler) } - o.handlers["PUT"]["/clusters/{clusterId}/hosts/{hostId}/progress"] 
= installer.NewUpdateHostInstallProgress(o.context, o.InstallerUpdateHostInstallProgressHandler) + o.handlers["PUT"]["/clusters/{cluster_id}/hosts/{host_id}/progress"] = installer.NewUpdateHostInstallProgress(o.context, o.InstallerUpdateHostInstallProgressHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/clusters/{cluster_id}/uploads/ingress-cert"] = installer.NewUploadClusterIngressCert(o.context, o.InstallerUploadClusterIngressCertHandler) } // Serve creates a http handler to serve the API over HTTP diff --git a/restapi/operations/installer/cancel_installation.go b/restapi/operations/installer/cancel_installation.go new file mode 100644 index 000000000..1cdacc0f6 --- /dev/null +++ b/restapi/operations/installer/cancel_installation.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" +) + +// CancelInstallationHandlerFunc turns a function with the right signature into a cancel installation handler +type CancelInstallationHandlerFunc func(CancelInstallationParams) middleware.Responder + +// Handle executing the request and returning a response +func (fn CancelInstallationHandlerFunc) Handle(params CancelInstallationParams) middleware.Responder { + return fn(params) +} + +// CancelInstallationHandler interface for that can handle valid cancel installation params +type CancelInstallationHandler interface { + Handle(CancelInstallationParams) middleware.Responder +} + +// NewCancelInstallation creates a new http.Handler for the cancel installation operation +func NewCancelInstallation(ctx *middleware.Context, handler CancelInstallationHandler) *CancelInstallation { + return &CancelInstallation{Context: ctx, Handler: handler} +} + +/*CancelInstallation swagger:route 
POST /clusters/{cluster_id}/actions/cancel installer cancelInstallation + +Cancels an ongoing installation. + +*/ +type CancelInstallation struct { + Context *middleware.Context + Handler CancelInstallationHandler +} + +func (o *CancelInstallation) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + var Params = NewCancelInstallationParams() + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params) // actually handle the request + + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/restapi/operations/installer/cancel_installation_parameters.go b/restapi/operations/installer/cancel_installation_parameters.go new file mode 100644 index 000000000..46fec8a49 --- /dev/null +++ b/restapi/operations/installer/cancel_installation_parameters.go @@ -0,0 +1,91 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewCancelInstallationParams creates a new CancelInstallationParams object +// no default values defined in spec. 
+func NewCancelInstallationParams() CancelInstallationParams { + + return CancelInstallationParams{} +} + +// CancelInstallationParams contains all the bound params for the cancel installation operation +// typically these are obtained from a http.Request +// +// swagger:parameters CancelInstallation +type CancelInstallationParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClusterID strfmt.UUID +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewCancelInstallationParams() beforehand. +func (o *CancelInstallationParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rClusterID, rhkClusterID, _ := route.Params.GetOK("cluster_id") + if err := o.bindClusterID(rClusterID, rhkClusterID, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClusterID binds and validates parameter ClusterID from path. 
+func (o *CancelInstallationParams) bindClusterID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("cluster_id", "path", "strfmt.UUID", raw) + } + o.ClusterID = *(value.(*strfmt.UUID)) + + if err := o.validateClusterID(formats); err != nil { + return err + } + + return nil +} + +// validateClusterID carries on validations for parameter ClusterID +func (o *CancelInstallationParams) validateClusterID(formats strfmt.Registry) error { + + if err := validate.FormatOf("cluster_id", "path", "uuid", o.ClusterID.String(), formats); err != nil { + return err + } + return nil +} diff --git a/restapi/operations/installer/cancel_installation_responses.go b/restapi/operations/installer/cancel_installation_responses.go new file mode 100644 index 000000000..4b1f6acc6 --- /dev/null +++ b/restapi/operations/installer/cancel_installation_responses.go @@ -0,0 +1,190 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/filanov/bm-inventory/models" +) + +// CancelInstallationAcceptedCode is the HTTP code returned for type CancelInstallationAccepted +const CancelInstallationAcceptedCode int = 202 + +/*CancelInstallationAccepted Success. 
+ +swagger:response cancelInstallationAccepted +*/ +type CancelInstallationAccepted struct { + + /* + In: Body + */ + Payload *models.Cluster `json:"body,omitempty"` +} + +// NewCancelInstallationAccepted creates CancelInstallationAccepted with default headers values +func NewCancelInstallationAccepted() *CancelInstallationAccepted { + + return &CancelInstallationAccepted{} +} + +// WithPayload adds the payload to the cancel installation accepted response +func (o *CancelInstallationAccepted) WithPayload(payload *models.Cluster) *CancelInstallationAccepted { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the cancel installation accepted response +func (o *CancelInstallationAccepted) SetPayload(payload *models.Cluster) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CancelInstallationAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(202) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CancelInstallationNotFoundCode is the HTTP code returned for type CancelInstallationNotFound +const CancelInstallationNotFoundCode int = 404 + +/*CancelInstallationNotFound Error. 
+ +swagger:response cancelInstallationNotFound +*/ +type CancelInstallationNotFound struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewCancelInstallationNotFound creates CancelInstallationNotFound with default headers values +func NewCancelInstallationNotFound() *CancelInstallationNotFound { + + return &CancelInstallationNotFound{} +} + +// WithPayload adds the payload to the cancel installation not found response +func (o *CancelInstallationNotFound) WithPayload(payload *models.Error) *CancelInstallationNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the cancel installation not found response +func (o *CancelInstallationNotFound) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CancelInstallationNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CancelInstallationConflictCode is the HTTP code returned for type CancelInstallationConflict +const CancelInstallationConflictCode int = 409 + +/*CancelInstallationConflict Error. 
+ +swagger:response cancelInstallationConflict +*/ +type CancelInstallationConflict struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewCancelInstallationConflict creates CancelInstallationConflict with default headers values +func NewCancelInstallationConflict() *CancelInstallationConflict { + + return &CancelInstallationConflict{} +} + +// WithPayload adds the payload to the cancel installation conflict response +func (o *CancelInstallationConflict) WithPayload(payload *models.Error) *CancelInstallationConflict { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the cancel installation conflict response +func (o *CancelInstallationConflict) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CancelInstallationConflict) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(409) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CancelInstallationInternalServerErrorCode is the HTTP code returned for type CancelInstallationInternalServerError +const CancelInstallationInternalServerErrorCode int = 500 + +/*CancelInstallationInternalServerError Error. 
+ +swagger:response cancelInstallationInternalServerError +*/ +type CancelInstallationInternalServerError struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewCancelInstallationInternalServerError creates CancelInstallationInternalServerError with default headers values +func NewCancelInstallationInternalServerError() *CancelInstallationInternalServerError { + + return &CancelInstallationInternalServerError{} +} + +// WithPayload adds the payload to the cancel installation internal server error response +func (o *CancelInstallationInternalServerError) WithPayload(payload *models.Error) *CancelInstallationInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the cancel installation internal server error response +func (o *CancelInstallationInternalServerError) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CancelInstallationInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/restapi/operations/installer/cancel_installation_urlbuilder.go b/restapi/operations/installer/cancel_installation_urlbuilder.go new file mode 100644 index 000000000..5ade925eb --- /dev/null +++ b/restapi/operations/installer/cancel_installation_urlbuilder.go @@ -0,0 +1,101 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// CancelInstallationURL generates an URL for the cancel installation operation +type CancelInstallationURL struct { + ClusterID strfmt.UUID + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *CancelInstallationURL) WithBasePath(bp string) *CancelInstallationURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *CancelInstallationURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *CancelInstallationURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/clusters/{cluster_id}/actions/cancel" + + clusterID := o.ClusterID.String() + if clusterID != "" { + _path = strings.Replace(_path, "{cluster_id}", clusterID, -1) + } else { + return nil, errors.New("clusterId is required on CancelInstallationURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/api/assisted-install/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *CancelInstallationURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *CancelInstallationURL) String() string { + return o.Must(o.Build()).String() +} + +// 
BuildFull builds a full url with scheme, host, path and query string +func (o *CancelInstallationURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on CancelInstallationURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on CancelInstallationURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *CancelInstallationURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/restapi/operations/installer/complete_installation.go b/restapi/operations/installer/complete_installation.go new file mode 100644 index 000000000..8d0eeec32 --- /dev/null +++ b/restapi/operations/installer/complete_installation.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" +) + +// CompleteInstallationHandlerFunc turns a function with the right signature into a complete installation handler +type CompleteInstallationHandlerFunc func(CompleteInstallationParams) middleware.Responder + +// Handle executing the request and returning a response +func (fn CompleteInstallationHandlerFunc) Handle(params CompleteInstallationParams) middleware.Responder { + return fn(params) +} + +// CompleteInstallationHandler interface for that can handle valid complete installation params +type CompleteInstallationHandler interface { + Handle(CompleteInstallationParams) middleware.Responder +} + +// NewCompleteInstallation creates a new http.Handler for the complete installation operation +func NewCompleteInstallation(ctx *middleware.Context, handler CompleteInstallationHandler) *CompleteInstallation { + return &CompleteInstallation{Context: ctx, Handler: handler} +} + +/*CompleteInstallation swagger:route POST /clusters/{cluster_id}/actions/complete_installation installer completeInstallation + +Agent API to mark a finalizing installation as complete. 
+ +*/ +type CompleteInstallation struct { + Context *middleware.Context + Handler CompleteInstallationHandler +} + +func (o *CompleteInstallation) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + var Params = NewCompleteInstallationParams() + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params) // actually handle the request + + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/restapi/operations/installer/complete_installation_parameters.go b/restapi/operations/installer/complete_installation_parameters.go new file mode 100644 index 000000000..f7cf3e925 --- /dev/null +++ b/restapi/operations/installer/complete_installation_parameters.go @@ -0,0 +1,122 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/filanov/bm-inventory/models" +) + +// NewCompleteInstallationParams creates a new CompleteInstallationParams object +// no default values defined in spec. 
+func NewCompleteInstallationParams() CompleteInstallationParams { + + return CompleteInstallationParams{} +} + +// CompleteInstallationParams contains all the bound params for the complete installation operation +// typically these are obtained from a http.Request +// +// swagger:parameters CompleteInstallation +type CompleteInstallationParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClusterID strfmt.UUID + /* + Required: true + In: body + */ + CompletionParams *models.CompletionParams +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewCompleteInstallationParams() beforehand. +func (o *CompleteInstallationParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rClusterID, rhkClusterID, _ := route.Params.GetOK("cluster_id") + if err := o.bindClusterID(rClusterID, rhkClusterID, route.Formats); err != nil { + res = append(res, err) + } + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.CompletionParams + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("completionParams", "body", "")) + } else { + res = append(res, errors.NewParseError("completionParams", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.CompletionParams = &body + } + } + } else { + res = append(res, errors.Required("completionParams", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClusterID binds and validates parameter ClusterID from path. 
+func (o *CompleteInstallationParams) bindClusterID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("cluster_id", "path", "strfmt.UUID", raw) + } + o.ClusterID = *(value.(*strfmt.UUID)) + + if err := o.validateClusterID(formats); err != nil { + return err + } + + return nil +} + +// validateClusterID carries on validations for parameter ClusterID +func (o *CompleteInstallationParams) validateClusterID(formats strfmt.Registry) error { + + if err := validate.FormatOf("cluster_id", "path", "uuid", o.ClusterID.String(), formats); err != nil { + return err + } + return nil +} diff --git a/restapi/operations/installer/complete_installation_responses.go b/restapi/operations/installer/complete_installation_responses.go new file mode 100644 index 000000000..933f201a9 --- /dev/null +++ b/restapi/operations/installer/complete_installation_responses.go @@ -0,0 +1,190 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/filanov/bm-inventory/models" +) + +// CompleteInstallationAcceptedCode is the HTTP code returned for type CompleteInstallationAccepted +const CompleteInstallationAcceptedCode int = 202 + +/*CompleteInstallationAccepted Success. 
+ +swagger:response completeInstallationAccepted +*/ +type CompleteInstallationAccepted struct { + + /* + In: Body + */ + Payload *models.Cluster `json:"body,omitempty"` +} + +// NewCompleteInstallationAccepted creates CompleteInstallationAccepted with default headers values +func NewCompleteInstallationAccepted() *CompleteInstallationAccepted { + + return &CompleteInstallationAccepted{} +} + +// WithPayload adds the payload to the complete installation accepted response +func (o *CompleteInstallationAccepted) WithPayload(payload *models.Cluster) *CompleteInstallationAccepted { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the complete installation accepted response +func (o *CompleteInstallationAccepted) SetPayload(payload *models.Cluster) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CompleteInstallationAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(202) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CompleteInstallationNotFoundCode is the HTTP code returned for type CompleteInstallationNotFound +const CompleteInstallationNotFoundCode int = 404 + +/*CompleteInstallationNotFound Error. 
+ +swagger:response completeInstallationNotFound +*/ +type CompleteInstallationNotFound struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewCompleteInstallationNotFound creates CompleteInstallationNotFound with default headers values +func NewCompleteInstallationNotFound() *CompleteInstallationNotFound { + + return &CompleteInstallationNotFound{} +} + +// WithPayload adds the payload to the complete installation not found response +func (o *CompleteInstallationNotFound) WithPayload(payload *models.Error) *CompleteInstallationNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the complete installation not found response +func (o *CompleteInstallationNotFound) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CompleteInstallationNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CompleteInstallationConflictCode is the HTTP code returned for type CompleteInstallationConflict +const CompleteInstallationConflictCode int = 409 + +/*CompleteInstallationConflict Error. 
+ +swagger:response completeInstallationConflict +*/ +type CompleteInstallationConflict struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewCompleteInstallationConflict creates CompleteInstallationConflict with default headers values +func NewCompleteInstallationConflict() *CompleteInstallationConflict { + + return &CompleteInstallationConflict{} +} + +// WithPayload adds the payload to the complete installation conflict response +func (o *CompleteInstallationConflict) WithPayload(payload *models.Error) *CompleteInstallationConflict { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the complete installation conflict response +func (o *CompleteInstallationConflict) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CompleteInstallationConflict) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(409) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// CompleteInstallationInternalServerErrorCode is the HTTP code returned for type CompleteInstallationInternalServerError +const CompleteInstallationInternalServerErrorCode int = 500 + +/*CompleteInstallationInternalServerError Error. 
+ +swagger:response completeInstallationInternalServerError +*/ +type CompleteInstallationInternalServerError struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewCompleteInstallationInternalServerError creates CompleteInstallationInternalServerError with default headers values +func NewCompleteInstallationInternalServerError() *CompleteInstallationInternalServerError { + + return &CompleteInstallationInternalServerError{} +} + +// WithPayload adds the payload to the complete installation internal server error response +func (o *CompleteInstallationInternalServerError) WithPayload(payload *models.Error) *CompleteInstallationInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the complete installation internal server error response +func (o *CompleteInstallationInternalServerError) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *CompleteInstallationInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/restapi/operations/installer/complete_installation_urlbuilder.go b/restapi/operations/installer/complete_installation_urlbuilder.go new file mode 100644 index 000000000..defa0602b --- /dev/null +++ b/restapi/operations/installer/complete_installation_urlbuilder.go @@ -0,0 +1,101 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// CompleteInstallationURL generates an URL for the complete installation operation +type CompleteInstallationURL struct { + ClusterID strfmt.UUID + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *CompleteInstallationURL) WithBasePath(bp string) *CompleteInstallationURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *CompleteInstallationURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *CompleteInstallationURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/clusters/{cluster_id}/actions/complete_installation" + + clusterID := o.ClusterID.String() + if clusterID != "" { + _path = strings.Replace(_path, "{cluster_id}", clusterID, -1) + } else { + return nil, errors.New("clusterId is required on CompleteInstallationURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/api/assisted-install/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *CompleteInstallationURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *CompleteInstallationURL) String() string { + return 
o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *CompleteInstallationURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on CompleteInstallationURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on CompleteInstallationURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *CompleteInstallationURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/restapi/operations/installer/disable_host_responses.go b/restapi/operations/installer/disable_host_responses.go index 850a94d92..074ca292b 100644 --- a/restapi/operations/installer/disable_host_responses.go +++ b/restapi/operations/installer/disable_host_responses.go @@ -13,28 +13,48 @@ import ( "github.com/filanov/bm-inventory/models" ) -// DisableHostNoContentCode is the HTTP code returned for type DisableHostNoContent -const DisableHostNoContentCode int = 204 +// DisableHostOKCode is the HTTP code returned for type DisableHostOK +const DisableHostOKCode int = 200 -/*DisableHostNoContent Success. +/*DisableHostOK Success. 
-swagger:response disableHostNoContent +swagger:response disableHostOK */ -type DisableHostNoContent struct { +type DisableHostOK struct { + + /* + In: Body + */ + Payload *models.Host `json:"body,omitempty"` } -// NewDisableHostNoContent creates DisableHostNoContent with default headers values -func NewDisableHostNoContent() *DisableHostNoContent { +// NewDisableHostOK creates DisableHostOK with default headers values +func NewDisableHostOK() *DisableHostOK { - return &DisableHostNoContent{} + return &DisableHostOK{} } -// WriteResponse to the client -func (o *DisableHostNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { +// WithPayload adds the payload to the disable host o k response +func (o *DisableHostOK) WithPayload(payload *models.Host) *DisableHostOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the disable host o k response +func (o *DisableHostOK) SetPayload(payload *models.Host) { + o.Payload = payload +} - rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses +// WriteResponse to the client +func (o *DisableHostOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { - rw.WriteHeader(204) + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } } // DisableHostNotFoundCode is the HTTP code returned for type DisableHostNotFound diff --git a/restapi/operations/installer/download_cluster_files_parameters.go b/restapi/operations/installer/download_cluster_files_parameters.go index 8c56cc754..4ebc064bc 100644 --- a/restapi/operations/installer/download_cluster_files_parameters.go +++ b/restapi/operations/installer/download_cluster_files_parameters.go @@ -106,7 +106,7 @@ func (o *DownloadClusterFilesParams) validateClusterID(formats strfmt.Registry) // bindFileName binds and validates parameter FileName from query. 
func (o *DownloadClusterFilesParams) bindFileName(rawData []string, hasKey bool, formats strfmt.Registry) error { if !hasKey { - return errors.Required("file_name", "query") + return errors.Required("file_name", "query", rawData) } var raw string if len(rawData) > 0 { @@ -131,7 +131,7 @@ func (o *DownloadClusterFilesParams) bindFileName(rawData []string, hasKey bool, // validateFileName carries on validations for parameter FileName func (o *DownloadClusterFilesParams) validateFileName(formats strfmt.Registry) error { - if err := validate.Enum("file_name", "query", o.FileName, []interface{}{"bootstrap.ign", "master.ign", "metadata.json", "worker.ign", "kubeadmin-password", "kubeconfig"}); err != nil { + if err := validate.EnumCase("file_name", "query", o.FileName, []interface{}{"bootstrap.ign", "master.ign", "metadata.json", "worker.ign", "kubeadmin-password", "kubeconfig", "kubeconfig-noingress", "install-config.yaml"}, true); err != nil { return err } diff --git a/restapi/operations/installer/download_cluster_kubeconfig.go b/restapi/operations/installer/download_cluster_kubeconfig.go new file mode 100644 index 000000000..c1ddc1e66 --- /dev/null +++ b/restapi/operations/installer/download_cluster_kubeconfig.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" +) + +// DownloadClusterKubeconfigHandlerFunc turns a function with the right signature into a download cluster kubeconfig handler +type DownloadClusterKubeconfigHandlerFunc func(DownloadClusterKubeconfigParams) middleware.Responder + +// Handle executing the request and returning a response +func (fn DownloadClusterKubeconfigHandlerFunc) Handle(params DownloadClusterKubeconfigParams) middleware.Responder { + return fn(params) +} + +// DownloadClusterKubeconfigHandler interface for that can handle valid download cluster kubeconfig params +type DownloadClusterKubeconfigHandler interface { + Handle(DownloadClusterKubeconfigParams) middleware.Responder +} + +// NewDownloadClusterKubeconfig creates a new http.Handler for the download cluster kubeconfig operation +func NewDownloadClusterKubeconfig(ctx *middleware.Context, handler DownloadClusterKubeconfigHandler) *DownloadClusterKubeconfig { + return &DownloadClusterKubeconfig{Context: ctx, Handler: handler} +} + +/*DownloadClusterKubeconfig swagger:route GET /clusters/{cluster_id}/downloads/kubeconfig installer downloadClusterKubeconfig + +Downloads the kubeconfig file for this cluster. 
+ +*/ +type DownloadClusterKubeconfig struct { + Context *middleware.Context + Handler DownloadClusterKubeconfigHandler +} + +func (o *DownloadClusterKubeconfig) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + var Params = NewDownloadClusterKubeconfigParams() + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params) // actually handle the request + + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/restapi/operations/installer/download_cluster_kubeconfig_parameters.go b/restapi/operations/installer/download_cluster_kubeconfig_parameters.go new file mode 100644 index 000000000..77f0d1921 --- /dev/null +++ b/restapi/operations/installer/download_cluster_kubeconfig_parameters.go @@ -0,0 +1,91 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewDownloadClusterKubeconfigParams creates a new DownloadClusterKubeconfigParams object +// no default values defined in spec. 
+func NewDownloadClusterKubeconfigParams() DownloadClusterKubeconfigParams { + + return DownloadClusterKubeconfigParams{} +} + +// DownloadClusterKubeconfigParams contains all the bound params for the download cluster kubeconfig operation +// typically these are obtained from a http.Request +// +// swagger:parameters DownloadClusterKubeconfig +type DownloadClusterKubeconfigParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClusterID strfmt.UUID +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewDownloadClusterKubeconfigParams() beforehand. +func (o *DownloadClusterKubeconfigParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rClusterID, rhkClusterID, _ := route.Params.GetOK("cluster_id") + if err := o.bindClusterID(rClusterID, rhkClusterID, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClusterID binds and validates parameter ClusterID from path. 
+func (o *DownloadClusterKubeconfigParams) bindClusterID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("cluster_id", "path", "strfmt.UUID", raw) + } + o.ClusterID = *(value.(*strfmt.UUID)) + + if err := o.validateClusterID(formats); err != nil { + return err + } + + return nil +} + +// validateClusterID carries on validations for parameter ClusterID +func (o *DownloadClusterKubeconfigParams) validateClusterID(formats strfmt.Registry) error { + + if err := validate.FormatOf("cluster_id", "path", "uuid", o.ClusterID.String(), formats); err != nil { + return err + } + return nil +} diff --git a/restapi/operations/installer/download_cluster_kubeconfig_responses.go b/restapi/operations/installer/download_cluster_kubeconfig_responses.go new file mode 100644 index 000000000..f64dfbdfb --- /dev/null +++ b/restapi/operations/installer/download_cluster_kubeconfig_responses.go @@ -0,0 +1,189 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/filanov/bm-inventory/models" +) + +// DownloadClusterKubeconfigOKCode is the HTTP code returned for type DownloadClusterKubeconfigOK +const DownloadClusterKubeconfigOKCode int = 200 + +/*DownloadClusterKubeconfigOK Success. 
+ +swagger:response downloadClusterKubeconfigOK +*/ +type DownloadClusterKubeconfigOK struct { + + /* + In: Body + */ + Payload io.ReadCloser `json:"body,omitempty"` +} + +// NewDownloadClusterKubeconfigOK creates DownloadClusterKubeconfigOK with default headers values +func NewDownloadClusterKubeconfigOK() *DownloadClusterKubeconfigOK { + + return &DownloadClusterKubeconfigOK{} +} + +// WithPayload adds the payload to the download cluster kubeconfig o k response +func (o *DownloadClusterKubeconfigOK) WithPayload(payload io.ReadCloser) *DownloadClusterKubeconfigOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the download cluster kubeconfig o k response +func (o *DownloadClusterKubeconfigOK) SetPayload(payload io.ReadCloser) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DownloadClusterKubeconfigOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// DownloadClusterKubeconfigNotFoundCode is the HTTP code returned for type DownloadClusterKubeconfigNotFound +const DownloadClusterKubeconfigNotFoundCode int = 404 + +/*DownloadClusterKubeconfigNotFound Error. 
+ +swagger:response downloadClusterKubeconfigNotFound +*/ +type DownloadClusterKubeconfigNotFound struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewDownloadClusterKubeconfigNotFound creates DownloadClusterKubeconfigNotFound with default headers values +func NewDownloadClusterKubeconfigNotFound() *DownloadClusterKubeconfigNotFound { + + return &DownloadClusterKubeconfigNotFound{} +} + +// WithPayload adds the payload to the download cluster kubeconfig not found response +func (o *DownloadClusterKubeconfigNotFound) WithPayload(payload *models.Error) *DownloadClusterKubeconfigNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the download cluster kubeconfig not found response +func (o *DownloadClusterKubeconfigNotFound) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DownloadClusterKubeconfigNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DownloadClusterKubeconfigConflictCode is the HTTP code returned for type DownloadClusterKubeconfigConflict +const DownloadClusterKubeconfigConflictCode int = 409 + +/*DownloadClusterKubeconfigConflict Error. 
+ +swagger:response downloadClusterKubeconfigConflict +*/ +type DownloadClusterKubeconfigConflict struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewDownloadClusterKubeconfigConflict creates DownloadClusterKubeconfigConflict with default headers values +func NewDownloadClusterKubeconfigConflict() *DownloadClusterKubeconfigConflict { + + return &DownloadClusterKubeconfigConflict{} +} + +// WithPayload adds the payload to the download cluster kubeconfig conflict response +func (o *DownloadClusterKubeconfigConflict) WithPayload(payload *models.Error) *DownloadClusterKubeconfigConflict { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the download cluster kubeconfig conflict response +func (o *DownloadClusterKubeconfigConflict) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DownloadClusterKubeconfigConflict) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(409) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// DownloadClusterKubeconfigInternalServerErrorCode is the HTTP code returned for type DownloadClusterKubeconfigInternalServerError +const DownloadClusterKubeconfigInternalServerErrorCode int = 500 + +/*DownloadClusterKubeconfigInternalServerError Error. 
+ +swagger:response downloadClusterKubeconfigInternalServerError +*/ +type DownloadClusterKubeconfigInternalServerError struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewDownloadClusterKubeconfigInternalServerError creates DownloadClusterKubeconfigInternalServerError with default headers values +func NewDownloadClusterKubeconfigInternalServerError() *DownloadClusterKubeconfigInternalServerError { + + return &DownloadClusterKubeconfigInternalServerError{} +} + +// WithPayload adds the payload to the download cluster kubeconfig internal server error response +func (o *DownloadClusterKubeconfigInternalServerError) WithPayload(payload *models.Error) *DownloadClusterKubeconfigInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the download cluster kubeconfig internal server error response +func (o *DownloadClusterKubeconfigInternalServerError) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *DownloadClusterKubeconfigInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/restapi/operations/installer/download_cluster_kubeconfig_urlbuilder.go b/restapi/operations/installer/download_cluster_kubeconfig_urlbuilder.go new file mode 100644 index 000000000..22f58fdea --- /dev/null +++ b/restapi/operations/installer/download_cluster_kubeconfig_urlbuilder.go @@ -0,0 +1,101 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// DownloadClusterKubeconfigURL generates an URL for the download cluster kubeconfig operation +type DownloadClusterKubeconfigURL struct { + ClusterID strfmt.UUID + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *DownloadClusterKubeconfigURL) WithBasePath(bp string) *DownloadClusterKubeconfigURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *DownloadClusterKubeconfigURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *DownloadClusterKubeconfigURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/clusters/{cluster_id}/downloads/kubeconfig" + + clusterID := o.ClusterID.String() + if clusterID != "" { + _path = strings.Replace(_path, "{cluster_id}", clusterID, -1) + } else { + return nil, errors.New("clusterId is required on DownloadClusterKubeconfigURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/api/assisted-install/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *DownloadClusterKubeconfigURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o 
*DownloadClusterKubeconfigURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *DownloadClusterKubeconfigURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on DownloadClusterKubeconfigURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on DownloadClusterKubeconfigURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *DownloadClusterKubeconfigURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/restapi/operations/installer/enable_host_responses.go b/restapi/operations/installer/enable_host_responses.go index aca52af64..64211c844 100644 --- a/restapi/operations/installer/enable_host_responses.go +++ b/restapi/operations/installer/enable_host_responses.go @@ -13,28 +13,48 @@ import ( "github.com/filanov/bm-inventory/models" ) -// EnableHostNoContentCode is the HTTP code returned for type EnableHostNoContent -const EnableHostNoContentCode int = 204 +// EnableHostOKCode is the HTTP code returned for type EnableHostOK +const EnableHostOKCode int = 200 -/*EnableHostNoContent Success. +/*EnableHostOK Success. 
-swagger:response enableHostNoContent +swagger:response enableHostOK */ -type EnableHostNoContent struct { +type EnableHostOK struct { + + /* + In: Body + */ + Payload *models.Host `json:"body,omitempty"` } -// NewEnableHostNoContent creates EnableHostNoContent with default headers values -func NewEnableHostNoContent() *EnableHostNoContent { +// NewEnableHostOK creates EnableHostOK with default headers values +func NewEnableHostOK() *EnableHostOK { - return &EnableHostNoContent{} + return &EnableHostOK{} } -// WriteResponse to the client -func (o *EnableHostNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { +// WithPayload adds the payload to the enable host o k response +func (o *EnableHostOK) WithPayload(payload *models.Host) *EnableHostOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the enable host o k response +func (o *EnableHostOK) SetPayload(payload *models.Host) { + o.Payload = payload +} - rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses +// WriteResponse to the client +func (o *EnableHostOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { - rw.WriteHeader(204) + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } } // EnableHostNotFoundCode is the HTTP code returned for type EnableHostNotFound diff --git a/restapi/operations/installer/generate_cluster_i_s_o_parameters.go b/restapi/operations/installer/generate_cluster_i_s_o_parameters.go index 0d7c74290..de8167c5d 100644 --- a/restapi/operations/installer/generate_cluster_i_s_o_parameters.go +++ b/restapi/operations/installer/generate_cluster_i_s_o_parameters.go @@ -65,7 +65,7 @@ func (o *GenerateClusterISOParams) BindRequest(r *http.Request, route *middlewar var body models.ImageCreateParams if err := route.Consumer.Consume(r.Body, &body); err != nil { if err 
== io.EOF { - res = append(res, errors.Required("imageCreateParams", "body")) + res = append(res, errors.Required("imageCreateParams", "body", "")) } else { res = append(res, errors.NewParseError("imageCreateParams", "body", "", err)) } @@ -80,7 +80,7 @@ func (o *GenerateClusterISOParams) BindRequest(r *http.Request, route *middlewar } } } else { - res = append(res, errors.Required("imageCreateParams", "body")) + res = append(res, errors.Required("imageCreateParams", "body", "")) } if len(res) > 0 { return errors.CompositeValidationError(res...) diff --git a/restapi/operations/installer/generate_cluster_i_s_o_responses.go b/restapi/operations/installer/generate_cluster_i_s_o_responses.go index 3ec7e14f2..8c59aec88 100644 --- a/restapi/operations/installer/generate_cluster_i_s_o_responses.go +++ b/restapi/operations/installer/generate_cluster_i_s_o_responses.go @@ -153,6 +153,11 @@ const GenerateClusterISOConflictCode int = 409 swagger:response generateClusterISOConflict */ type GenerateClusterISOConflict struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` } // NewGenerateClusterISOConflict creates GenerateClusterISOConflict with default headers values @@ -161,12 +166,27 @@ func NewGenerateClusterISOConflict() *GenerateClusterISOConflict { return &GenerateClusterISOConflict{} } +// WithPayload adds the payload to the generate cluster i s o conflict response +func (o *GenerateClusterISOConflict) WithPayload(payload *models.Error) *GenerateClusterISOConflict { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the generate cluster i s o conflict response +func (o *GenerateClusterISOConflict) SetPayload(payload *models.Error) { + o.Payload = payload +} + // WriteResponse to the client func (o *GenerateClusterISOConflict) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { - rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses - rw.WriteHeader(409) + if o.Payload != nil { + 
payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } } // GenerateClusterISOInternalServerErrorCode is the HTTP code returned for type GenerateClusterISOInternalServerError diff --git a/restapi/operations/installer/get_free_addresses.go b/restapi/operations/installer/get_free_addresses.go new file mode 100644 index 000000000..6478dc001 --- /dev/null +++ b/restapi/operations/installer/get_free_addresses.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" +) + +// GetFreeAddressesHandlerFunc turns a function with the right signature into a get free addresses handler +type GetFreeAddressesHandlerFunc func(GetFreeAddressesParams) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetFreeAddressesHandlerFunc) Handle(params GetFreeAddressesParams) middleware.Responder { + return fn(params) +} + +// GetFreeAddressesHandler interface for that can handle valid get free addresses params +type GetFreeAddressesHandler interface { + Handle(GetFreeAddressesParams) middleware.Responder +} + +// NewGetFreeAddresses creates a new http.Handler for the get free addresses operation +func NewGetFreeAddresses(ctx *middleware.Context, handler GetFreeAddressesHandler) *GetFreeAddresses { + return &GetFreeAddresses{Context: ctx, Handler: handler} +} + +/*GetFreeAddresses swagger:route GET /clusters/{cluster_id}/free_addresses installer getFreeAddresses + +Retrieves the free address list for a network. 
+ +*/ +type GetFreeAddresses struct { + Context *middleware.Context + Handler GetFreeAddressesHandler +} + +func (o *GetFreeAddresses) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + var Params = NewGetFreeAddressesParams() + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params) // actually handle the request + + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/restapi/operations/installer/get_free_addresses_parameters.go b/restapi/operations/installer/get_free_addresses_parameters.go new file mode 100644 index 000000000..421f06c92 --- /dev/null +++ b/restapi/operations/installer/get_free_addresses_parameters.go @@ -0,0 +1,229 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// NewGetFreeAddressesParams creates a new GetFreeAddressesParams object +// with the default values initialized. 
+func NewGetFreeAddressesParams() GetFreeAddressesParams { + + var ( + // initialize parameters with default values + + limitDefault = int64(8000) + ) + + return GetFreeAddressesParams{ + Limit: &limitDefault, + } +} + +// GetFreeAddressesParams contains all the bound params for the get free addresses operation +// typically these are obtained from a http.Request +// +// swagger:parameters GetFreeAddresses +type GetFreeAddressesParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClusterID strfmt.UUID + /* + Maximum: 8000 + Minimum: 1 + In: query + Default: 8000 + */ + Limit *int64 + /* + Required: true + Pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$ + In: query + */ + Network string + /* + In: query + */ + Prefix *string +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetFreeAddressesParams() beforehand. 
+func (o *GetFreeAddressesParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + qs := runtime.Values(r.URL.Query()) + + rClusterID, rhkClusterID, _ := route.Params.GetOK("cluster_id") + if err := o.bindClusterID(rClusterID, rhkClusterID, route.Formats); err != nil { + res = append(res, err) + } + + qLimit, qhkLimit, _ := qs.GetOK("limit") + if err := o.bindLimit(qLimit, qhkLimit, route.Formats); err != nil { + res = append(res, err) + } + + qNetwork, qhkNetwork, _ := qs.GetOK("network") + if err := o.bindNetwork(qNetwork, qhkNetwork, route.Formats); err != nil { + res = append(res, err) + } + + qPrefix, qhkPrefix, _ := qs.GetOK("prefix") + if err := o.bindPrefix(qPrefix, qhkPrefix, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClusterID binds and validates parameter ClusterID from path. +func (o *GetFreeAddressesParams) bindClusterID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("cluster_id", "path", "strfmt.UUID", raw) + } + o.ClusterID = *(value.(*strfmt.UUID)) + + if err := o.validateClusterID(formats); err != nil { + return err + } + + return nil +} + +// validateClusterID carries on validations for parameter ClusterID +func (o *GetFreeAddressesParams) validateClusterID(formats strfmt.Registry) error { + + if err := validate.FormatOf("cluster_id", "path", "uuid", o.ClusterID.String(), formats); err != nil { + return err + } + return nil +} + +// bindLimit binds and validates parameter Limit from query. 
+func (o *GetFreeAddressesParams) bindLimit(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + if raw == "" { // empty values pass all other validations + // Default values have been previously initialized by NewGetFreeAddressesParams() + return nil + } + + value, err := swag.ConvertInt64(raw) + if err != nil { + return errors.InvalidType("limit", "query", "int64", raw) + } + o.Limit = &value + + if err := o.validateLimit(formats); err != nil { + return err + } + + return nil +} + +// validateLimit carries on validations for parameter Limit +func (o *GetFreeAddressesParams) validateLimit(formats strfmt.Registry) error { + + if err := validate.MinimumInt("limit", "query", int64(*o.Limit), 1, false); err != nil { + return err + } + + if err := validate.MaximumInt("limit", "query", int64(*o.Limit), 8000, false); err != nil { + return err + } + + return nil +} + +// bindNetwork binds and validates parameter Network from query. +func (o *GetFreeAddressesParams) bindNetwork(rawData []string, hasKey bool, formats strfmt.Registry) error { + if !hasKey { + return errors.Required("network", "query", rawData) + } + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // AllowEmptyValue: false + if err := validate.RequiredString("network", "query", raw); err != nil { + return err + } + + o.Network = raw + + if err := o.validateNetwork(formats); err != nil { + return err + } + + return nil +} + +// validateNetwork carries on validations for parameter Network +func (o *GetFreeAddressesParams) validateNetwork(formats strfmt.Registry) error { + + if err := validate.Pattern("network", "query", o.Network, `^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$`); err != nil { + return err + } + + return nil +} + +// bindPrefix binds and validates parameter Prefix from query. 
+func (o *GetFreeAddressesParams) bindPrefix(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: false + // AllowEmptyValue: false + if raw == "" { // empty values pass all other validations + return nil + } + + o.Prefix = &raw + + return nil +} diff --git a/restapi/operations/installer/get_free_addresses_responses.go b/restapi/operations/installer/get_free_addresses_responses.go new file mode 100644 index 000000000..3199356dc --- /dev/null +++ b/restapi/operations/installer/get_free_addresses_responses.go @@ -0,0 +1,149 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/filanov/bm-inventory/models" +) + +// GetFreeAddressesOKCode is the HTTP code returned for type GetFreeAddressesOK +const GetFreeAddressesOKCode int = 200 + +/*GetFreeAddressesOK Success + +swagger:response getFreeAddressesOK +*/ +type GetFreeAddressesOK struct { + + /* + In: Body + */ + Payload models.FreeAddressesList `json:"body,omitempty"` +} + +// NewGetFreeAddressesOK creates GetFreeAddressesOK with default headers values +func NewGetFreeAddressesOK() *GetFreeAddressesOK { + + return &GetFreeAddressesOK{} +} + +// WithPayload adds the payload to the get free addresses o k response +func (o *GetFreeAddressesOK) WithPayload(payload models.FreeAddressesList) *GetFreeAddressesOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get free addresses o k response +func (o *GetFreeAddressesOK) SetPayload(payload models.FreeAddressesList) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetFreeAddressesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload 
+ if payload == nil { + // return empty array + payload = models.FreeAddressesList{} + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// GetFreeAddressesNotFoundCode is the HTTP code returned for type GetFreeAddressesNotFound +const GetFreeAddressesNotFoundCode int = 404 + +/*GetFreeAddressesNotFound Error. + +swagger:response getFreeAddressesNotFound +*/ +type GetFreeAddressesNotFound struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewGetFreeAddressesNotFound creates GetFreeAddressesNotFound with default headers values +func NewGetFreeAddressesNotFound() *GetFreeAddressesNotFound { + + return &GetFreeAddressesNotFound{} +} + +// WithPayload adds the payload to the get free addresses not found response +func (o *GetFreeAddressesNotFound) WithPayload(payload *models.Error) *GetFreeAddressesNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get free addresses not found response +func (o *GetFreeAddressesNotFound) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetFreeAddressesNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetFreeAddressesInternalServerErrorCode is the HTTP code returned for type GetFreeAddressesInternalServerError +const GetFreeAddressesInternalServerErrorCode int = 500 + +/*GetFreeAddressesInternalServerError Error. 
+ +swagger:response getFreeAddressesInternalServerError +*/ +type GetFreeAddressesInternalServerError struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewGetFreeAddressesInternalServerError creates GetFreeAddressesInternalServerError with default headers values +func NewGetFreeAddressesInternalServerError() *GetFreeAddressesInternalServerError { + + return &GetFreeAddressesInternalServerError{} +} + +// WithPayload adds the payload to the get free addresses internal server error response +func (o *GetFreeAddressesInternalServerError) WithPayload(payload *models.Error) *GetFreeAddressesInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get free addresses internal server error response +func (o *GetFreeAddressesInternalServerError) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetFreeAddressesInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/restapi/operations/installer/get_free_addresses_urlbuilder.go b/restapi/operations/installer/get_free_addresses_urlbuilder.go new file mode 100644 index 000000000..15c0192cb --- /dev/null +++ b/restapi/operations/installer/get_free_addresses_urlbuilder.go @@ -0,0 +1,131 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// GetFreeAddressesURL generates an URL for the get free addresses operation +type GetFreeAddressesURL struct { + ClusterID strfmt.UUID + + Limit *int64 + Network string + Prefix *string + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetFreeAddressesURL) WithBasePath(bp string) *GetFreeAddressesURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetFreeAddressesURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetFreeAddressesURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/clusters/{cluster_id}/free_addresses" + + clusterID := o.ClusterID.String() + if clusterID != "" { + _path = strings.Replace(_path, "{cluster_id}", clusterID, -1) + } else { + return nil, errors.New("clusterId is required on GetFreeAddressesURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/api/assisted-install/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + qs := make(url.Values) + + var limitQ string + if o.Limit != nil { + limitQ = swag.FormatInt64(*o.Limit) + } + if limitQ != "" { + qs.Set("limit", limitQ) + } + + networkQ := o.Network + if networkQ != "" { + qs.Set("network", networkQ) + } + + var prefixQ string + if o.Prefix != nil { + prefixQ = *o.Prefix + } + if prefixQ != "" { + qs.Set("prefix", prefixQ) + } + + 
_result.RawQuery = qs.Encode() + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetFreeAddressesURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetFreeAddressesURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetFreeAddressesURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetFreeAddressesURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetFreeAddressesURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetFreeAddressesURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/restapi/operations/installer/get_next_steps_responses.go b/restapi/operations/installer/get_next_steps_responses.go index 43f2fc32a..997e31b07 100644 --- a/restapi/operations/installer/get_next_steps_responses.go +++ b/restapi/operations/installer/get_next_steps_responses.go @@ -25,7 +25,7 @@ type GetNextStepsOK struct { /* In: Body */ - Payload models.Steps `json:"body,omitempty"` + Payload *models.Steps `json:"body,omitempty"` } // NewGetNextStepsOK creates GetNextStepsOK with default headers values @@ -35,13 +35,13 @@ func NewGetNextStepsOK() *GetNextStepsOK { } // WithPayload adds the payload to the get next steps o k response -func (o *GetNextStepsOK) WithPayload(payload models.Steps) *GetNextStepsOK { +func (o *GetNextStepsOK) WithPayload(payload *models.Steps) *GetNextStepsOK { o.Payload = payload 
return o } // SetPayload sets the payload to the get next steps o k response -func (o *GetNextStepsOK) SetPayload(payload models.Steps) { +func (o *GetNextStepsOK) SetPayload(payload *models.Steps) { o.Payload = payload } @@ -49,14 +49,11 @@ func (o *GetNextStepsOK) SetPayload(payload models.Steps) { func (o *GetNextStepsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.WriteHeader(200) - payload := o.Payload - if payload == nil { - // return empty array - payload = models.Steps{} - } - - if err := producer.Produce(rw, payload); err != nil { - panic(err) // let the recovery middleware deal with this + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } } } diff --git a/restapi/operations/installer/register_cluster_parameters.go b/restapi/operations/installer/register_cluster_parameters.go index 035f6c3f2..8f1a762d6 100644 --- a/restapi/operations/installer/register_cluster_parameters.go +++ b/restapi/operations/installer/register_cluster_parameters.go @@ -53,7 +53,7 @@ func (o *RegisterClusterParams) BindRequest(r *http.Request, route *middleware.M var body models.ClusterCreateParams if err := route.Consumer.Consume(r.Body, &body); err != nil { if err == io.EOF { - res = append(res, errors.Required("newClusterParams", "body")) + res = append(res, errors.Required("newClusterParams", "body", "")) } else { res = append(res, errors.NewParseError("newClusterParams", "body", "", err)) } @@ -68,7 +68,7 @@ func (o *RegisterClusterParams) BindRequest(r *http.Request, route *middleware.M } } } else { - res = append(res, errors.Required("newClusterParams", "body")) + res = append(res, errors.Required("newClusterParams", "body", "")) } if len(res) > 0 { return errors.CompositeValidationError(res...) 
diff --git a/restapi/operations/installer/register_host_parameters.go b/restapi/operations/installer/register_host_parameters.go index 8642ac1c0..02bf8c855 100644 --- a/restapi/operations/installer/register_host_parameters.go +++ b/restapi/operations/installer/register_host_parameters.go @@ -65,7 +65,7 @@ func (o *RegisterHostParams) BindRequest(r *http.Request, route *middleware.Matc var body models.HostCreateParams if err := route.Consumer.Consume(r.Body, &body); err != nil { if err == io.EOF { - res = append(res, errors.Required("newHostParams", "body")) + res = append(res, errors.Required("newHostParams", "body", "")) } else { res = append(res, errors.NewParseError("newHostParams", "body", "", err)) } @@ -80,7 +80,7 @@ func (o *RegisterHostParams) BindRequest(r *http.Request, route *middleware.Matc } } } else { - res = append(res, errors.Required("newHostParams", "body")) + res = append(res, errors.Required("newHostParams", "body", "")) } if len(res) > 0 { return errors.CompositeValidationError(res...) diff --git a/restapi/operations/installer/register_host_responses.go b/restapi/operations/installer/register_host_responses.go index cadb5865a..b9766c03c 100644 --- a/restapi/operations/installer/register_host_responses.go +++ b/restapi/operations/installer/register_host_responses.go @@ -101,6 +101,94 @@ func (o *RegisterHostBadRequest) WriteResponse(rw http.ResponseWriter, producer } } +// RegisterHostForbiddenCode is the HTTP code returned for type RegisterHostForbidden +const RegisterHostForbiddenCode int = 403 + +/*RegisterHostForbidden Error. 
+ +swagger:response registerHostForbidden +*/ +type RegisterHostForbidden struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewRegisterHostForbidden creates RegisterHostForbidden with default headers values +func NewRegisterHostForbidden() *RegisterHostForbidden { + + return &RegisterHostForbidden{} +} + +// WithPayload adds the payload to the register host forbidden response +func (o *RegisterHostForbidden) WithPayload(payload *models.Error) *RegisterHostForbidden { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the register host forbidden response +func (o *RegisterHostForbidden) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RegisterHostForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(403) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// RegisterHostNotFoundCode is the HTTP code returned for type RegisterHostNotFound +const RegisterHostNotFoundCode int = 404 + +/*RegisterHostNotFound Error. 
+ +swagger:response registerHostNotFound +*/ +type RegisterHostNotFound struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewRegisterHostNotFound creates RegisterHostNotFound with default headers values +func NewRegisterHostNotFound() *RegisterHostNotFound { + + return &RegisterHostNotFound{} +} + +// WithPayload adds the payload to the register host not found response +func (o *RegisterHostNotFound) WithPayload(payload *models.Error) *RegisterHostNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the register host not found response +func (o *RegisterHostNotFound) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *RegisterHostNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + // RegisterHostInternalServerErrorCode is the HTTP code returned for type RegisterHostInternalServerError const RegisterHostInternalServerErrorCode int = 500 diff --git a/restapi/operations/installer/reset_cluster.go b/restapi/operations/installer/reset_cluster.go new file mode 100644 index 000000000..6e0c443bf --- /dev/null +++ b/restapi/operations/installer/reset_cluster.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" +) + +// ResetClusterHandlerFunc turns a function with the right signature into a reset cluster handler +type ResetClusterHandlerFunc func(ResetClusterParams) middleware.Responder + +// Handle executing the request and returning a response +func (fn ResetClusterHandlerFunc) Handle(params ResetClusterParams) middleware.Responder { + return fn(params) +} + +// ResetClusterHandler interface for that can handle valid reset cluster params +type ResetClusterHandler interface { + Handle(ResetClusterParams) middleware.Responder +} + +// NewResetCluster creates a new http.Handler for the reset cluster operation +func NewResetCluster(ctx *middleware.Context, handler ResetClusterHandler) *ResetCluster { + return &ResetCluster{Context: ctx, Handler: handler} +} + +/*ResetCluster swagger:route POST /clusters/{cluster_id}/actions/reset installer resetCluster + +Resets a failed installation. + +*/ +type ResetCluster struct { + Context *middleware.Context + Handler ResetClusterHandler +} + +func (o *ResetCluster) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + var Params = NewResetClusterParams() + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params) // actually handle the request + + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/restapi/operations/installer/reset_cluster_parameters.go b/restapi/operations/installer/reset_cluster_parameters.go new file mode 100644 index 000000000..0fe77cf4c --- /dev/null +++ b/restapi/operations/installer/reset_cluster_parameters.go @@ -0,0 +1,91 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// NewResetClusterParams creates a new ResetClusterParams object +// no default values defined in spec. +func NewResetClusterParams() ResetClusterParams { + + return ResetClusterParams{} +} + +// ResetClusterParams contains all the bound params for the reset cluster operation +// typically these are obtained from a http.Request +// +// swagger:parameters ResetCluster +type ResetClusterParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClusterID strfmt.UUID +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewResetClusterParams() beforehand. +func (o *ResetClusterParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rClusterID, rhkClusterID, _ := route.Params.GetOK("cluster_id") + if err := o.bindClusterID(rClusterID, rhkClusterID, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// bindClusterID binds and validates parameter ClusterID from path. 
+func (o *ResetClusterParams) bindClusterID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("cluster_id", "path", "strfmt.UUID", raw) + } + o.ClusterID = *(value.(*strfmt.UUID)) + + if err := o.validateClusterID(formats); err != nil { + return err + } + + return nil +} + +// validateClusterID carries on validations for parameter ClusterID +func (o *ResetClusterParams) validateClusterID(formats strfmt.Registry) error { + + if err := validate.FormatOf("cluster_id", "path", "uuid", o.ClusterID.String(), formats); err != nil { + return err + } + return nil +} diff --git a/restapi/operations/installer/reset_cluster_responses.go b/restapi/operations/installer/reset_cluster_responses.go new file mode 100644 index 000000000..bef9fe99f --- /dev/null +++ b/restapi/operations/installer/reset_cluster_responses.go @@ -0,0 +1,190 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/filanov/bm-inventory/models" +) + +// ResetClusterAcceptedCode is the HTTP code returned for type ResetClusterAccepted +const ResetClusterAcceptedCode int = 202 + +/*ResetClusterAccepted Success. 
+ +swagger:response resetClusterAccepted +*/ +type ResetClusterAccepted struct { + + /* + In: Body + */ + Payload *models.Cluster `json:"body,omitempty"` +} + +// NewResetClusterAccepted creates ResetClusterAccepted with default headers values +func NewResetClusterAccepted() *ResetClusterAccepted { + + return &ResetClusterAccepted{} +} + +// WithPayload adds the payload to the reset cluster accepted response +func (o *ResetClusterAccepted) WithPayload(payload *models.Cluster) *ResetClusterAccepted { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the reset cluster accepted response +func (o *ResetClusterAccepted) SetPayload(payload *models.Cluster) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ResetClusterAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(202) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ResetClusterNotFoundCode is the HTTP code returned for type ResetClusterNotFound +const ResetClusterNotFoundCode int = 404 + +/*ResetClusterNotFound Error. 
+ +swagger:response resetClusterNotFound +*/ +type ResetClusterNotFound struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewResetClusterNotFound creates ResetClusterNotFound with default headers values +func NewResetClusterNotFound() *ResetClusterNotFound { + + return &ResetClusterNotFound{} +} + +// WithPayload adds the payload to the reset cluster not found response +func (o *ResetClusterNotFound) WithPayload(payload *models.Error) *ResetClusterNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the reset cluster not found response +func (o *ResetClusterNotFound) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ResetClusterNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ResetClusterConflictCode is the HTTP code returned for type ResetClusterConflict +const ResetClusterConflictCode int = 409 + +/*ResetClusterConflict Error. 
+ +swagger:response resetClusterConflict +*/ +type ResetClusterConflict struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewResetClusterConflict creates ResetClusterConflict with default headers values +func NewResetClusterConflict() *ResetClusterConflict { + + return &ResetClusterConflict{} +} + +// WithPayload adds the payload to the reset cluster conflict response +func (o *ResetClusterConflict) WithPayload(payload *models.Error) *ResetClusterConflict { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the reset cluster conflict response +func (o *ResetClusterConflict) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ResetClusterConflict) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(409) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ResetClusterInternalServerErrorCode is the HTTP code returned for type ResetClusterInternalServerError +const ResetClusterInternalServerErrorCode int = 500 + +/*ResetClusterInternalServerError Error. 
+ +swagger:response resetClusterInternalServerError +*/ +type ResetClusterInternalServerError struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewResetClusterInternalServerError creates ResetClusterInternalServerError with default headers values +func NewResetClusterInternalServerError() *ResetClusterInternalServerError { + + return &ResetClusterInternalServerError{} +} + +// WithPayload adds the payload to the reset cluster internal server error response +func (o *ResetClusterInternalServerError) WithPayload(payload *models.Error) *ResetClusterInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the reset cluster internal server error response +func (o *ResetClusterInternalServerError) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ResetClusterInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/restapi/operations/installer/reset_cluster_urlbuilder.go b/restapi/operations/installer/reset_cluster_urlbuilder.go new file mode 100644 index 000000000..748d3c70d --- /dev/null +++ b/restapi/operations/installer/reset_cluster_urlbuilder.go @@ -0,0 +1,101 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// ResetClusterURL generates an URL for the reset cluster operation +type ResetClusterURL struct { + ClusterID strfmt.UUID + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ResetClusterURL) WithBasePath(bp string) *ResetClusterURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ResetClusterURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ResetClusterURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/clusters/{cluster_id}/actions/reset" + + clusterID := o.ClusterID.String() + if clusterID != "" { + _path = strings.Replace(_path, "{cluster_id}", clusterID, -1) + } else { + return nil, errors.New("clusterId is required on ResetClusterURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/api/assisted-install/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ResetClusterURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ResetClusterURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query 
string +func (o *ResetClusterURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ResetClusterURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ResetClusterURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ResetClusterURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/restapi/operations/installer/set_debug_step_parameters.go b/restapi/operations/installer/set_debug_step_parameters.go index 228c4c5a5..2318f3469 100644 --- a/restapi/operations/installer/set_debug_step_parameters.go +++ b/restapi/operations/installer/set_debug_step_parameters.go @@ -75,7 +75,7 @@ func (o *SetDebugStepParams) BindRequest(r *http.Request, route *middleware.Matc var body models.DebugStep if err := route.Consumer.Consume(r.Body, &body); err != nil { if err == io.EOF { - res = append(res, errors.Required("step", "body")) + res = append(res, errors.Required("step", "body", "")) } else { res = append(res, errors.NewParseError("step", "body", "", err)) } @@ -90,7 +90,7 @@ func (o *SetDebugStepParams) BindRequest(r *http.Request, route *middleware.Matc } } } else { - res = append(res, errors.Required("step", "body")) + res = append(res, errors.Required("step", "body", "")) } if len(res) > 0 { return errors.CompositeValidationError(res...) 
diff --git a/restapi/operations/installer/update_cluster_parameters.go b/restapi/operations/installer/update_cluster_parameters.go index d347657ae..fb82acf9a 100644 --- a/restapi/operations/installer/update_cluster_parameters.go +++ b/restapi/operations/installer/update_cluster_parameters.go @@ -60,7 +60,7 @@ func (o *UpdateClusterParams) BindRequest(r *http.Request, route *middleware.Mat var body models.ClusterUpdateParams if err := route.Consumer.Consume(r.Body, &body); err != nil { if err == io.EOF { - res = append(res, errors.Required("clusterUpdateParams", "body")) + res = append(res, errors.Required("clusterUpdateParams", "body", "")) } else { res = append(res, errors.NewParseError("clusterUpdateParams", "body", "", err)) } @@ -75,7 +75,7 @@ func (o *UpdateClusterParams) BindRequest(r *http.Request, route *middleware.Mat } } } else { - res = append(res, errors.Required("clusterUpdateParams", "body")) + res = append(res, errors.Required("clusterUpdateParams", "body", "")) } rClusterID, rhkClusterID, _ := route.Params.GetOK("cluster_id") if err := o.bindClusterID(rClusterID, rhkClusterID, route.Formats); err != nil { diff --git a/restapi/operations/installer/update_host_install_progress.go b/restapi/operations/installer/update_host_install_progress.go index 3bfd146ab..7d529bfa7 100644 --- a/restapi/operations/installer/update_host_install_progress.go +++ b/restapi/operations/installer/update_host_install_progress.go @@ -29,7 +29,7 @@ func NewUpdateHostInstallProgress(ctx *middleware.Context, handler UpdateHostIns return &UpdateHostInstallProgress{Context: ctx, Handler: handler} } -/*UpdateHostInstallProgress swagger:route PUT /clusters/{clusterId}/hosts/{hostId}/progress installer updateHostInstallProgress +/*UpdateHostInstallProgress swagger:route PUT /clusters/{cluster_id}/hosts/{host_id}/progress installer updateHostInstallProgress Update installation progress diff --git a/restapi/operations/installer/update_host_install_progress_parameters.go 
b/restapi/operations/installer/update_host_install_progress_parameters.go index 9095850c8..c8160bb43 100644 --- a/restapi/operations/installer/update_host_install_progress_parameters.go +++ b/restapi/operations/installer/update_host_install_progress_parameters.go @@ -43,7 +43,7 @@ type UpdateHostInstallProgressParams struct { Required: true In: body */ - HostInstallProgressParams models.HostInstallProgressParams + HostProgress *models.HostProgress /*The ID of the host to retrieve Required: true In: path @@ -60,19 +60,19 @@ func (o *UpdateHostInstallProgressParams) BindRequest(r *http.Request, route *mi o.HTTPRequest = r - rClusterID, rhkClusterID, _ := route.Params.GetOK("clusterId") + rClusterID, rhkClusterID, _ := route.Params.GetOK("cluster_id") if err := o.bindClusterID(rClusterID, rhkClusterID, route.Formats); err != nil { res = append(res, err) } if runtime.HasBody(r) { defer r.Body.Close() - var body models.HostInstallProgressParams + var body models.HostProgress if err := route.Consumer.Consume(r.Body, &body); err != nil { if err == io.EOF { - res = append(res, errors.Required("hostInstallProgressParams", "body")) + res = append(res, errors.Required("hostProgress", "body", "")) } else { - res = append(res, errors.NewParseError("hostInstallProgressParams", "body", "", err)) + res = append(res, errors.NewParseError("hostProgress", "body", "", err)) } } else { // validate body object @@ -81,13 +81,13 @@ func (o *UpdateHostInstallProgressParams) BindRequest(r *http.Request, route *mi } if len(res) == 0 { - o.HostInstallProgressParams = body + o.HostProgress = &body } } } else { - res = append(res, errors.Required("hostInstallProgressParams", "body")) + res = append(res, errors.Required("hostProgress", "body", "")) } - rHostID, rhkHostID, _ := route.Params.GetOK("hostId") + rHostID, rhkHostID, _ := route.Params.GetOK("host_id") if err := o.bindHostID(rHostID, rhkHostID, route.Formats); err != nil { res = append(res, err) } @@ -111,7 +111,7 @@ func (o 
*UpdateHostInstallProgressParams) bindClusterID(rawData []string, hasKey // Format: uuid value, err := formats.Parse("uuid", raw) if err != nil { - return errors.InvalidType("clusterId", "path", "strfmt.UUID", raw) + return errors.InvalidType("cluster_id", "path", "strfmt.UUID", raw) } o.ClusterID = *(value.(*strfmt.UUID)) @@ -125,7 +125,7 @@ func (o *UpdateHostInstallProgressParams) bindClusterID(rawData []string, hasKey // validateClusterID carries on validations for parameter ClusterID func (o *UpdateHostInstallProgressParams) validateClusterID(formats strfmt.Registry) error { - if err := validate.FormatOf("clusterId", "path", "uuid", o.ClusterID.String(), formats); err != nil { + if err := validate.FormatOf("cluster_id", "path", "uuid", o.ClusterID.String(), formats); err != nil { return err } return nil @@ -144,7 +144,7 @@ func (o *UpdateHostInstallProgressParams) bindHostID(rawData []string, hasKey bo // Format: uuid value, err := formats.Parse("uuid", raw) if err != nil { - return errors.InvalidType("hostId", "path", "strfmt.UUID", raw) + return errors.InvalidType("host_id", "path", "strfmt.UUID", raw) } o.HostID = *(value.(*strfmt.UUID)) @@ -158,7 +158,7 @@ func (o *UpdateHostInstallProgressParams) bindHostID(rawData []string, hasKey bo // validateHostID carries on validations for parameter HostID func (o *UpdateHostInstallProgressParams) validateHostID(formats strfmt.Registry) error { - if err := validate.FormatOf("hostId", "path", "uuid", o.HostID.String(), formats); err != nil { + if err := validate.FormatOf("host_id", "path", "uuid", o.HostID.String(), formats); err != nil { return err } return nil diff --git a/restapi/operations/installer/update_host_install_progress_responses.go b/restapi/operations/installer/update_host_install_progress_responses.go index 5da1beafb..56e6db56e 100644 --- a/restapi/operations/installer/update_host_install_progress_responses.go +++ b/restapi/operations/installer/update_host_install_progress_responses.go @@ -9,6 +9,8 @@ 
import ( "net/http" "github.com/go-openapi/runtime" + + "github.com/filanov/bm-inventory/models" ) // UpdateHostInstallProgressOKCode is the HTTP code returned for type UpdateHostInstallProgressOK @@ -34,3 +36,91 @@ func (o *UpdateHostInstallProgressOK) WriteResponse(rw http.ResponseWriter, prod rw.WriteHeader(200) } + +// UpdateHostInstallProgressNotFoundCode is the HTTP code returned for type UpdateHostInstallProgressNotFound +const UpdateHostInstallProgressNotFoundCode int = 404 + +/*UpdateHostInstallProgressNotFound Error. + +swagger:response updateHostInstallProgressNotFound +*/ +type UpdateHostInstallProgressNotFound struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewUpdateHostInstallProgressNotFound creates UpdateHostInstallProgressNotFound with default headers values +func NewUpdateHostInstallProgressNotFound() *UpdateHostInstallProgressNotFound { + + return &UpdateHostInstallProgressNotFound{} +} + +// WithPayload adds the payload to the update host install progress not found response +func (o *UpdateHostInstallProgressNotFound) WithPayload(payload *models.Error) *UpdateHostInstallProgressNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the update host install progress not found response +func (o *UpdateHostInstallProgressNotFound) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *UpdateHostInstallProgressNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// UpdateHostInstallProgressInternalServerErrorCode is the HTTP code returned for type UpdateHostInstallProgressInternalServerError +const UpdateHostInstallProgressInternalServerErrorCode int = 500 + +/*UpdateHostInstallProgressInternalServerError Error. 
+ +swagger:response updateHostInstallProgressInternalServerError +*/ +type UpdateHostInstallProgressInternalServerError struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewUpdateHostInstallProgressInternalServerError creates UpdateHostInstallProgressInternalServerError with default headers values +func NewUpdateHostInstallProgressInternalServerError() *UpdateHostInstallProgressInternalServerError { + + return &UpdateHostInstallProgressInternalServerError{} +} + +// WithPayload adds the payload to the update host install progress internal server error response +func (o *UpdateHostInstallProgressInternalServerError) WithPayload(payload *models.Error) *UpdateHostInstallProgressInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the update host install progress internal server error response +func (o *UpdateHostInstallProgressInternalServerError) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *UpdateHostInstallProgressInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/restapi/operations/installer/update_host_install_progress_urlbuilder.go b/restapi/operations/installer/update_host_install_progress_urlbuilder.go index 11c7fa4e9..90bb44288 100644 --- a/restapi/operations/installer/update_host_install_progress_urlbuilder.go +++ b/restapi/operations/installer/update_host_install_progress_urlbuilder.go @@ -43,18 +43,18 @@ func (o *UpdateHostInstallProgressURL) SetBasePath(bp string) { func (o *UpdateHostInstallProgressURL) Build() (*url.URL, error) { var _result url.URL - var _path = "/clusters/{clusterId}/hosts/{hostId}/progress" + var _path = "/clusters/{cluster_id}/hosts/{host_id}/progress" 
clusterID := o.ClusterID.String() if clusterID != "" { - _path = strings.Replace(_path, "{clusterId}", clusterID, -1) + _path = strings.Replace(_path, "{cluster_id}", clusterID, -1) } else { return nil, errors.New("clusterId is required on UpdateHostInstallProgressURL") } hostID := o.HostID.String() if hostID != "" { - _path = strings.Replace(_path, "{hostId}", hostID, -1) + _path = strings.Replace(_path, "{host_id}", hostID, -1) } else { return nil, errors.New("hostId is required on UpdateHostInstallProgressURL") } diff --git a/restapi/operations/installer/upload_cluster_ingress_cert.go b/restapi/operations/installer/upload_cluster_ingress_cert.go new file mode 100644 index 000000000..a305938d3 --- /dev/null +++ b/restapi/operations/installer/upload_cluster_ingress_cert.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" +) + +// UploadClusterIngressCertHandlerFunc turns a function with the right signature into a upload cluster ingress cert handler +type UploadClusterIngressCertHandlerFunc func(UploadClusterIngressCertParams) middleware.Responder + +// Handle executing the request and returning a response +func (fn UploadClusterIngressCertHandlerFunc) Handle(params UploadClusterIngressCertParams) middleware.Responder { + return fn(params) +} + +// UploadClusterIngressCertHandler interface for that can handle valid upload cluster ingress cert params +type UploadClusterIngressCertHandler interface { + Handle(UploadClusterIngressCertParams) middleware.Responder +} + +// NewUploadClusterIngressCert creates a new http.Handler for the upload cluster ingress cert operation +func NewUploadClusterIngressCert(ctx *middleware.Context, handler UploadClusterIngressCertHandler) *UploadClusterIngressCert { + return 
&UploadClusterIngressCert{Context: ctx, Handler: handler} +} + +/*UploadClusterIngressCert swagger:route POST /clusters/{cluster_id}/uploads/ingress-cert installer uploadClusterIngressCert + +Transfer the ingress certificate for the cluster. + +*/ +type UploadClusterIngressCert struct { + Context *middleware.Context + Handler UploadClusterIngressCertHandler +} + +func (o *UploadClusterIngressCert) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + var Params = NewUploadClusterIngressCertParams() + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params) // actually handle the request + + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/restapi/operations/installer/upload_cluster_ingress_cert_parameters.go b/restapi/operations/installer/upload_cluster_ingress_cert_parameters.go new file mode 100644 index 000000000..b05f2f8e0 --- /dev/null +++ b/restapi/operations/installer/upload_cluster_ingress_cert_parameters.go @@ -0,0 +1,122 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" + + "github.com/filanov/bm-inventory/models" +) + +// NewUploadClusterIngressCertParams creates a new UploadClusterIngressCertParams object +// no default values defined in spec. 
+func NewUploadClusterIngressCertParams() UploadClusterIngressCertParams { + + return UploadClusterIngressCertParams{} +} + +// UploadClusterIngressCertParams contains all the bound params for the upload cluster ingress cert operation +// typically these are obtained from a http.Request +// +// swagger:parameters UploadClusterIngressCert +type UploadClusterIngressCertParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: path + */ + ClusterID strfmt.UUID + /* + Required: true + In: body + */ + IngressCertParams models.IngressCertParams +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewUploadClusterIngressCertParams() beforehand. +func (o *UploadClusterIngressCertParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + rClusterID, rhkClusterID, _ := route.Params.GetOK("cluster_id") + if err := o.bindClusterID(rClusterID, rhkClusterID, route.Formats); err != nil { + res = append(res, err) + } + + if runtime.HasBody(r) { + defer r.Body.Close() + var body models.IngressCertParams + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("ingressCertParams", "body", "")) + } else { + res = append(res, errors.NewParseError("ingressCertParams", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.IngressCertParams = body + } + } + } else { + res = append(res, errors.Required("ingressCertParams", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// bindClusterID binds and validates parameter ClusterID from path. +func (o *UploadClusterIngressCertParams) bindClusterID(rawData []string, hasKey bool, formats strfmt.Registry) error { + var raw string + if len(rawData) > 0 { + raw = rawData[len(rawData)-1] + } + + // Required: true + // Parameter is provided by construction from the route + + // Format: uuid + value, err := formats.Parse("uuid", raw) + if err != nil { + return errors.InvalidType("cluster_id", "path", "strfmt.UUID", raw) + } + o.ClusterID = *(value.(*strfmt.UUID)) + + if err := o.validateClusterID(formats); err != nil { + return err + } + + return nil +} + +// validateClusterID carries on validations for parameter ClusterID +func (o *UploadClusterIngressCertParams) validateClusterID(formats strfmt.Registry) error { + + if err := validate.FormatOf("cluster_id", "path", "uuid", o.ClusterID.String(), formats); err != nil { + return err + } + return nil +} diff --git a/restapi/operations/installer/upload_cluster_ingress_cert_responses.go b/restapi/operations/installer/upload_cluster_ingress_cert_responses.go new file mode 100644 index 000000000..09f300e96 --- /dev/null +++ b/restapi/operations/installer/upload_cluster_ingress_cert_responses.go @@ -0,0 +1,170 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/filanov/bm-inventory/models" +) + +// UploadClusterIngressCertCreatedCode is the HTTP code returned for type UploadClusterIngressCertCreated +const UploadClusterIngressCertCreatedCode int = 201 + +/*UploadClusterIngressCertCreated Success. 
+ +swagger:response uploadClusterIngressCertCreated +*/ +type UploadClusterIngressCertCreated struct { +} + +// NewUploadClusterIngressCertCreated creates UploadClusterIngressCertCreated with default headers values +func NewUploadClusterIngressCertCreated() *UploadClusterIngressCertCreated { + + return &UploadClusterIngressCertCreated{} +} + +// WriteResponse to the client +func (o *UploadClusterIngressCertCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(201) +} + +// UploadClusterIngressCertBadRequestCode is the HTTP code returned for type UploadClusterIngressCertBadRequest +const UploadClusterIngressCertBadRequestCode int = 400 + +/*UploadClusterIngressCertBadRequest Error. + +swagger:response uploadClusterIngressCertBadRequest +*/ +type UploadClusterIngressCertBadRequest struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewUploadClusterIngressCertBadRequest creates UploadClusterIngressCertBadRequest with default headers values +func NewUploadClusterIngressCertBadRequest() *UploadClusterIngressCertBadRequest { + + return &UploadClusterIngressCertBadRequest{} +} + +// WithPayload adds the payload to the upload cluster ingress cert bad request response +func (o *UploadClusterIngressCertBadRequest) WithPayload(payload *models.Error) *UploadClusterIngressCertBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the upload cluster ingress cert bad request response +func (o *UploadClusterIngressCertBadRequest) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *UploadClusterIngressCertBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the 
recovery middleware deal with this + } + } +} + +// UploadClusterIngressCertNotFoundCode is the HTTP code returned for type UploadClusterIngressCertNotFound +const UploadClusterIngressCertNotFoundCode int = 404 + +/*UploadClusterIngressCertNotFound Error. + +swagger:response uploadClusterIngressCertNotFound +*/ +type UploadClusterIngressCertNotFound struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewUploadClusterIngressCertNotFound creates UploadClusterIngressCertNotFound with default headers values +func NewUploadClusterIngressCertNotFound() *UploadClusterIngressCertNotFound { + + return &UploadClusterIngressCertNotFound{} +} + +// WithPayload adds the payload to the upload cluster ingress cert not found response +func (o *UploadClusterIngressCertNotFound) WithPayload(payload *models.Error) *UploadClusterIngressCertNotFound { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the upload cluster ingress cert not found response +func (o *UploadClusterIngressCertNotFound) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *UploadClusterIngressCertNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(404) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// UploadClusterIngressCertInternalServerErrorCode is the HTTP code returned for type UploadClusterIngressCertInternalServerError +const UploadClusterIngressCertInternalServerErrorCode int = 500 + +/*UploadClusterIngressCertInternalServerError Error. 
+ +swagger:response uploadClusterIngressCertInternalServerError +*/ +type UploadClusterIngressCertInternalServerError struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewUploadClusterIngressCertInternalServerError creates UploadClusterIngressCertInternalServerError with default headers values +func NewUploadClusterIngressCertInternalServerError() *UploadClusterIngressCertInternalServerError { + + return &UploadClusterIngressCertInternalServerError{} +} + +// WithPayload adds the payload to the upload cluster ingress cert internal server error response +func (o *UploadClusterIngressCertInternalServerError) WithPayload(payload *models.Error) *UploadClusterIngressCertInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the upload cluster ingress cert internal server error response +func (o *UploadClusterIngressCertInternalServerError) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *UploadClusterIngressCertInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/restapi/operations/installer/upload_cluster_ingress_cert_urlbuilder.go b/restapi/operations/installer/upload_cluster_ingress_cert_urlbuilder.go new file mode 100644 index 000000000..d6575130f --- /dev/null +++ b/restapi/operations/installer/upload_cluster_ingress_cert_urlbuilder.go @@ -0,0 +1,101 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package installer + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" + "strings" + + "github.com/go-openapi/strfmt" +) + +// UploadClusterIngressCertURL generates an URL for the upload cluster ingress cert operation +type UploadClusterIngressCertURL struct { + ClusterID strfmt.UUID + + _basePath string + // avoid unkeyed usage + _ struct{} +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *UploadClusterIngressCertURL) WithBasePath(bp string) *UploadClusterIngressCertURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *UploadClusterIngressCertURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *UploadClusterIngressCertURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/clusters/{cluster_id}/uploads/ingress-cert" + + clusterID := o.ClusterID.String() + if clusterID != "" { + _path = strings.Replace(_path, "{cluster_id}", clusterID, -1) + } else { + return nil, errors.New("clusterId is required on UploadClusterIngressCertURL") + } + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/api/assisted-install/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *UploadClusterIngressCertURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o 
*UploadClusterIngressCertURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *UploadClusterIngressCertURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on UploadClusterIngressCertURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on UploadClusterIngressCertURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *UploadClusterIngressCertURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/restapi/operations/managed_domains/list_managed_domains.go b/restapi/operations/managed_domains/list_managed_domains.go new file mode 100644 index 000000000..1b11442f2 --- /dev/null +++ b/restapi/operations/managed_domains/list_managed_domains.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package managed_domains + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" +) + +// ListManagedDomainsHandlerFunc turns a function with the right signature into a list managed domains handler +type ListManagedDomainsHandlerFunc func(ListManagedDomainsParams) middleware.Responder + +// Handle executing the request and returning a response +func (fn ListManagedDomainsHandlerFunc) Handle(params ListManagedDomainsParams) middleware.Responder { + return fn(params) +} + +// ListManagedDomainsHandler interface for that can handle valid list managed domains params +type ListManagedDomainsHandler interface { + Handle(ListManagedDomainsParams) middleware.Responder +} + +// NewListManagedDomains creates a new http.Handler for the list managed domains operation +func NewListManagedDomains(ctx *middleware.Context, handler ListManagedDomainsHandler) *ListManagedDomains { + return &ListManagedDomains{Context: ctx, Handler: handler} +} + +/*ListManagedDomains swagger:route GET /domains managed_domains listManagedDomains + +List of managed DNS domains + +*/ +type ListManagedDomains struct { + Context *middleware.Context + Handler ListManagedDomainsHandler +} + +func (o *ListManagedDomains) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + var Params = NewListManagedDomainsParams() + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params) // actually handle the request + + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/restapi/operations/managed_domains/list_managed_domains_parameters.go b/restapi/operations/managed_domains/list_managed_domains_parameters.go new file mode 100644 index 000000000..3ae0ee70b --- /dev/null +++ 
b/restapi/operations/managed_domains/list_managed_domains_parameters.go @@ -0,0 +1,45 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package managed_domains + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" +) + +// NewListManagedDomainsParams creates a new ListManagedDomainsParams object +// no default values defined in spec. +func NewListManagedDomainsParams() ListManagedDomainsParams { + + return ListManagedDomainsParams{} +} + +// ListManagedDomainsParams contains all the bound params for the list managed domains operation +// typically these are obtained from a http.Request +// +// swagger:parameters ListManagedDomains +type ListManagedDomainsParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewListManagedDomainsParams() beforehand. +func (o *ListManagedDomainsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/restapi/operations/managed_domains/list_managed_domains_responses.go b/restapi/operations/managed_domains/list_managed_domains_responses.go new file mode 100644 index 000000000..8f64f391f --- /dev/null +++ b/restapi/operations/managed_domains/list_managed_domains_responses.go @@ -0,0 +1,105 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package managed_domains + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/filanov/bm-inventory/models" +) + +// ListManagedDomainsOKCode is the HTTP code returned for type ListManagedDomainsOK +const ListManagedDomainsOKCode int = 200 + +/*ListManagedDomainsOK Success. + +swagger:response listManagedDomainsOK +*/ +type ListManagedDomainsOK struct { + + /* + In: Body + */ + Payload models.ListManagedDomains `json:"body,omitempty"` +} + +// NewListManagedDomainsOK creates ListManagedDomainsOK with default headers values +func NewListManagedDomainsOK() *ListManagedDomainsOK { + + return &ListManagedDomainsOK{} +} + +// WithPayload adds the payload to the list managed domains o k response +func (o *ListManagedDomainsOK) WithPayload(payload models.ListManagedDomains) *ListManagedDomainsOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the list managed domains o k response +func (o *ListManagedDomainsOK) SetPayload(payload models.ListManagedDomains) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ListManagedDomainsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty array + payload = models.ListManagedDomains{} + } + + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// ListManagedDomainsInternalServerErrorCode is the HTTP code returned for type ListManagedDomainsInternalServerError +const ListManagedDomainsInternalServerErrorCode int = 500 + +/*ListManagedDomainsInternalServerError Error. 
+ +swagger:response listManagedDomainsInternalServerError +*/ +type ListManagedDomainsInternalServerError struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewListManagedDomainsInternalServerError creates ListManagedDomainsInternalServerError with default headers values +func NewListManagedDomainsInternalServerError() *ListManagedDomainsInternalServerError { + + return &ListManagedDomainsInternalServerError{} +} + +// WithPayload adds the payload to the list managed domains internal server error response +func (o *ListManagedDomainsInternalServerError) WithPayload(payload *models.Error) *ListManagedDomainsInternalServerError { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the list managed domains internal server error response +func (o *ListManagedDomainsInternalServerError) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ListManagedDomainsInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(500) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/restapi/operations/managed_domains/list_managed_domains_urlbuilder.go b/restapi/operations/managed_domains/list_managed_domains_urlbuilder.go new file mode 100644 index 000000000..09847c39a --- /dev/null +++ b/restapi/operations/managed_domains/list_managed_domains_urlbuilder.go @@ -0,0 +1,87 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package managed_domains + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// ListManagedDomainsURL generates an URL for the list managed domains operation +type ListManagedDomainsURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ListManagedDomainsURL) WithBasePath(bp string) *ListManagedDomainsURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ListManagedDomainsURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ListManagedDomainsURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/domains" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/api/assisted-install/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ListManagedDomainsURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ListManagedDomainsURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ListManagedDomainsURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ListManagedDomainsURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on 
ListManagedDomainsURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ListManagedDomainsURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/restapi/operations/versions/list_component_versions.go b/restapi/operations/versions/list_component_versions.go new file mode 100644 index 000000000..5a09ea4d3 --- /dev/null +++ b/restapi/operations/versions/list_component_versions.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package versions + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" +) + +// ListComponentVersionsHandlerFunc turns a function with the right signature into a list component versions handler +type ListComponentVersionsHandlerFunc func(ListComponentVersionsParams) middleware.Responder + +// Handle executing the request and returning a response +func (fn ListComponentVersionsHandlerFunc) Handle(params ListComponentVersionsParams) middleware.Responder { + return fn(params) +} + +// ListComponentVersionsHandler interface for that can handle valid list component versions params +type ListComponentVersionsHandler interface { + Handle(ListComponentVersionsParams) middleware.Responder +} + +// NewListComponentVersions creates a new http.Handler for the list component versions operation +func NewListComponentVersions(ctx *middleware.Context, handler ListComponentVersionsHandler) *ListComponentVersions { + return &ListComponentVersions{Context: ctx, Handler: handler} +} + +/*ListComponentVersions swagger:route GET /component_versions versions listComponentVersions + +List of componenets versions + +*/ +type ListComponentVersions struct { + Context *middleware.Context 
+ Handler ListComponentVersionsHandler +} + +func (o *ListComponentVersions) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + var Params = NewListComponentVersionsParams() + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params) // actually handle the request + + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/restapi/operations/versions/list_component_versions_parameters.go b/restapi/operations/versions/list_component_versions_parameters.go new file mode 100644 index 000000000..65f6f7ee1 --- /dev/null +++ b/restapi/operations/versions/list_component_versions_parameters.go @@ -0,0 +1,45 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package versions + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" +) + +// NewListComponentVersionsParams creates a new ListComponentVersionsParams object +// no default values defined in spec. +func NewListComponentVersionsParams() ListComponentVersionsParams { + + return ListComponentVersionsParams{} +} + +// ListComponentVersionsParams contains all the bound params for the list component versions operation +// typically these are obtained from a http.Request +// +// swagger:parameters ListComponentVersions +type ListComponentVersionsParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. 
+// +// To ensure default values, the struct must have been initialized with NewListComponentVersionsParams() beforehand. +func (o *ListComponentVersionsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/restapi/operations/versions/list_component_versions_responses.go b/restapi/operations/versions/list_component_versions_responses.go new file mode 100644 index 000000000..9bcf7cfff --- /dev/null +++ b/restapi/operations/versions/list_component_versions_responses.go @@ -0,0 +1,58 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package versions + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/filanov/bm-inventory/models" +) + +// ListComponentVersionsOKCode is the HTTP code returned for type ListComponentVersionsOK +const ListComponentVersionsOKCode int = 200 + +/*ListComponentVersionsOK Success. 
+ +swagger:response listComponentVersionsOK +*/ +type ListComponentVersionsOK struct { + + /* + In: Body + */ + Payload *models.ListVersions `json:"body,omitempty"` +} + +// NewListComponentVersionsOK creates ListComponentVersionsOK with default headers values +func NewListComponentVersionsOK() *ListComponentVersionsOK { + + return &ListComponentVersionsOK{} +} + +// WithPayload adds the payload to the list component versions o k response +func (o *ListComponentVersionsOK) WithPayload(payload *models.ListVersions) *ListComponentVersionsOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the list component versions o k response +func (o *ListComponentVersionsOK) SetPayload(payload *models.ListVersions) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ListComponentVersionsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/restapi/operations/versions/list_component_versions_urlbuilder.go b/restapi/operations/versions/list_component_versions_urlbuilder.go new file mode 100644 index 000000000..da0dbb219 --- /dev/null +++ b/restapi/operations/versions/list_component_versions_urlbuilder.go @@ -0,0 +1,87 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package versions + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// ListComponentVersionsURL generates an URL for the list component versions operation +type ListComponentVersionsURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *ListComponentVersionsURL) WithBasePath(bp string) *ListComponentVersionsURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ListComponentVersionsURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ListComponentVersionsURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/component_versions" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/api/assisted-install/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ListComponentVersionsURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ListComponentVersionsURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ListComponentVersionsURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ListComponentVersionsURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ListComponentVersionsURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ListComponentVersionsURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/restapi/server.go 
b/restapi/server.go index 25454121a..77b66a099 100644 --- a/restapi/server.go +++ b/restapi/server.go @@ -2,4 +2,4 @@ package restapi -// this file is intentionally empty. otherwise go-swagger will generate a server witch we don't want +// this file is intentionally empty. Otherwise go-swagger will generate a server which we don't want diff --git a/skipper.yaml b/skipper.yaml index 5122ed526..9e776d6e7 100644 --- a/skipper.yaml +++ b/skipper.yaml @@ -4,14 +4,22 @@ build-container-image: bm-inventory-build containers: bm-inventory-build: Dockerfile.bm-inventory-build bm-inventory: Dockerfile.bm-inventory - s3-object-expirer: Dockerfile.s3-object-expirer volumes: - $HOME/.cache/go-build:/go/pkg/mod - - $HOME/.docker/config.json:$HOME/.docker/config.json + - $HOME/.cache/golangci-lint:$HOME/.cache/golangci-lint - $HOME/.minikube:$HOME/.minikube - $HOME/.kube/:$HOME/.kube + - $HOME/.docker/:$HOME/.docker - /var/run/libvirt/libvirt-sock:/var/run/libvirt/libvirt-sock - /var/lib/libvirt/:/var/lib/libvirt/ + - $KUBECONFIG:$KUBECONFIG env: SERVICE: $SERVICE - OBJEXP: $OBJEXP + KUBECONFIG: $KUBECONFIG + NAMESPACE: $NAMESPACE + APPLY_NAMESPACE: $APPLY_NAMESPACE + TARGET: $TARGET + INGRESS_DOMAIN: $INGRESS_DOMAIN + TEST_FLAGS: $TEST_FLAGS + FOCUS: $FOCUS + GOCACHE: "/go/pkg/mod" diff --git a/subsystem/cluster_test.go b/subsystem/cluster_test.go index be6c01200..28cf504bb 100644 --- a/subsystem/cluster_test.go +++ b/subsystem/cluster_test.go @@ -3,11 +3,15 @@ package subsystem import ( "context" "encoding/json" + "fmt" "io/ioutil" "os" "reflect" "time" + "github.com/filanov/bm-inventory/internal/bminventory" + "github.com/filanov/bm-inventory/internal/host" + "github.com/alecthomas/units" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" @@ -19,9 +23,47 @@ import ( "github.com/filanov/bm-inventory/models" ) +// #nosec const ( clusterInsufficientStateInfo = "cluster is insufficient, exactly 3 known master hosts are needed for installation" clusterReadyStateInfo = 
"Cluster ready to be installed" + pullSecret = "{\"auths\":{\"cloud.openshift.com\":{\"auth\":\"dXNlcjpwYXNzd29yZAo=\",\"email\":\"r@r.com\"}}}" + IgnoreStateInfo = "IgnoreStateInfo" + clusterErrorInfo = "cluster %s has hosts in error" + clusterResetStateInfo = "cluster was reset by user" +) + +const ( + validDiskSize = int64(128849018880) +) + +var ( + validHwInfo = &models.Inventory{ + CPU: &models.CPU{Count: 16}, + Memory: &models.Memory{PhysicalBytes: int64(32 * units.GiB)}, + Disks: []*models.Disk{ + {DriveType: "SSD", Name: "loop0", SizeBytes: validDiskSize}, + {DriveType: "HDD", Name: "sdb", SizeBytes: validDiskSize}}, + Interfaces: []*models.Interface{ + { + IPV4Addresses: []string{ + "1.2.3.4/24", + }, + }, + }, + SystemVendor: &models.SystemVendor{Manufacturer: "manu", ProductName: "prod", SerialNumber: "3534"}, + } + validFreeAddresses = models.FreeNetworksAddresses{ + { + Network: "1.2.3.0/24", + FreeAddresses: []strfmt.IPv4{ + "1.2.3.8", + "1.2.3.9", + "1.2.3.5", + "1.2.3.6", + }, + }, + } ) var _ = Describe("Cluster tests", func() { @@ -36,13 +78,14 @@ var _ = Describe("Cluster tests", func() { BeforeEach(func() { cluster, err = bmclient.Installer.RegisterCluster(ctx, &installer.RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ - Name: swag.String("test cluster"), - OpenshiftVersion: swag.String("4.4"), + Name: swag.String("test-cluster"), + OpenshiftVersion: swag.String("4.5"), }, }) Expect(err).NotTo(HaveOccurred()) Expect(swag.StringValue(cluster.GetPayload().Status)).Should(Equal("insufficient")) Expect(swag.StringValue(cluster.GetPayload().StatusInfo)).Should(Equal(clusterInsufficientStateInfo)) + Expect(cluster.GetPayload().StatusUpdatedAt).ShouldNot(Equal(strfmt.DateTime(time.Time{}))) }) JustBeforeEach(func() { @@ -81,15 +124,15 @@ var _ = Describe("Cluster tests", func() { c, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ ClusterUpdateParams: &models.ClusterUpdateParams{ - SSHPublicKey: 
publicKey, + SSHPublicKey: &publicKey, HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ { ID: *host1.ID, - Role: "master", + Role: models.HostRoleUpdateParamsMaster, }, { ID: *host2.ID, - Role: "worker", + Role: models.HostRoleUpdateParamsWorker, }, }, }, @@ -99,18 +142,91 @@ var _ = Describe("Cluster tests", func() { Expect(c.GetPayload().SSHPublicKey).Should(Equal(publicKey)) h := getHost(clusterID, *host1.ID) - Expect(h.Role).Should(Equal("master")) + Expect(h.Role).Should(Equal(models.HostRole(models.HostRoleUpdateParamsMaster))) h = getHost(clusterID, *host2.ID) - Expect(h.Role).Should(Equal("worker")) + Expect(h.Role).Should(Equal(models.HostRole(models.HostRoleUpdateParamsWorker))) }) }) -var _ = Describe("system-test cluster install", func() { +func waitForClusterState(ctx context.Context, clusterID strfmt.UUID, state string, timeout time.Duration, stateInfo string) { + log.Infof("Waiting for cluster %s status %s", clusterID, state) + for start := time.Now(); time.Since(start) < timeout; { + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + if swag.StringValue(c.Status) == state { + break + } + time.Sleep(time.Second) + } + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + Expect(swag.StringValue(c.Status)).Should(Equal(state)) + if stateInfo != IgnoreStateInfo { + Expect(swag.StringValue(c.StatusInfo)).Should(Equal(stateInfo)) + } +} + +func waitForHostState(ctx context.Context, clusterID strfmt.UUID, hostID strfmt.UUID, state string, timeout time.Duration) { + log.Infof("Waiting for host %s state %s", hostID, state) + for start := time.Now(); time.Since(start) < timeout; { + rep, err := bmclient.Installer.GetHost(ctx, &installer.GetHostParams{ClusterID: clusterID, HostID: hostID}) + Expect(err).NotTo(HaveOccurred()) + c := 
rep.GetPayload() + if swag.StringValue(c.Status) == state { + break + } + time.Sleep(time.Second) + } + rep, err := bmclient.Installer.GetHost(ctx, &installer.GetHostParams{ClusterID: clusterID, HostID: hostID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + ExpectWithOffset(1, swag.StringValue(c.Status)).Should(Equal(state)) +} + +func waitForClusterInstallationToStart(clusterID strfmt.UUID) { + waitForClusterState(context.Background(), clusterID, models.ClusterStatusPreparingForInstallation, + 10*time.Second, IgnoreStateInfo) + waitForClusterState(context.Background(), clusterID, models.ClusterStatusInstalling, + 180*time.Second, "Installation in progress") +} + +func installCluster(clusterID strfmt.UUID) { + ctx := context.Background() + _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + Expect(len(c.Hosts)).Should(Equal(4)) + for _, host := range c.Hosts { + Expect(swag.StringValue(host.Status)).Should(Equal("installing")) + } + + for _, host := range c.Hosts { + updateProgress(*host.ID, clusterID, models.HostStageDone) + } + + waitForClusterState(ctx, clusterID, "finalizing", defaultWaitForClusterStateTimeout, "Finalizing cluster installation") + + success := true + _, err = bmclient.Installer.CompleteInstallation(ctx, + &installer.CompleteInstallationParams{ClusterID: clusterID, CompletionParams: &models.CompletionParams{IsSuccess: &success, ErrorInfo: ""}}) + Expect(err).NotTo(HaveOccurred()) + +} + +var _ = Describe("cluster install", func() { var ( ctx = context.Background() cluster *models.Cluster validDiskSize = int64(128849018880) + clusterCIDR = "10.128.0.0/14" + serviceCIDR = "172.30.0.0/16" ) AfterEach(func() { @@ -120,22 +236,23 @@ var _ = 
Describe("system-test cluster install", func() { BeforeEach(func() { registerClusterReply, err := bmclient.Installer.RegisterCluster(ctx, &installer.RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ - APIVip: "1.2.3.4", BaseDNSDomain: "example.com", - ClusterNetworkCidr: "10.128.0.0/14", + ClusterNetworkCidr: &clusterCIDR, ClusterNetworkHostPrefix: 23, Name: swag.String("test-cluster"), - OpenshiftVersion: swag.String("4.4"), - PullSecret: `{"auths":{"cloud.openshift.com":{"auth":""}}}`, - ServiceNetworkCidr: "172.30.0.0/16", + OpenshiftVersion: swag.String("4.5"), + PullSecret: pullSecret, + ServiceNetworkCidr: &serviceCIDR, SSHPublicKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC50TuHS7aYci+U+5PLe/aW/I6maBi9PBDucLje6C6gtArfjy7udWA1DCSIQd+DkHhi57/s+PmvEjzfAfzqo+L+/8/O2l2seR1pPhHDxMR/rSyo/6rZP6KIL8HwFqXHHpDUM4tLXdgwKAe1LxBevLt/yNl8kOiHJESUSl+2QSf8z4SIbo/frDD8OwOvtfKBEG4WCb8zEsEuIPNF/Vo/UxPtS9pPTecEsWKDHR67yFjjamoyLvAzMAJotYgyMoxm8PTyCgEzHk3s3S4iO956d6KVOEJVXnTVhAxrtLuubjskd7N4hVN7h2s4Z584wYLKYhrIBL0EViihOMzY4mH3YE4KZusfIx6oMcggKX9b3NHm0la7cj2zg0r6zjUn6ZCP4gXM99e5q4auc0OEfoSfQwofGi3WmxkG3tEozCB8Zz0wGbi2CzR8zlcF+BNV5I2LESlLzjPY5B4dvv5zjxsYoz94p3rUhKnnPM2zTx1kkilDK5C5fC1k9l/I/r5Qk4ebLQU= oscohen@localhost.localdomain", }, }) Expect(err).NotTo(HaveOccurred()) cluster = registerClusterReply.GetPayload() + log.Infof("Register cluster %s", cluster.ID.String()) }) - generateHWPostStepReply := func(h *models.Host, hwInfo *models.Inventory) { + generateHWPostStepReply := func(h *models.Host, hwInfo *models.Inventory, hostname string) { + hwInfo.Hostname = hostname hw, err := json.Marshal(&hwInfo) Expect(err).NotTo(HaveOccurred()) _, err = bmclient.Installer.PostStepReply(ctx, &installer.PostStepReplyParams{ @@ -143,6 +260,7 @@ var _ = Describe("system-test cluster install", func() { HostID: *h.ID, Reply: &models.StepReply{ ExitCode: 0, + StepType: models.StepTypeInventory, Output: string(hw), StepID: string(models.StepTypeInventory), }, @@ -150,123 +268,347 
@@ var _ = Describe("system-test cluster install", func() { Expect(err).ShouldNot(HaveOccurred()) } + generateFAPostStepReply := func(h *models.Host, freeAddresses models.FreeNetworksAddresses) { + fa, err := json.Marshal(&freeAddresses) + Expect(err).NotTo(HaveOccurred()) + _, err = bmclient.Installer.PostStepReply(ctx, &installer.PostStepReplyParams{ + ClusterID: h.ClusterID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: 0, + Output: string(fa), + StepID: string(models.StepTypeFreeNetworkAddresses), + StepType: models.StepTypeFreeNetworkAddresses, + }, + }) + Expect(err).ShouldNot(HaveOccurred()) + } + + register3nodes := func(clusterID strfmt.UUID) []*models.Host { + h1 := registerHost(clusterID) + generateHWPostStepReply(h1, validHwInfo, "h1") + generateFAPostStepReply(h1, validFreeAddresses) + h2 := registerHost(clusterID) + generateHWPostStepReply(h2, validHwInfo, "h2") + h3 := registerHost(clusterID) + generateHWPostStepReply(h3, validHwInfo, "h3") + + apiVip := "1.2.3.5" + ingressVip := "1.2.3.6" + // All hosts are masters, one in discovering state -> state must be insufficient + _, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{ + APIVip: &apiVip, + IngressVip: &ingressVip, + }, + ClusterID: clusterID, + }) + Expect(err).ShouldNot(HaveOccurred()) + return []*models.Host{h1, h2, h3} + } + Context("install cluster cases", func() { var clusterID strfmt.UUID BeforeEach(func() { clusterID = *cluster.ID - - hwInfo := &models.Inventory{ - CPU: &models.CPU{Count: 16}, - Memory: &models.Memory{PhysicalBytes: int64(32 * units.GiB)}, - Disks: []*models.Disk{ - {DriveType: "SSD", Name: "loop0", SizeBytes: validDiskSize}, - {DriveType: "HDD", Name: "sdb", SizeBytes: validDiskSize}}, - Interfaces: []*models.Interface{ - { - IPV4Addresses: []string{"1.2.3.5/24"}, - }, + registerHostsAndSetRoles(clusterID, 4) + }) + It("[only_k8s]register host while installing", func() { + _, err 
:= bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + waitForClusterState(ctx, clusterID, models.ClusterStatusInstalling, defaultWaitForClusterStateTimeout, + IgnoreStateInfo) + _, err = bmclient.Installer.RegisterHost(context.Background(), &installer.RegisterHostParams{ + ClusterID: clusterID, + NewHostParams: &models.HostCreateParams{ + HostID: strToUUID(uuid.New().String()), }, - } + }) + Expect(err).To(BeAssignableToTypeOf(installer.NewRegisterHostForbidden())) + }) - h1 := registerHost(clusterID) - generateHWPostStepReply(h1, hwInfo) - h2 := registerHost(clusterID) - generateHWPostStepReply(h2, hwInfo) - h3 := registerHost(clusterID) - generateHWPostStepReply(h3, hwInfo) - h4 := registerHost(clusterID) - generateHWPostStepReply(h4, hwInfo) - c, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ - ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ - {ID: *h1.ID, Role: "master"}, - {ID: *h2.ID, Role: "master"}, - {ID: *h3.ID, Role: "master"}, - {ID: *h4.ID, Role: "worker"}, - }}, + It("[only_k8s]register host while cluster in error state", func() { + FailCluster(ctx, clusterID) + //Wait for cluster to get to error state + waitForClusterState(ctx, clusterID, models.ClusterStatusError, defaultWaitForClusterStateTimeout, + IgnoreStateInfo) + _, err := bmclient.Installer.RegisterHost(context.Background(), &installer.RegisterHostParams{ ClusterID: clusterID, + NewHostParams: &models.HostCreateParams{ + HostID: strToUUID(uuid.New().String()), + }, }) - Expect(err).NotTo(HaveOccurred()) - Expect(swag.StringValue(c.GetPayload().Status)).Should(Equal("ready")) - Expect(swag.StringValue(c.GetPayload().StatusInfo)).Should(Equal(clusterReadyStateInfo)) + Expect(err).To(BeAssignableToTypeOf(installer.NewRegisterHostForbidden())) }) - updateProgress := func(hostID 
strfmt.UUID, progress string) { - installProgress := models.HostInstallProgressParams(progress) - updateReply, err := bmclient.Installer.UpdateHostInstallProgress(ctx, &installer.UpdateHostInstallProgressParams{ - ClusterID: clusterID, - HostInstallProgressParams: installProgress, - HostID: hostID, + It("[only_k8s]register existing host while cluster in installing state", func() { + c, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + hostID := c.GetPayload().Hosts[0].ID + Expect(err).NotTo(HaveOccurred()) + _, err = bmclient.Installer.RegisterHost(context.Background(), &installer.RegisterHostParams{ + ClusterID: clusterID, + NewHostParams: &models.HostCreateParams{ + HostID: hostID, + }, }) - Expect(err).ShouldNot(HaveOccurred()) - Expect(updateReply).Should(BeAssignableToTypeOf(installer.NewUpdateHostInstallProgressOK())) - } + Expect(err).To(BeNil()) + host := getHost(clusterID, *hostID) + Expect(*host.Status).To(Equal("error")) + }) - Context("install cluster", func() { + It("[only_k8s]register host after reboot - wrong boot order", func() { + c, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + hostID := c.GetPayload().Hosts[0].ID - It("install cluster", func() { - _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + installProgress := models.HostStageRebooting + updateProgress(*hostID, clusterID, installProgress) + + By("Verify the db has been updated", func() { + hostInDb := getHost(clusterID, *hostID) + Expect(*hostInDb.Status).Should(Equal(host.HostStatusInstallingInProgress)) + Expect(*hostInDb.StatusInfo).Should(Equal(string(installProgress))) + }) + + By("Try to register", func() { Expect(err).NotTo(HaveOccurred()) + _, err = 
bmclient.Installer.RegisterHost(context.Background(), &installer.RegisterHostParams{ + ClusterID: clusterID, + NewHostParams: &models.HostCreateParams{ + HostID: hostID, + }, + }) + Expect(err).To(BeNil()) + hostInDb := getHost(clusterID, *hostID) + Expect(*hostInDb.Status).Should(Equal(models.HostStatusInstallingPendingUserAction)) rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := rep.GetPayload() Expect(swag.StringValue(c.Status)).Should(Equal("installing")) - Expect(swag.StringValue(c.StatusInfo)).Should(Equal("Installation in progress")) - Expect(len(c.Hosts)).Should(Equal(4)) - for _, host := range c.Hosts { - Expect(swag.StringValue(host.Status)).Should(Equal("installing")) - } + }) - for _, host := range c.Hosts { - updateProgress(*host.ID, "Done") - } + By("Updating progress after fixing boot order", func() { + installProgress = models.HostStageConfiguring + updateProgress(*hostID, clusterID, installProgress) + }) - waitForClusterState(ctx, clusterID, "installed") - rep, err = bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) - Expect(err).NotTo(HaveOccurred()) - c = rep.GetPayload() - Expect(swag.StringValue(c.StatusInfo)).Should(Equal("installed")) + By("Verify the db has been updated", func() { + hostInDb := getHost(clusterID, *hostID) + Expect(*hostInDb.Status).Should(Equal(host.HostStatusInstallingInProgress)) + Expect(*hostInDb.StatusInfo).Should(Equal(string(installProgress))) }) }) - It("report_progress", func() { + + It("[only_k8s]install_cluster", func() { + By("Installing cluster till finalize") + _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := 
rep.GetPayload() + Expect(swag.StringValue(c.Status)).Should(Equal("installing")) + Expect(swag.StringValue(c.StatusInfo)).Should(Equal("Installation in progress")) + Expect(len(c.Hosts)).Should(Equal(4)) + for _, host := range c.Hosts { + Expect(swag.StringValue(host.Status)).Should(Equal("installing")) + } + + for _, host := range c.Hosts { + updateProgress(*host.ID, clusterID, models.HostStageDone) + } + + waitForClusterState(ctx, clusterID, "finalizing", defaultWaitForClusterStateTimeout, "Finalizing cluster installation") + By("Completing installation installation") + success := true + _, err = bmclient.Installer.CompleteInstallation(ctx, + &installer.CompleteInstallationParams{ClusterID: clusterID, CompletionParams: &models.CompletionParams{IsSuccess: &success, ErrorInfo: ""}}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying installation successfully completed") + waitForClusterState(ctx, clusterID, "installed", defaultWaitForClusterStateTimeout, "installed") + }) + + It("[only_k8s]install_cluster fail", func() { + By("Installing cluster till finalize") + _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + Expect(swag.StringValue(c.Status)).Should(Equal("installing")) + Expect(swag.StringValue(c.StatusInfo)).Should(Equal("Installation in progress")) + Expect(len(c.Hosts)).Should(Equal(4)) + for _, host := range c.Hosts { + Expect(swag.StringValue(host.Status)).Should(Equal("installing")) + } + + for _, host := range c.Hosts { + updateProgress(*host.ID, clusterID, models.HostStageDone) + } + + waitForClusterState(ctx, clusterID, "finalizing", defaultWaitForClusterStateTimeout, "Finalizing cluster installation") + By("Failing installation") + success := false + 
_, err = bmclient.Installer.CompleteInstallation(ctx, + &installer.CompleteInstallationParams{ClusterID: clusterID, CompletionParams: &models.CompletionParams{IsSuccess: &success, ErrorInfo: "failed"}}) + Expect(err).NotTo(HaveOccurred()) + By("Verifying installation failed") + waitForClusterState(ctx, clusterID, "error", defaultWaitForClusterStateTimeout, "failed") + + }) + + // TODO: re-enable the test when cluster monitor state will be affected by hosts states and cluster + // will not be ready of all the hosts are not ready. + //It("installation_conflicts", func() { + // By("try to install host with host without a role") + // host := registerHost(clusterID) + // generateHWPostStepReply(host, validHwInfo, "host") + // _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + // Expect(reflect.TypeOf(err)).To(Equal(reflect.TypeOf(installer.NewInstallClusterConflict()))) + // By("install after disabling host without a role") + // _, err = bmclient.Installer.DisableHost(ctx, + // &installer.DisableHostParams{ClusterID: clusterID, HostID: *host.ID}) + // Expect(err).NotTo(HaveOccurred()) + // _, err = bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + // Expect(err).NotTo(HaveOccurred()) + //}) + + It("[only_k8s]report_progress", func() { c, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) - h := c.GetPayload().Hosts[0] + hosts := getClusterMasters(c.GetPayload()) - By("progress_to_some_host", func() { - installProgress := "installation step 1" - updateProgress(*h.ID, installProgress) - h = getHost(clusterID, *h.ID) - Expect(*h.Status).Should(Equal("installing-in-progress")) - Expect(*h.StatusInfo).Should(Equal(installProgress)) + By("invalid_report", func() { + step := models.HostStage("INVALID REPORT") + installProgress := 
&models.HostProgress{ + CurrentStage: step, + } + + _, err := bmclient.Installer.UpdateHostInstallProgress(ctx, &installer.UpdateHostInstallProgressParams{ + ClusterID: clusterID, + HostProgress: installProgress, + HostID: *hosts[0].ID, + }) + + Expect(err).Should(HaveOccurred()) }) - By("progress_to_some_host_again", func() { - installProgress := "installation step 2" - updateProgress(*h.ID, installProgress) - h = getHost(clusterID, *h.ID) - Expect(*h.Status).Should(Equal("installing-in-progress")) - Expect(*h.StatusInfo).Should(Equal(installProgress)) + // Host #1 + + By("progress_to_other_host", func() { + installProgress := models.HostStageWritingImageToDisk + installInfo := "68%" + updateProgressWithInfo(*hosts[0].ID, clusterID, installProgress, installInfo) + hostFromDB := getHost(clusterID, *hosts[0].ID) + + Expect(*hostFromDB.Status).Should(Equal(host.HostStatusInstallingInProgress)) + Expect(*hostFromDB.StatusInfo).Should(Equal(string(installProgress))) + Expect(hostFromDB.Progress.CurrentStage).Should(Equal(installProgress)) + Expect(hostFromDB.Progress.ProgressInfo).Should(Equal(installInfo)) }) By("report_done", func() { - updateProgress(*h.ID, "Done") - h = getHost(clusterID, *h.ID) - Expect(*h.Status).Should(Equal("installed")) - Expect(*h.StatusInfo).Should(Equal("installed")) + installProgress := models.HostStageDone + updateProgress(*hosts[0].ID, clusterID, installProgress) + hostFromDB := getHost(clusterID, *hosts[0].ID) + + Expect(*hostFromDB.Status).Should(Equal(host.HostStatusInstalled)) + Expect(*hostFromDB.StatusInfo).Should(Equal(string(installProgress))) + Expect(hostFromDB.Progress.CurrentStage).Should(Equal(installProgress)) + Expect(hostFromDB.Progress.ProgressInfo).Should(BeEmpty()) + }) + + By("cant_report_after_done", func() { + installProgress := &models.HostProgress{ + CurrentStage: models.HostStageFailed, + } + + _, err := bmclient.Installer.UpdateHostInstallProgress(ctx, &installer.UpdateHostInstallProgressParams{ + ClusterID: 
clusterID, + HostProgress: installProgress, + HostID: *hosts[0].ID, + }) + + Expect(err).Should(HaveOccurred()) + }) + + // Host #2 + + By("progress_to_some_host", func() { + installProgress := models.HostStageWritingImageToDisk + updateProgress(*hosts[1].ID, clusterID, installProgress) + hostFromDB := getHost(clusterID, *hosts[1].ID) + + Expect(*hostFromDB.Status).Should(Equal(host.HostStatusInstallingInProgress)) + Expect(*hostFromDB.StatusInfo).Should(Equal(string(installProgress))) + Expect(hostFromDB.Progress.CurrentStage).Should(Equal(installProgress)) + Expect(hostFromDB.Progress.ProgressInfo).Should(BeEmpty()) + }) + + By("invalid_lower_stage", func() { + installProgress := &models.HostProgress{ + CurrentStage: models.HostStageInstalling, + } + + _, err := bmclient.Installer.UpdateHostInstallProgress(ctx, &installer.UpdateHostInstallProgressParams{ + ClusterID: clusterID, + HostProgress: installProgress, + HostID: *hosts[1].ID, + }) + + Expect(err).Should(HaveOccurred()) + }) + + By("report_failed_on_same_host", func() { + installProgress := models.HostStageFailed + installInfo := "because some error" + updateProgressWithInfo(*hosts[1].ID, clusterID, installProgress, installInfo) + hostFromDB := getHost(clusterID, *hosts[1].ID) + + Expect(*hostFromDB.Status).Should(Equal(host.HostStatusError)) + Expect(*hostFromDB.StatusInfo).Should(Equal(fmt.Sprintf("%s - %s", installProgress, installInfo))) + Expect(hostFromDB.Progress.CurrentStage).Should(Equal(models.HostStageWritingImageToDisk)) // Last stage + Expect(hostFromDB.Progress.ProgressInfo).Should(BeEmpty()) }) - By("report failed on other host", func() { - h1 := c.GetPayload().Hosts[1] - updateProgress(*h1.ID, "Failed because some error") - h1 = getHost(clusterID, *h1.ID) - Expect(*h1.Status).Should(Equal("error")) - Expect(*h1.StatusInfo).Should(Equal("Failed because some error")) + By("cant_report_after_error", func() { + installProgress := &models.HostProgress{ + CurrentStage: 
models.HostStageWritingImageToDisk, + } + + _, err := bmclient.Installer.UpdateHostInstallProgress(ctx, &installer.UpdateHostInstallProgressParams{ + ClusterID: clusterID, + HostProgress: installProgress, + HostID: *hosts[1].ID, + }) + + Expect(err).Should(HaveOccurred()) + }) + + By("verify_everything_changed_error", func() { + waitForClusterState(ctx, clusterID, models.ClusterStatusError, defaultWaitForClusterStateTimeout, + IgnoreStateInfo) + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + for _, host := range c.Hosts { + waitForHostState(ctx, clusterID, *host.ID, models.HostStatusError, defaultWaitForHostStateTimeout) + } }) }) - It("install download_config_files", func() { + It("[only_k8s]install download_config_files", func() { //Test downloading kubeconfig files in worng state file, err := ioutil.TempFile("", "tmp") @@ -278,6 +620,7 @@ var _ = Describe("system-test cluster install", func() { _, err = bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) missingClusterId := strfmt.UUID(uuid.New().String()) _, err = bmclient.Installer.DownloadClusterFiles(ctx, &installer.DownloadClusterFilesParams{ClusterID: missingClusterId, FileName: "bootstrap.ign"}, file) @@ -292,9 +635,25 @@ var _ = Describe("system-test cluster install", func() { Expect(err).NotTo(HaveOccurred()) Expect(s.Size()).ShouldNot(Equal(0)) }) - It("Get kubeadmin password", func() { - //Test happy flow + It("[only_k8s]download_config_files in error state", func() { + file, err := ioutil.TempFile("", "tmp") + Expect(err).NotTo(HaveOccurred()) + defer os.Remove(file.Name()) + + FailCluster(ctx, clusterID) + //Wait for cluster to get to error state + waitForClusterState(ctx, clusterID, models.ClusterStatusError, defaultWaitForClusterStateTimeout, + IgnoreStateInfo) + + 
_, err = bmclient.Installer.DownloadClusterFiles(ctx, &installer.DownloadClusterFilesParams{ClusterID: clusterID, FileName: "bootstrap.ign"}, file) + Expect(err).NotTo(HaveOccurred()) + s, err := file.Stat() + Expect(err).NotTo(HaveOccurred()) + Expect(s.Size()).ShouldNot(Equal(0)) + }) + + It("[only_k8s]Get credentials", func() { By("Test getting kubeadmin password for not found cluster") { missingClusterId := strfmt.UUID(uuid.New().String()) @@ -310,106 +669,626 @@ var _ = Describe("system-test cluster install", func() { { _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) creds, err := bmclient.Installer.GetCredentials(ctx, &installer.GetCredentialsParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) - Expect(creds.GetPayload().Username).To(Equal("kubeadmin")) + Expect(creds.GetPayload().Username).To(Equal(bminventory.DefaultUser)) + Expect(creds.GetPayload().ConsoleURL).To(Equal( + fmt.Sprintf("%s.%s.%s", bminventory.ConsoleUrlPrefix, cluster.Name, cluster.BaseDNSDomain))) Expect(len(creds.GetPayload().Password)).NotTo(Equal(0)) } }) - }) - It("install cluster requirement", func() { - clusterID := *cluster.ID + It("[only_k8s]Upload ingress ca and kubeconfig download", func() { + ingressCa := "-----BEGIN CERTIFICATE-----\nMIIDozCCAougAwIBAgIULCOqWTF" + + "aEA8gNEmV+rb7h1v0r3EwDQYJKoZIhvcNAQELBQAwYTELMAkGA1UEBhMCaXMxCzAJBgNVBAgMAmRk" + + "MQswCQYDVQQHDAJkZDELMAkGA1UECgwCZGQxCzAJBgNVBAsMAmRkMQswCQYDVQQDDAJkZDERMA8GCSqGSIb3DQEJARYCZGQwHhcNMjAwNTI1MTYwNTAwWhcNMzA" + + "wNTIzMTYwNTAwWjBhMQswCQYDVQQGEwJpczELMAkGA1UECAwCZGQxCzAJBgNVBAcMAmRkMQswCQYDVQQKDAJkZDELMAkGA1UECwwCZGQxCzAJBgNVBAMMAmRkMREwDwYJKoZIh" + + "vcNAQkBFgJkZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAML63CXkBb+lvrJKfdfYBHLDYfuaC6exCSqASUAosJWWrfyDiDMUbmfs06PLKyv7N8efDhza74ov0EQJ" + + 
"NRhMNaCE+A0ceq6ZXmmMswUYFdLAy8K2VMz5mroBFX8sj5PWVr6rDJ2ckBaFKWBB8NFmiK7MTWSIF9n8M107/9a0QURCvThUYu+sguzbsLODFtXUxG5rtTVKBVcPZvEfRky2Tkt4AySFS" + + "mkO6Kf4sBd7MC4mKWZm7K8k7HrZYz2usSpbrEtYGtr6MmN9hci+/ITDPE291DFkzIcDCF493v/3T+7XsnmQajh6kuI+bjIaACfo8N+twEoJf/N1PmphAQdEiC0CAwEAAaNTMFEwHQYDVR0O" + + "BBYEFNvmSprQQ2HUUtPxs6UOuxq9lKKpMB8GA1UdIwQYMBaAFNvmSprQQ2HUUtPxs6UOuxq9lKKpMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJEWxnxtQV5IqPVRr2SM" + + "WNNxcJ7A/wyet39l5VhHjbrQGynk5WS80psn/riLUfIvtzYMWC0IR0pIMQuMDF5sNcKp4D8Xnrd+Bl/4/Iy/iTOoHlw+sPkKv+NL2XR3iO8bSDwjtjvd6L5NkUuzsRoSkQCG2fHASqqgFoyV9Ld" + + "RsQa1w9ZGebtEWLuGsrJtR7gaFECqJnDbb0aPUMixmpMHID8kt154TrLhVFmMEqGGC1GvZVlQ9Of3GP9y7X4vDpHshdlWotOnYKHaeu2d5cRVFHhEbrslkISgh/TRuyl7VIpnjOYUwMBpCiVH6M" + + "2lyDI6UR3Fbz4pVVAxGXnVhBExjBE=\n-----END CERTIFICATE-----" + By("Upload ingress ca for not existent clusterid") + { + missingClusterId := strfmt.UUID(uuid.New().String()) + _, err := bmclient.Installer.UploadClusterIngressCert(ctx, &installer.UploadClusterIngressCertParams{ClusterID: missingClusterId, IngressCertParams: "dummy"}) + Expect(reflect.TypeOf(err)).Should(Equal(reflect.TypeOf(installer.NewUploadClusterIngressCertNotFound()))) + } + By("Test getting upload ingress ca in wrong state") + { + _, err := bmclient.Installer.UploadClusterIngressCert(ctx, &installer.UploadClusterIngressCertParams{ClusterID: clusterID, IngressCertParams: "dummy"}) + Expect(reflect.TypeOf(err)).To(Equal(reflect.TypeOf(installer.NewUploadClusterIngressCertBadRequest()))) + } + By("Test happy flow") + { - hwInfo := &models.Inventory{ - CPU: &models.CPU{Count: 16}, - Memory: &models.Memory{PhysicalBytes: int64(32 * units.GiB)}, - Disks: []*models.Disk{ - {DriveType: "SSD", Name: "loop0", SizeBytes: validDiskSize}, - {DriveType: "HDD", Name: "sdb", SizeBytes: validDiskSize}}, - } - Expect(swag.StringValue(cluster.Status)).Should(Equal("insufficient")) - Expect(swag.StringValue(cluster.StatusInfo)).Should(Equal(clusterInsufficientStateInfo)) + 
installCluster(clusterID) + // Download kubeconfig before uploading + kubeconfigNoIngress, err := ioutil.TempFile("", "tmp") + Expect(err).NotTo(HaveOccurred()) + _, err = bmclient.Installer.DownloadClusterFiles(ctx, &installer.DownloadClusterFilesParams{ClusterID: clusterID, FileName: "kubeconfig-noingress"}, kubeconfigNoIngress) + Expect(err).NotTo(HaveOccurred()) + sni, err := kubeconfigNoIngress.Stat() + Expect(err).NotTo(HaveOccurred()) + Expect(sni.Size()).ShouldNot(Equal(0)) - h1 := registerHost(clusterID) - generateHWPostStepReply(h1, hwInfo) - h2 := registerHost(clusterID) - generateHWPostStepReply(h2, hwInfo) - h3 := registerHost(clusterID) - generateHWPostStepReply(h3, hwInfo) - h4 := registerHost(clusterID) + By("Trying to download kubeconfig file before it exists") + file, err := ioutil.TempFile("", "tmp") + Expect(err).NotTo(HaveOccurred()) + _, err = bmclient.Installer.DownloadClusterKubeconfig(ctx, &installer.DownloadClusterKubeconfigParams{ClusterID: clusterID}, file) + Expect(err).Should(HaveOccurred()) + Expect(reflect.TypeOf(err)).Should(Equal(reflect.TypeOf(installer.NewDownloadClusterKubeconfigConflict()))) - // All hosts are masters, one in discovering state -> state must be insufficient - cluster, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ - ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ - {ID: *h1.ID, Role: "master"}, - {ID: *h2.ID, Role: "master"}, - {ID: *h4.ID, Role: "master"}, - }}, - ClusterID: clusterID, + By("Upload ingress ca") + res, err := bmclient.Installer.UploadClusterIngressCert(ctx, &installer.UploadClusterIngressCertParams{ClusterID: clusterID, IngressCertParams: models.IngressCertParams(ingressCa)}) + Expect(err).NotTo(HaveOccurred()) + Expect(reflect.TypeOf(res)).Should(Equal(reflect.TypeOf(installer.NewUploadClusterIngressCertCreated()))) + + // Download kubeconfig after uploading + file, err = ioutil.TempFile("", "tmp") + 
Expect(err).NotTo(HaveOccurred()) + _, err = bmclient.Installer.DownloadClusterKubeconfig(ctx, &installer.DownloadClusterKubeconfigParams{ClusterID: clusterID}, file) + Expect(err).NotTo(HaveOccurred()) + s, err := file.Stat() + Expect(err).NotTo(HaveOccurred()) + Expect(s.Size()).ShouldNot(Equal(0)) + Expect(s.Size()).ShouldNot(Equal(sni.Size())) + } + By("Try to upload ingress ca second time, do nothing and return ok") + { + // Try to upload ingress ca second time + res, err := bmclient.Installer.UploadClusterIngressCert(ctx, &installer.UploadClusterIngressCertParams{ClusterID: clusterID, IngressCertParams: models.IngressCertParams(ingressCa)}) + Expect(err).NotTo(HaveOccurred()) + Expect(reflect.TypeOf(res)).To(Equal(reflect.TypeOf(installer.NewUploadClusterIngressCertCreated()))) + } }) - Expect(err).NotTo(HaveOccurred()) - Expect(swag.StringValue(cluster.GetPayload().Status)).Should(Equal("insufficient")) - Expect(swag.StringValue(cluster.GetPayload().StatusInfo)).Should(Equal(clusterInsufficientStateInfo)) - // Adding one known host and setting as master -> state must be ready - cluster, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ - ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ - {ID: *h3.ID, Role: "master"}, - }}, - ClusterID: clusterID, + It("[only_k8s]on cluster error - verify all hosts are aborted", func() { + FailCluster(ctx, clusterID) + stateInfo := fmt.Sprintf(clusterErrorInfo, clusterID.String()) + waitForClusterState(ctx, clusterID, models.ClusterStatusError, defaultWaitForClusterStateTimeout, + stateInfo) + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + for _, host := range c.Hosts { + waitForHostState(ctx, clusterID, *host.ID, models.HostStatusError, defaultWaitForHostStateTimeout) + } }) - Expect(err).NotTo(HaveOccurred()) - 
Expect(swag.StringValue(cluster.GetPayload().Status)).Should(Equal("ready")) - Expect(swag.StringValue(cluster.GetPayload().StatusInfo)).Should(Equal(clusterReadyStateInfo)) - }) + Context("cancel installation", func() { + It("[only_k8s]cancel running installation", func() { + _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + for _, host := range c.Hosts { + waitForHostState(ctx, clusterID, *host.ID, models.HostStatusInstalling, + defaultWaitForHostStateTimeout) + } + _, err = bmclient.Installer.CancelInstallation(ctx, &installer.CancelInstallationParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + rep, err = bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c = rep.GetPayload() + Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusError)) + for _, host := range c.Hosts { + Expect(swag.StringValue(host.Status)).Should(Equal(models.HostStatusError)) + } + }) + It("[only_k8s]cancel installation conflicts", func() { + _, err := bmclient.Installer.CancelInstallation(ctx, &installer.CancelInstallationParams{ClusterID: clusterID}) + Expect(reflect.TypeOf(err)).Should(Equal(reflect.TypeOf(installer.NewCancelInstallationConflict()))) + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusReady)) + }) + It("[only_k8s]cancel failed cluster", func() { + FailCluster(ctx, clusterID) + stateInfo := fmt.Sprintf(clusterErrorInfo, clusterID.String()) + waitForClusterState(ctx, clusterID, 
models.ClusterStatusError, defaultWaitForClusterStateTimeout, + stateInfo) + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).ShouldNot(HaveOccurred()) + c := rep.GetPayload() + Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusError)) + rep, err = bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).ShouldNot(HaveOccurred()) + c = rep.GetPayload() - It("install_cluster_states", func() { + verifyErrorStates := func() { + Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusError)) + for _, host := range c.Hosts { + waitForHostState(ctx, clusterID, *host.ID, models.HostStatusError, + defaultWaitForHostStateTimeout) + } + } + + verifyErrorStates() + _, err = bmclient.Installer.CancelInstallation(ctx, &installer.CancelInstallationParams{ClusterID: clusterID}) + Expect(err).ShouldNot(HaveOccurred()) + verifyErrorStates() + }) + It("[only_k8s]cancel cluster with various hosts states", func() { + _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + Expect(len(c.Hosts)).Should(Equal(4)) + + updateProgress(*c.Hosts[0].ID, clusterID, "Installing") + updateProgress(*c.Hosts[1].ID, clusterID, "Done") + + h1 := getHost(clusterID, *c.Hosts[0].ID) + Expect(*h1.Status).Should(Equal(models.HostStatusInstallingInProgress)) + h2 := getHost(clusterID, *c.Hosts[1].ID) + Expect(*h2.Status).Should(Equal(models.HostStatusInstalled)) + + _, err = bmclient.Installer.CancelInstallation(ctx, &installer.CancelInstallationParams{ClusterID: clusterID}) + Expect(err).ShouldNot(HaveOccurred()) + for _, host := range c.Hosts { + waitForHostState(ctx, clusterID, 
*host.ID, models.HostStatusError, + defaultWaitForClusterStateTimeout) + } + }) + It("[only_k8s]cancel installation with a disabled host", func() { + By("register a new worker") + disabledHost := registerHost(clusterID) + generateHWPostStepReply(disabledHost, validHwInfo, "hostname") + generateFAPostStepReply(disabledHost, validFreeAddresses) + _, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ + {ID: *disabledHost.ID, Role: models.HostRoleUpdateParamsWorker}, + }, + }, + ClusterID: clusterID, + }) + Expect(err).ShouldNot(HaveOccurred()) + + By("disable worker") + _, err = bmclient.Installer.DisableHost(ctx, &installer.DisableHostParams{ + ClusterID: clusterID, + HostID: *disabledHost.ID, + }) + Expect(err).ShouldNot(HaveOccurred()) + waitForHostState(ctx, clusterID, *disabledHost.ID, models.HostStatusDisabled, + defaultWaitForHostStateTimeout) + + By("install cluster") + _, err = bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + Expect(len(c.Hosts)).Should(Equal(5)) + for _, host := range c.Hosts { + if host.ID.String() == disabledHost.ID.String() { + Expect(*host.Status).Should(Equal(models.HostStatusDisabled)) + continue + } + waitForHostState(ctx, clusterID, *host.ID, models.HostStatusInstalling, + defaultWaitForHostStateTimeout) + } + + By("cancel installation") + _, err = bmclient.Installer.CancelInstallation(ctx, &installer.CancelInstallationParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + rep, err = bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) 
+ c = rep.GetPayload() + Expect(len(c.Hosts)).Should(Equal(5)) + Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusError)) + for _, host := range c.Hosts { + if host.ID.String() == disabledHost.ID.String() { + Expect(*host.Status).Should(Equal(models.HostStatusDisabled)) + continue + } + Expect(swag.StringValue(host.Status)).Should(Equal(models.HostStatusError)) + } + }) + It("[only_k8s]cancel preparing installation", func() { + _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterState(context.Background(), clusterID, models.ClusterStatusPreparingForInstallation, + 10*time.Second, IgnoreStateInfo) + _, err = bmclient.Installer.CancelInstallation(ctx, &installer.CancelInstallationParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusError)) + Expect(len(c.Hosts)).Should(Equal(4)) + for _, host := range c.Hosts { + Expect(swag.StringValue(host.Status)).Should(Equal(models.HostStatusError)) + } + }) + }) + Context("reset installation", func() { + It("[only_k8s]reset cluster and register hosts", func() { + By("verify reset success") + _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + _, err = bmclient.Installer.CancelInstallation(ctx, &installer.CancelInstallationParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + _, err = bmclient.Installer.ResetCluster(ctx, &installer.ResetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + _, err = bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + 
Expect(err).NotTo(HaveOccurred()) + + By("verify cluster state") + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusInsufficient)) + + By("verify hosts state") + for i, host := range c.Hosts { + Expect(swag.StringValue(host.Status)).Should(Equal(models.HostStatusResetting)) + _, ok := getStepInList(getNextSteps(clusterID, *host.ID), models.StepTypeResetInstallation) + Expect(ok).Should(Equal(true)) + _, err = bmclient.Installer.RegisterHost(ctx, &installer.RegisterHostParams{ + ClusterID: clusterID, + NewHostParams: &models.HostCreateParams{ + HostID: host.ID, + }, + }) + Expect(err).ShouldNot(HaveOccurred()) + waitForHostState(ctx, clusterID, *host.ID, models.HostStatusDiscovering, + defaultWaitForHostStateTimeout) + generateHWPostStepReply(host, validHwInfo, fmt.Sprintf("host-after-reset-%d", i)) + waitForHostState(ctx, clusterID, *host.ID, models.HostStatusKnown, + defaultWaitForHostStateTimeout) + host = getHost(clusterID, *host.ID) + Expect(host.Progress.CurrentStage).Should(Equal(models.HostStage(""))) + Expect(host.Progress.ProgressInfo).Should(Equal("")) + Expect(host.Bootstrap).Should(Equal(false)) + } + }) + It("[only_k8s]reset cluster and disable bootstrap", func() { + var bootstrapID *strfmt.UUID + + By("verify reset success") + _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + _, err = bmclient.Installer.CancelInstallation(ctx, &installer.CancelInstallationParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + _, err = bmclient.Installer.ResetCluster(ctx, &installer.ResetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + rep, err := bmclient.Installer.GetCluster(ctx, 
&installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + for _, h := range c.Hosts { + if h.Bootstrap { + bootstrapID = h.ID + break + } + } + Expect(bootstrapID).ShouldNot(Equal(nil)) + + By("verify cluster state") + rep, err = bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c = rep.GetPayload() + Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusInsufficient)) + + By("register hosts and disable bootstrap") + for i, host := range c.Hosts { + Expect(swag.StringValue(host.Status)).Should(Equal(models.HostStatusResetting)) + _, ok := getStepInList(getNextSteps(clusterID, *host.ID), models.StepTypeResetInstallation) + Expect(ok).Should(Equal(true)) + _, err = bmclient.Installer.RegisterHost(ctx, &installer.RegisterHostParams{ + ClusterID: clusterID, + NewHostParams: &models.HostCreateParams{ + HostID: host.ID, + }, + }) + Expect(err).ShouldNot(HaveOccurred()) + waitForHostState(ctx, clusterID, *host.ID, models.HostStatusDiscovering, + defaultWaitForHostStateTimeout) + generateHWPostStepReply(host, validHwInfo, fmt.Sprintf("host-after-reset-%d", i)) + waitForHostState(ctx, clusterID, *host.ID, models.HostStatusKnown, + defaultWaitForHostStateTimeout) + + if host.Bootstrap { + _, err = bmclient.Installer.DisableHost(ctx, &installer.DisableHostParams{ + ClusterID: clusterID, + HostID: *host.ID, + }) + Expect(err).NotTo(HaveOccurred()) + } + } + h := registerHost(clusterID) + generateHWPostStepReply(h, validHwInfo, "hostname") + generateFAPostStepReply(h, validFreeAddresses) + _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ + {ID: *h.ID, Role: models.HostRoleUpdateParamsMaster}, + }, + }, + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + + By("check for a new 
bootstrap") + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, defaultWaitForClusterStateTimeout, + clusterReadyStateInfo) + _, err = bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + rep, err = bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c = rep.GetPayload() + for _, h := range c.Hosts { + if h.Bootstrap { + Expect(h.ID).ShouldNot(Equal(bootstrapID)) + break + } + } + }) + It("[only_k8s]reset ready/installing cluster", func() { + _, err := bmclient.Installer.ResetCluster(ctx, &installer.ResetClusterParams{ClusterID: clusterID}) + Expect(reflect.TypeOf(err)).Should(Equal(reflect.TypeOf(installer.NewResetClusterConflict()))) + _, err = bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + for _, host := range c.Hosts { + waitForHostState(ctx, clusterID, *host.ID, models.HostStatusInstalling, + defaultWaitForHostStateTimeout) + } + _, err = bmclient.Installer.ResetCluster(ctx, &installer.ResetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + rep, err = bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c = rep.GetPayload() + for _, host := range c.Hosts { + Expect(swag.StringValue(host.Status)).Should(Equal(models.HostStatusResetting)) + } + }) + It("[only_k8s]reset cluster with various hosts states", func() { + _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + 
waitForClusterInstallationToStart(clusterID) + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusInstalling)) + Expect(len(c.Hosts)).Should(Equal(4)) + + updateProgress(*c.Hosts[0].ID, clusterID, "Installing") + updateProgress(*c.Hosts[1].ID, clusterID, "Done") + + h1 := getHost(clusterID, *c.Hosts[0].ID) + Expect(*h1.Status).Should(Equal(models.HostStatusInstallingInProgress)) + h2 := getHost(clusterID, *c.Hosts[1].ID) + Expect(*h2.Status).Should(Equal(models.HostStatusInstalled)) + + _, err = bmclient.Installer.ResetCluster(ctx, &installer.ResetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + for _, host := range c.Hosts { + waitForHostState(ctx, clusterID, *host.ID, models.HostStatusResettingPendingUserAction, + defaultWaitForClusterStateTimeout) + } + }) + + It("[only_k8s]reset cluster - wrong boot order", func() { + _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + Expect(len(c.Hosts)).Should(Equal(4)) + updateProgress(*c.Hosts[0].ID, clusterID, models.HostStageRebooting) + _, err = bmclient.Installer.CancelInstallation(ctx, &installer.CancelInstallationParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + _, err = bmclient.Installer.ResetCluster(ctx, &installer.ResetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterState(ctx, clusterID, models.ClusterStatusInsufficient, defaultWaitForClusterStateTimeout, clusterResetStateInfo) + for _, host := range c.Hosts { + waitForHostState(ctx, clusterID, *host.ID, 
models.HostStatusResettingPendingUserAction, + defaultWaitForHostStateTimeout) + _, err = bmclient.Installer.RegisterHost(ctx, &installer.RegisterHostParams{ + ClusterID: clusterID, + NewHostParams: &models.HostCreateParams{ + HostID: host.ID, + }, + }) + Expect(err).ShouldNot(HaveOccurred()) + waitForHostState(ctx, clusterID, *host.ID, models.HostStatusDiscovering, + defaultWaitForHostStateTimeout) + } + }) + It("[only_k8s]reset cluster with a disabled host", func() { + By("register a new worker") + disabledHost := registerHost(clusterID) + generateHWPostStepReply(disabledHost, validHwInfo, "hostname") + generateFAPostStepReply(disabledHost, validFreeAddresses) + _, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ + {ID: *disabledHost.ID, Role: models.HostRoleUpdateParamsWorker}, + }, + }, + ClusterID: clusterID, + }) + Expect(err).ShouldNot(HaveOccurred()) + + By("disable worker") + _, err = bmclient.Installer.DisableHost(ctx, &installer.DisableHostParams{ + ClusterID: clusterID, + HostID: *disabledHost.ID, + }) + Expect(err).ShouldNot(HaveOccurred()) + waitForHostState(ctx, clusterID, *disabledHost.ID, models.HostStatusDisabled, + defaultWaitForHostStateTimeout) + + By("install cluster") + _, err = bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + Expect(len(c.Hosts)).Should(Equal(5)) + for _, host := range c.Hosts { + if host.ID.String() == disabledHost.ID.String() { + Expect(*host.Status).Should(Equal(models.HostStatusDisabled)) + continue + } + waitForHostState(ctx, clusterID, *host.ID, models.HostStatusInstalling, + 
defaultWaitForHostStateTimeout) + } + + By("reset installation") + _, err = bmclient.Installer.CancelInstallation(ctx, &installer.CancelInstallationParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + _, err = bmclient.Installer.ResetCluster(ctx, &installer.ResetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + rep, err = bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c = rep.GetPayload() + Expect(len(c.Hosts)).Should(Equal(5)) + Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusInsufficient)) + for _, host := range c.Hosts { + if host.ID.String() == disabledHost.ID.String() { + Expect(*host.Status).Should(Equal(models.HostStatusDisabled)) + continue + } + Expect(swag.StringValue(host.Status)).Should(Equal(models.HostStatusResetting)) + } + }) + It("[only_k8s]reset preparing installation", func() { + _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterState(context.Background(), clusterID, models.ClusterStatusPreparingForInstallation, + 10*time.Second, IgnoreStateInfo) + _, err = bmclient.Installer.ResetCluster(ctx, &installer.ResetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + Expect(swag.StringValue(c.Status)).Should(Equal(models.ClusterStatusInsufficient)) + Expect(len(c.Hosts)).Should(Equal(4)) + for _, h := range c.Hosts { + Expect(swag.StringValue(h.Status)).Should(Equal(models.HostStatusResetting)) + } + }) + }) + }) + + It("install cluster requirement", func() { + clusterID := *cluster.ID + + Expect(swag.StringValue(cluster.Status)).Should(Equal("insufficient")) + 
Expect(swag.StringValue(cluster.StatusInfo)).Should(Equal(clusterInsufficientStateInfo)) + + hosts := register3nodes(clusterID) + + h4 := registerHost(clusterID) + + // All hosts are masters, one in discovering state -> state must be insufficient + cluster, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ + {ID: *hosts[0].ID, Role: models.HostRoleUpdateParamsMaster}, + {ID: *hosts[1].ID, Role: models.HostRoleUpdateParamsMaster}, + {ID: *h4.ID, Role: models.HostRoleUpdateParamsMaster}, + }, + }, + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(swag.StringValue(cluster.GetPayload().Status)).Should(Equal("insufficient")) + Expect(swag.StringValue(cluster.GetPayload().StatusInfo)).Should(Equal(clusterInsufficientStateInfo)) + + // Adding one known host and setting as master -> state must be ready + _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ + {ID: *hosts[2].ID, Role: models.HostRoleUpdateParamsMaster}, + }}, + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, 60*time.Second, clusterReadyStateInfo) + + }) + + It("install_cluster_states", func() { clusterID := *cluster.ID - hwInfo := &models.Inventory{ - CPU: &models.CPU{Count: 16}, - Memory: &models.Memory{PhysicalBytes: int64(32 * units.GiB)}, - Disks: []*models.Disk{ - {DriveType: "SSD", Name: "loop0", SizeBytes: validDiskSize}, - {DriveType: "HDD", Name: "sdb", SizeBytes: validDiskSize}}, - } Expect(swag.StringValue(cluster.Status)).Should(Equal("insufficient")) Expect(swag.StringValue(cluster.StatusInfo)).Should(Equal(clusterInsufficientStateInfo)) wh1 := registerHost(clusterID) - generateHWPostStepReply(wh1, hwInfo) + 
generateHWPostStepReply(wh1, validHwInfo, "wh1") wh2 := registerHost(clusterID) - generateHWPostStepReply(wh2, hwInfo) + generateHWPostStepReply(wh2, validHwInfo, "wh2") wh3 := registerHost(clusterID) - generateHWPostStepReply(wh3, hwInfo) + generateHWPostStepReply(wh3, validHwInfo, "wh3") mh1 := registerHost(clusterID) - generateHWPostStepReply(mh1, hwInfo) + generateHWPostStepReply(mh1, validHwInfo, "mh1") + generateFAPostStepReply(mh1, validFreeAddresses) mh2 := registerHost(clusterID) - generateHWPostStepReply(mh2, hwInfo) + generateHWPostStepReply(mh2, validHwInfo, "mh2") mh3 := registerHost(clusterID) - generateHWPostStepReply(mh3, hwInfo) + generateHWPostStepReply(mh3, validHwInfo, "mh3") + + apiVip := "1.2.3.5" + ingressVip := "1.2.3.6" - // All hosts are workers -> state must be insufficient + By("All hosts are workers -> state must be insufficient") cluster, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ - {ID: *wh1.ID, Role: "worker"}, - {ID: *wh2.ID, Role: "worker"}, - {ID: *wh3.ID, Role: "worker"}, - }}, + {ID: *wh1.ID, Role: models.HostRoleUpdateParamsWorker}, + {ID: *wh2.ID, Role: models.HostRoleUpdateParamsWorker}, + {ID: *wh3.ID, Role: models.HostRoleUpdateParamsWorker}, + }, + APIVip: &apiVip, + IngressVip: &ingressVip, + }, ClusterID: clusterID, }) Expect(err).NotTo(HaveOccurred()) Expect(swag.StringValue(cluster.GetPayload().Status)).Should(Equal("insufficient")) Expect(swag.StringValue(cluster.GetPayload().StatusInfo)).Should(Equal(clusterInsufficientStateInfo)) - - // Only two masters -> state must be insufficient + clusterReply, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ + ClusterID: clusterID, + }) + Expect(err).ToNot(HaveOccurred()) + Expect(clusterReply.Payload.APIVip).To(Equal(apiVip)) + Expect(clusterReply.Payload.MachineNetworkCidr).To(Equal("1.2.3.0/24")) + 
Expect(len(clusterReply.Payload.HostNetworks)).To(Equal(1)) + Expect(clusterReply.Payload.HostNetworks[0].Cidr).To(Equal("1.2.3.0/24")) + hids := make([]interface{}, 0) + for _, h := range clusterReply.Payload.HostNetworks[0].HostIds { + hids = append(hids, h) + } + Expect(len(hids)).To(Equal(6)) + Expect(*wh1.ID).To(BeElementOf(hids...)) + Expect(*wh2.ID).To(BeElementOf(hids...)) + Expect(*wh3.ID).To(BeElementOf(hids...)) + Expect(*mh1.ID).To(BeElementOf(hids...)) + Expect(*mh2.ID).To(BeElementOf(hids...)) + Expect(*mh3.ID).To(BeElementOf(hids...)) + + By("Only two masters -> state must be insufficient") _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ - {ID: *mh1.ID, Role: "master"}, - {ID: *mh2.ID, Role: "master"}, + {ID: *mh1.ID, Role: models.HostRoleUpdateParamsMaster}, + {ID: *mh2.ID, Role: models.HostRoleUpdateParamsMaster}, }}, ClusterID: clusterID, }) @@ -417,21 +1296,22 @@ var _ = Describe("system-test cluster install", func() { Expect(swag.StringValue(cluster.GetPayload().Status)).Should(Equal("insufficient")) Expect(swag.StringValue(cluster.GetPayload().StatusInfo)).Should(Equal(clusterInsufficientStateInfo)) - // Three master hosts -> state must be ready - cluster, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + By("Three master hosts -> state must be ready") + _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ - {ID: *mh3.ID, Role: "master"}, + {ID: *mh3.ID, Role: models.HostRoleUpdateParamsMaster}, }}, ClusterID: clusterID, }) + waitForHostState(ctx, clusterID, *mh3.ID, "known", 60*time.Second) + Expect(err).NotTo(HaveOccurred()) - Expect(swag.StringValue(cluster.GetPayload().Status)).Should(Equal("ready")) - 
Expect(swag.StringValue(cluster.GetPayload().StatusInfo)).Should(Equal(clusterReadyStateInfo)) + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, 60*time.Second, clusterReadyStateInfo) - // Back to two master hosts -> state must be insufficient + By("Back to two master hosts -> state must be insufficient") cluster, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ - {ID: *mh3.ID, Role: "worker"}, + {ID: *mh3.ID, Role: models.HostRoleUpdateParamsWorker}, }}, ClusterID: clusterID, }) @@ -439,21 +1319,22 @@ var _ = Describe("system-test cluster install", func() { Expect(swag.StringValue(cluster.GetPayload().Status)).Should(Equal("insufficient")) Expect(swag.StringValue(cluster.GetPayload().StatusInfo)).Should(Equal(clusterInsufficientStateInfo)) - // Three master hosts -> state must be ready - cluster, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + By("Three master hosts -> state must be ready") + _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ - {ID: *mh3.ID, Role: "master"}, + {ID: *mh3.ID, Role: models.HostRoleUpdateParamsMaster}, }}, ClusterID: clusterID, }) + waitForHostState(ctx, clusterID, *mh3.ID, "known", 60*time.Second) + Expect(err).NotTo(HaveOccurred()) - Expect(swag.StringValue(cluster.GetPayload().Status)).Should(Equal("ready")) - Expect(swag.StringValue(cluster.GetPayload().StatusInfo)).Should(Equal(clusterReadyStateInfo)) + waitForClusterState(ctx, clusterID, "ready", 60*time.Second, clusterReadyStateInfo) - // Back to two master hosts -> state must be insufficient + By("Back to two master hosts -> state must be insufficient") cluster, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ ClusterUpdateParams: 
&models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ - {ID: *mh3.ID, Role: "worker"}, + {ID: *mh3.ID, Role: models.HostRoleUpdateParamsWorker}, }}, ClusterID: clusterID, }) @@ -475,50 +1356,475 @@ var _ = Describe("system-test cluster install", func() { CPU: &models.CPU{Count: 2}, Memory: &models.Memory{PhysicalBytes: int64(8 * units.GiB)}, Disks: []*models.Disk{ - {DriveType: "HDD", Name: "sdb", SizeBytes: validDiskSize}}, + {DriveType: "HDD", Name: "sdb", SizeBytes: validDiskSize}, + }, + Interfaces: []*models.Interface{ + { + IPV4Addresses: []string{ + "1.2.3.4/24", + }, + }, + }, } h1 := registerHost(clusterID) - generateHWPostStepReply(h1, hwInfo) - Expect(*getHost(clusterID, *h1.ID).Status).Should(Equal("known")) + generateHWPostStepReply(h1, hwInfo, "h1") + generateFAPostStepReply(h1, validFreeAddresses) + apiVip := "1.2.3.8" + ingressVip := "1.2.3.9" + _, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{ + APIVip: &apiVip, + IngressVip: &ingressVip, + }, + ClusterID: clusterID, + }) + Expect(err).To(Not(HaveOccurred())) + waitForHostState(ctx, clusterID, *h1.ID, models.HostStatusPendingForInput, 60*time.Second) hwInfo = &models.Inventory{ CPU: &models.CPU{Count: 16}, Memory: &models.Memory{PhysicalBytes: int64(32 * units.GiB)}, + Disks: []*models.Disk{ + {DriveType: "HDD", Name: "sdb", SizeBytes: validDiskSize}, + }, } h2 := registerHost(clusterID) - generateHWPostStepReply(h2, hwInfo) + generateHWPostStepReply(h2, hwInfo, "h2") h3 := registerHost(clusterID) - generateHWPostStepReply(h3, hwInfo) + generateHWPostStepReply(h3, hwInfo, "h3") h4 := registerHost(clusterID) - generateHWPostStepReply(h4, hwInfo) + generateHWPostStepReply(h4, hwInfo, "h4") + _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ + {ID: 
*h1.ID, Role: models.HostRoleUpdateParamsMaster}, + {ID: *h2.ID, Role: models.HostRoleUpdateParamsMaster}, + {ID: *h3.ID, Role: models.HostRoleUpdateParamsMaster}, + {ID: *h4.ID, Role: models.HostRoleUpdateParamsWorker}, + }}, + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + h1 = getHost(clusterID, *h1.ID) + waitForHostState(ctx, clusterID, *h1.ID, "insufficient", 60*time.Second) + }) + + It("[only_k8s]unique_hostname_validation", func() { + clusterID := *cluster.ID + hosts := register3nodes(clusterID) _, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ - {ID: *h1.ID, Role: "master"}, - {ID: *h2.ID, Role: "master"}, - {ID: *h3.ID, Role: "master"}, - {ID: *h4.ID, Role: "worker"}, + {ID: *hosts[0].ID, Role: models.HostRoleUpdateParamsMaster}, }}, ClusterID: clusterID, }) Expect(err).NotTo(HaveOccurred()) + + h1 := getHost(clusterID, *hosts[0].ID) + waitForHostState(ctx, clusterID, *h1.ID, "known", 60*time.Second) + Expect(h1.RequestedHostname).Should(Equal("h1")) + + By("Registering host with same hostname") + h4 := registerHost(clusterID) + generateHWPostStepReply(h4, validHwInfo, "h1") + h4 = getHost(clusterID, *h4.ID) + waitForHostState(ctx, clusterID, *h1.ID, "insufficient", 60*time.Second) + Expect(h4.RequestedHostname).Should(Equal("h1")) h1 = getHost(clusterID, *h1.ID) Expect(*h1.Status).Should(Equal("insufficient")) + + By("Verifying install command") + _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ + {ID: *h1.ID, Role: models.HostRoleUpdateParamsMaster}, + {ID: *hosts[1].ID, Role: models.HostRoleUpdateParamsMaster}, + {ID: *hosts[2].ID, Role: models.HostRoleUpdateParamsMaster}, + {ID: *h4.ID, Role: models.HostRoleUpdateParamsWorker}, + }}, + ClusterID: clusterID, + }) 
+ Expect(err).NotTo(HaveOccurred()) + _, err = bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).Should(HaveOccurred()) + + By("Registering one more host with same hostname") + disabledHost := registerHost(clusterID) + generateHWPostStepReply(disabledHost, validHwInfo, "h1") + disabledHost = getHost(clusterID, *disabledHost.ID) + waitForHostState(ctx, clusterID, *disabledHost.ID, models.HostStatusPendingForInput, 60*time.Second) + _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ + {ID: *disabledHost.ID, Role: models.HostRoleUpdateParamsWorker}, + }}, + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + + By("Changing hostname, verify host is known now") + generateHWPostStepReply(h4, validHwInfo, "h4") + waitForHostState(ctx, clusterID, *h4.ID, "known", 60*time.Second) + h4 = getHost(clusterID, *h4.ID) + Expect(h4.RequestedHostname).Should(Equal("h4")) + + By("Disable host with the same hostname and verify h1 is known") + _, err = bmclient.Installer.DisableHost(ctx, &installer.DisableHostParams{ + ClusterID: clusterID, + HostID: *disabledHost.ID, + }) + Expect(err).NotTo(HaveOccurred()) + disabledHost = getHost(clusterID, *disabledHost.ID) + Expect(*disabledHost.Status).Should(Equal("disabled")) + waitForHostState(ctx, clusterID, *h1.ID, "known", 60*time.Second) + + By("waiting for cluster to be in ready state") + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, 60*time.Second, clusterReadyStateInfo) + + By("Verify install after disabling the host with same hostname") + _, err = bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + + }) + + It("[only_k8s]localhost is not valid", func() { + localhost := "localhost" + clusterID := *cluster.ID + + hosts := 
register3nodes(clusterID) + _, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ + {ID: *hosts[0].ID, Role: models.HostRoleUpdateParamsMaster}, + }}, + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + + h1 := getHost(clusterID, *hosts[0].ID) + waitForHostState(ctx, clusterID, *h1.ID, "known", 60*time.Second) + Expect(h1.RequestedHostname).Should(Equal("h1")) + + By("Changing hostname reply to localhost") + generateHWPostStepReply(h1, validHwInfo, localhost) + waitForHostState(ctx, clusterID, *h1.ID, models.HostStatusInsufficient, 60*time.Second) + h1Host := getHost(clusterID, *h1.ID) + Expect(h1Host.RequestedHostname).Should(Equal(localhost)) + + By("Setting hostname to valid name") + _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsNames: []*models.ClusterUpdateParamsHostsNamesItems0{ + {ID: *h1Host.ID, Hostname: "reqh0"}, + }}, + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + + waitForHostState(ctx, clusterID, *h1.ID, models.HostStatusKnown, 60*time.Second) + + By("Setting hostname to localhost") + _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsNames: []*models.ClusterUpdateParamsHostsNamesItems0{ + {ID: *h1Host.ID, Hostname: localhost}, + }}, + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + + waitForHostState(ctx, clusterID, *h1.ID, models.HostStatusInsufficient, 60*time.Second) + }) + + It("[only_k8s]different_roles_stages", func() { + clusterID := *cluster.ID + registerHostsAndSetRoles(clusterID, 4) + _, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + + rep, err := 
bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c := rep.GetPayload() + Expect(len(c.Hosts)).Should(Equal(4)) + + var atLeastOneBootstrap bool = false + + for _, h := range c.Hosts { + if h.Bootstrap { + Expect(h.ProgressStages).Should(Equal(host.BootstrapStages[:])) + atLeastOneBootstrap = true + } else if h.Role == models.HostRoleMaster { + Expect(h.ProgressStages).Should(Equal(host.MasterStages[:])) + } else { + Expect(h.ProgressStages).Should(Equal(host.WorkerStages[:])) + } + } + + Expect(atLeastOneBootstrap).Should(BeTrue()) + }) + + It("[only_k8s]set_requested_hostnames", func() { + clusterID := *cluster.ID + hosts := register3nodes(clusterID) + _, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ + {ID: *hosts[0].ID, Role: models.HostRoleUpdateParamsMaster}, + {ID: *hosts[1].ID, Role: models.HostRoleUpdateParamsMaster}, + {ID: *hosts[2].ID, Role: models.HostRoleUpdateParamsMaster}, + }}, + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + + h1 := getHost(clusterID, *hosts[0].ID) + h2 := getHost(clusterID, *hosts[1].ID) + h3 := getHost(clusterID, *hosts[2].ID) + waitForHostState(ctx, clusterID, *h1.ID, models.HostStatusKnown, time.Minute) + waitForHostState(ctx, clusterID, *h2.ID, models.HostStatusKnown, time.Minute) + waitForHostState(ctx, clusterID, *h3.ID, models.HostStatusKnown, time.Minute) + // update requested hostnames + _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsNames: []*models.ClusterUpdateParamsHostsNamesItems0{ + {ID: *hosts[0].ID, Hostname: "reqh0"}, + {ID: *hosts[1].ID, Hostname: "reqh1"}, + }}, + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + + // check hostnames were updated + h1 = getHost(clusterID, *h1.ID) + 
h2 = getHost(clusterID, *h2.ID) + h3 = getHost(clusterID, *h3.ID) + Expect(h1.RequestedHostname).Should(Equal("reqh0")) + Expect(h2.RequestedHostname).Should(Equal("reqh1")) + Expect(*h1.Status).Should(Equal(models.HostStatusKnown)) + Expect(*h2.Status).Should(Equal(models.HostStatusKnown)) + Expect(*h3.Status).Should(Equal(models.HostStatusKnown)) + + // register new host with the same name in inventory + By("Registering new host with same hostname as in node's inventory") + h4 := registerHost(clusterID) + generateHWPostStepReply(h4, validHwInfo, "h3") + h4 = getHost(clusterID, *h4.ID) + waitForHostState(ctx, clusterID, *h4.ID, host.HostStatusPendingForInput, time.Minute) + waitForHostState(ctx, clusterID, *h3.ID, models.HostStatusInsufficient, time.Minute) + + By("Check cluster install fails on validation") + _, err = bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).Should(HaveOccurred()) + + By("Registering new host with same hostname as in node's requested_hostname") + h5 := registerHost(clusterID) + generateHWPostStepReply(h5, validHwInfo, "reqh0") + h5 = getHost(clusterID, *h5.ID) + waitForHostState(ctx, clusterID, *h5.ID, host.HostStatusPendingForInput, time.Minute) + waitForHostState(ctx, clusterID, *h1.ID, models.HostStatusInsufficient, time.Minute) + + By("Change requested hostname of an insufficient node") + _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{ + HostsNames: []*models.ClusterUpdateParamsHostsNamesItems0{ + {ID: *hosts[0].ID, Hostname: "reqh0new"}, + }, + HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ + {ID: *h5.ID, Role: models.HostRoleUpdateParamsWorker}, + }, + }, + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + waitForHostState(ctx, clusterID, *h1.ID, models.HostStatusKnown, time.Minute) + waitForHostState(ctx, clusterID, *h5.ID, models.HostStatusKnown, time.Minute) + + 
By("change the requested hostname of the insufficient node") + _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{ + HostsNames: []*models.ClusterUpdateParamsHostsNamesItems0{ + {ID: *h3.ID, Hostname: "reqh2"}, + }, + HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ + {ID: *h4.ID, Role: models.HostRoleUpdateParamsWorker}, + }, + }, + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + waitForHostState(ctx, clusterID, *h3.ID, models.HostStatusKnown, time.Minute) + waitForClusterState(ctx, clusterID, models.ClusterStatusReady, time.Minute, clusterReadyStateInfo) + _, err = bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + }) + }) -func waitForClusterState(ctx context.Context, clusterID strfmt.UUID, state string) { - for start := time.Now(); time.Since(start) < 10*time.Second; { +func FailCluster(ctx context.Context, clusterID strfmt.UUID) strfmt.UUID { + c, err := bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + var masterHostID strfmt.UUID = *getClusterMasters(c.GetPayload())[0].ID + + installStep := models.HostStageFailed + installInfo := "because some error" + + updateProgressWithInfo(masterHostID, clusterID, installStep, installInfo) + masterHost := getHost(clusterID, masterHostID) + Expect(*masterHost.Status).Should(Equal("error")) + Expect(*masterHost.StatusInfo).Should(Equal(fmt.Sprintf("%s - %s", installStep, installInfo))) + return masterHostID +} + +var _ = Describe("cluster install, with default network params", func() { + var ( + ctx = context.Background() + cluster *models.Cluster + ) + + AfterEach(func() { + clearDB() + }) + + BeforeEach(func() { + By("Register cluster") + registerClusterReply, err := bmclient.Installer.RegisterCluster(ctx, 
&installer.RegisterClusterParams{ + NewClusterParams: &models.ClusterCreateParams{ + BaseDNSDomain: "example.com", + Name: swag.String("test-cluster"), + OpenshiftVersion: swag.String("4.5"), + PullSecret: pullSecret, + SSHPublicKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC50TuHS7aYci+U+5PLe/aW/I6maBi9PBDucLje6C6gtArfjy7udWA1DCSIQd+DkHhi57/s+PmvEjzfAfzqo+L+/8/O2l2seR1pPhHDxMR/rSyo/6rZP6KIL8HwFqXHHpDUM4tLXdgwKAe1LxBevLt/yNl8kOiHJESUSl+2QSf8z4SIbo/frDD8OwOvtfKBEG4WCb8zEsEuIPNF/Vo/UxPtS9pPTecEsWKDHR67yFjjamoyLvAzMAJotYgyMoxm8PTyCgEzHk3s3S4iO956d6KVOEJVXnTVhAxrtLuubjskd7N4hVN7h2s4Z584wYLKYhrIBL0EViihOMzY4mH3YE4KZusfIx6oMcggKX9b3NHm0la7cj2zg0r6zjUn6ZCP4gXM99e5q4auc0OEfoSfQwofGi3WmxkG3tEozCB8Zz0wGbi2CzR8zlcF+BNV5I2LESlLzjPY5B4dvv5zjxsYoz94p3rUhKnnPM2zTx1kkilDK5C5fC1k9l/I/r5Qk4ebLQU= oscohen@localhost.localdomain", + }, + }) + Expect(err).NotTo(HaveOccurred()) + cluster = registerClusterReply.GetPayload() + }) + + It("[only_k8s]install cluster", func() { + clusterID := *cluster.ID + registerHostsAndSetRoles(clusterID, 3) rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) c := rep.GetPayload() - if swag.StringValue(c.Status) == state { - break + startTimeInstalling := c.InstallStartedAt + startTimeInstalled := c.InstallCompletedAt + + _, err = bmclient.Installer.InstallCluster(ctx, &installer.InstallClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + waitForClusterInstallationToStart(clusterID) + + rep, err = bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + Expect(err).NotTo(HaveOccurred()) + c = rep.GetPayload() + Expect(len(c.Hosts)).Should(Equal(3)) + Expect(c.InstallStartedAt).ShouldNot(Equal(startTimeInstalling)) + for _, host := range c.Hosts { + waitForHostState(ctx, clusterID, *host.ID, "installing", 10*time.Second) } - time.Sleep(time.Second) + // fake installation completed + for _, host := range c.Hosts { + 
updateProgress(*host.ID, clusterID, models.HostStageDone) + } + + waitForClusterState(ctx, clusterID, "finalizing", defaultWaitForClusterStateTimeout, "Finalizing cluster installation") + success := true + _, err = bmclient.Installer.CompleteInstallation(ctx, + &installer.CompleteInstallationParams{ClusterID: clusterID, CompletionParams: &models.CompletionParams{IsSuccess: &success, ErrorInfo: ""}}) + Expect(err).NotTo(HaveOccurred()) + + waitForClusterState(ctx, clusterID, "installed", defaultWaitForClusterStateTimeout, "installed") + + rep, err = bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + + Expect(err).NotTo(HaveOccurred()) + c = rep.GetPayload() + Expect(swag.StringValue(c.Status)).Should(Equal("installed")) + Expect(c.InstallCompletedAt).ShouldNot(Equal(startTimeInstalled)) + }) +}) + +func registerHostsAndSetRoles(clusterID strfmt.UUID, numHosts int) []*models.Host { + ctx := context.Background() + hosts := make([]*models.Host, 0) + + generateHWPostStepReply := func(h *models.Host, hwInfo *models.Inventory, hostname string) { + hwInfo.Hostname = hostname + hw, err := json.Marshal(&hwInfo) + Expect(err).NotTo(HaveOccurred()) + _, err = bmclient.Installer.PostStepReply(ctx, &installer.PostStepReplyParams{ + ClusterID: h.ClusterID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: 0, + Output: string(hw), + StepID: string(models.StepTypeInventory), + StepType: models.StepTypeInventory, + }, + }) + Expect(err).ShouldNot(HaveOccurred()) } - rep, err := bmclient.Installer.GetCluster(ctx, &installer.GetClusterParams{ClusterID: clusterID}) + generateFAPostStepReply := func(h *models.Host, freeAddresses models.FreeNetworksAddresses) { + fa, err := json.Marshal(&freeAddresses) + Expect(err).NotTo(HaveOccurred()) + _, err = bmclient.Installer.PostStepReply(ctx, &installer.PostStepReplyParams{ + ClusterID: h.ClusterID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: 0, + Output: string(fa), + StepID: 
string(models.StepTypeFreeNetworkAddresses), + StepType: models.StepTypeFreeNetworkAddresses, + }, + }) + Expect(err).ShouldNot(HaveOccurred()) + } + for i := 0; i < numHosts; i++ { + hostname := fmt.Sprintf("h%d", i) + host := registerHost(clusterID) + generateHWPostStepReply(host, validHwInfo, hostname) + generateFAPostStepReply(host, validFreeAddresses) + var role models.HostRoleUpdateParams + if i < 3 { + role = models.HostRoleUpdateParamsMaster + } else { + role = models.HostRoleUpdateParamsWorker + } + _, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{HostsRoles: []*models.ClusterUpdateParamsHostsRolesItems0{ + {ID: *host.ID, Role: role}, + }}, + ClusterID: clusterID, + }) + Expect(err).NotTo(HaveOccurred()) + } + apiVip := "" + ingressVip := "" + _, err := bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{ + APIVip: &apiVip, + IngressVip: &ingressVip, + }, + ClusterID: clusterID, + }) Expect(err).NotTo(HaveOccurred()) - c := rep.GetPayload() - Expect(swag.StringValue(c.Status)).Should(Equal(state)) + apiVip = "1.2.3.8" + ingressVip = "1.2.3.9" + _, err = bmclient.Installer.UpdateCluster(ctx, &installer.UpdateClusterParams{ + ClusterUpdateParams: &models.ClusterUpdateParams{ + APIVip: &apiVip, + IngressVip: &ingressVip, + }, + ClusterID: clusterID, + }) + + Expect(err).NotTo(HaveOccurred()) + waitForClusterState(ctx, clusterID, "ready", 60*time.Second, clusterReadyStateInfo) + + return hosts +} + +func getClusterMasters(c *models.Cluster) (masters []*models.Host) { + for _, host := range c.Hosts { + if host.Role == models.HostRoleMaster { + masters = append(masters, host) + } + } + + return } diff --git a/subsystem/host_test.go b/subsystem/host_test.go index ed8eaa3a2..8e5f44b09 100644 --- a/subsystem/host_test.go +++ b/subsystem/host_test.go @@ -2,6 +2,10 @@ package subsystem import ( "context" + 
"encoding/json" + "time" + + "github.com/filanov/stateswitch/examples/host/host" "github.com/go-openapi/strfmt" "github.com/google/uuid" @@ -26,8 +30,8 @@ var _ = Describe("Host tests", func() { var err error cluster, err = bmclient.Installer.RegisterCluster(ctx, &installer.RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ - Name: swag.String("test cluster"), - OpenshiftVersion: swag.String("4.4"), + Name: swag.String("test-cluster"), + OpenshiftVersion: swag.String("4.5"), }, }) Expect(err).NotTo(HaveOccurred()) @@ -41,6 +45,7 @@ var _ = Describe("Host tests", func() { host := registerHost(clusterID) host = getHost(clusterID, *host.ID) Expect(*host.Status).Should(Equal("discovering")) + Expect(host.StatusUpdatedAt).ShouldNot(Equal(strfmt.DateTime(time.Time{}))) list, err := bmclient.Installer.ListHosts(ctx, &installer.ListHostsParams{ClusterID: clusterID}) Expect(err).NotTo(HaveOccurred()) @@ -62,21 +67,71 @@ var _ = Describe("Host tests", func() { Expect(err).Should(HaveOccurred()) }) + var defaultInventory = func() string { + inventory := models.Inventory{ + Interfaces: []*models.Interface{ + { + Name: "eth0", + IPV4Addresses: []string{ + "1.2.3.4/24", + }, + SpeedMbps: 20, + }, + { + Name: "eth1", + IPV4Addresses: []string{ + "1.2.5.4/24", + }, + SpeedMbps: 40, + }, + }, + + // CPU, Disks, and Memory were added here to prevent the case that bm-inventory crashes in case the monitor starts + // working in the middle of the test and this inventory is in the database. 
+ CPU: &models.CPU{ + Count: 4, + }, + Disks: []*models.Disk{ + { + Name: "sda1", + DriveType: "HDD", + SizeBytes: int64(120) * (int64(1) << 30), + }, + }, + Memory: &models.Memory{ + PhysicalBytes: int64(16) * (int64(1) << 30), + }, + } + b, err := json.Marshal(&inventory) + Expect(err).To(Not(HaveOccurred())) + return string(b) + } + It("next step", func() { host := registerHost(clusterID) steps := getNextSteps(clusterID, *host.ID) - _, ok := getStepInList(steps, models.StepTypeHardwareInfo) + _, ok := getStepInList(steps, models.StepTypeInventory) Expect(ok).Should(Equal(true)) _, ok = getStepInList(steps, models.StepTypeConnectivityCheck) Expect(ok).Should(Equal(true)) host = getHost(clusterID, *host.ID) + Expect(db.Model(host).Update("status", "insufficient").Error).NotTo(HaveOccurred()) + Expect(db.Model(host).UpdateColumn("inventory", defaultInventory()).Error).NotTo(HaveOccurred()) + steps = getNextSteps(clusterID, *host.ID) + _, ok = getStepInList(steps, models.StepTypeInventory) + Expect(ok).Should(Equal(true)) + _, ok = getStepInList(steps, models.StepTypeFreeNetworkAddresses) + Expect(ok).Should(Equal(true)) Expect(db.Model(host).Update("status", "known").Error).NotTo(HaveOccurred()) steps = getNextSteps(clusterID, *host.ID) _, ok = getStepInList(steps, models.StepTypeConnectivityCheck) Expect(ok).Should(Equal(true)) + _, ok = getStepInList(steps, models.StepTypeFreeNetworkAddresses) + Expect(ok).Should(Equal(true)) Expect(db.Model(host).Update("status", "disabled").Error).NotTo(HaveOccurred()) steps = getNextSteps(clusterID, *host.ID) - Expect(len(steps)).Should(Equal(0)) + Expect(steps.NextInstructionSeconds).Should(Equal(int64(120))) + Expect(len(steps.Instructions)).Should(Equal(0)) Expect(db.Model(host).Update("status", "insufficient").Error).NotTo(HaveOccurred()) steps = getNextSteps(clusterID, *host.ID) _, ok = getStepInList(steps, models.StepTypeConnectivityCheck) @@ -85,39 +140,70 @@ var _ = Describe("Host tests", func() { steps = 
getNextSteps(clusterID, *host.ID) _, ok = getStepInList(steps, models.StepTypeConnectivityCheck) Expect(ok).Should(Equal(true)) + Expect(db.Model(host).Update("status", "error").Error).NotTo(HaveOccurred()) + steps = getNextSteps(clusterID, *host.ID) + _, ok = getStepInList(steps, models.StepTypeExecute) + Expect(ok).Should(Equal(true)) + Expect(db.Model(host).Update("status", models.HostStatusResetting).Error).NotTo(HaveOccurred()) + steps = getNextSteps(clusterID, *host.ID) + _, ok = getStepInList(steps, models.StepTypeResetInstallation) + Expect(ok).Should(Equal(true)) }) - It("hardware_info_store_only_relevant_hw_reply", func() { + It("host installation progress", func() { host := registerHost(clusterID) + Expect(db.Model(host).Update("status", "installing").Error).NotTo(HaveOccurred()) + Expect(db.Model(host).Update("role", "master").Error).NotTo(HaveOccurred()) + Expect(db.Model(host).Update("bootstrap", "true").Error).NotTo(HaveOccurred()) + Expect(db.Model(host).UpdateColumn("inventory", defaultInventory()).Error).NotTo(HaveOccurred()) - extraHwInfo := 
"{\"extra\":\"data\",\"block_devices\":null,\"cpu\":{\"architecture\":\"x86_64\",\"cpus\":8,\"sockets\":1},\"memory\":[{\"available\":19743372,\"free\":8357316,\"name\":\"Mem\",\"shared\":1369116,\"total\":32657728,\"used\":11105024},{\"free\":16400380,\"name\":\"Swap\",\"total\":16400380}],\"nics\":[{\"cidrs\":[],\"mac\":\"f8:75:a4:a4:01:6e\",\"mtu\":1500,\"name\":\"enp0s31f6\",\"state\":\"NO-CARRIER,BROADCAST,MULTICAST,UP\"},{\"cidrs\":[{\"mask\":24}],\"mac\":\"80:32:53:4f:16:4f\",\"mtu\":1500,\"name\":\"wlp0s20f3\",\"state\":\"BROADCAST,MULTICAST,UP,LOWER_UP\"},{\"cidrs\":[{\"mask\":24}],\"mac\":\"52:54:00:71:50:da\",\"mtu\":1500,\"name\":\"virbr1\",\"state\":\"BROADCAST,MULTICAST,UP,LOWER_UP\"},{\"cidrs\":[],\"mac\":\"8e:59:a1:a9:14:23\",\"mtu\":1500,\"name\":\"virbr1-nic\",\"state\":\"BROADCAST,MULTICAST\"},{\"cidrs\":[{\"mask\":24}],\"mac\":\"52:54:00:bc:9b:3f\",\"mtu\":1500,\"name\":\"virbr0\",\"state\":\"BROADCAST,MULTICAST,UP,LOWER_UP\"},{\"cidrs\":[],\"mac\":\"52:54:00:bc:9b:3f\",\"mtu\":1500,\"name\":\"virbr0-nic\",\"state\":\"BROADCAST,MULTICAST\"},{\"cidrs\":[{\"mask\":16}],\"mac\":\"02:42:aa:59:3a:d3\",\"mtu\":1500,\"name\":\"docker0\",\"state\":\"NO-CARRIER,BROADCAST,MULTICAST,UP\"},{\"cidrs\":[],\"mac\":\"fe:9b:ea:d0:f5:70\",\"mtu\":1500,\"name\":\"vnet0\",\"state\":\"BROADCAST,MULTICAST,UP,LOWER_UP\"},{\"cidrs\":[],\"mac\":\"fe:16:a0:ea:b3:0b\",\"mtu\":1500,\"name\":\"vnet1\",\"state\":\"BROADCAST,MULTICAST,UP,LOWER_UP\"}]}" - hwInfo := 
"{\"block_devices\":null,\"cpu\":{\"architecture\":\"x86_64\",\"cpus\":8,\"sockets\":1},\"memory\":[{\"available\":19743372,\"free\":8357316,\"name\":\"Mem\",\"shared\":1369116,\"total\":32657728,\"used\":11105024},{\"free\":16400380,\"name\":\"Swap\",\"total\":16400380}],\"nics\":[{\"cidrs\":[],\"mac\":\"f8:75:a4:a4:01:6e\",\"mtu\":1500,\"name\":\"enp0s31f6\",\"state\":\"NO-CARRIER,BROADCAST,MULTICAST,UP\"},{\"cidrs\":[{\"mask\":24}],\"mac\":\"80:32:53:4f:16:4f\",\"mtu\":1500,\"name\":\"wlp0s20f3\",\"state\":\"BROADCAST,MULTICAST,UP,LOWER_UP\"},{\"cidrs\":[{\"mask\":24}],\"mac\":\"52:54:00:71:50:da\",\"mtu\":1500,\"name\":\"virbr1\",\"state\":\"BROADCAST,MULTICAST,UP,LOWER_UP\"},{\"cidrs\":[],\"mac\":\"8e:59:a1:a9:14:23\",\"mtu\":1500,\"name\":\"virbr1-nic\",\"state\":\"BROADCAST,MULTICAST\"},{\"cidrs\":[{\"mask\":24}],\"mac\":\"52:54:00:bc:9b:3f\",\"mtu\":1500,\"name\":\"virbr0\",\"state\":\"BROADCAST,MULTICAST,UP,LOWER_UP\"},{\"cidrs\":[],\"mac\":\"52:54:00:bc:9b:3f\",\"mtu\":1500,\"name\":\"virbr0-nic\",\"state\":\"BROADCAST,MULTICAST\"},{\"cidrs\":[{\"mask\":16}],\"mac\":\"02:42:aa:59:3a:d3\",\"mtu\":1500,\"name\":\"docker0\",\"state\":\"NO-CARRIER,BROADCAST,MULTICAST,UP\"},{\"cidrs\":[],\"mac\":\"fe:9b:ea:d0:f5:70\",\"mtu\":1500,\"name\":\"vnet0\",\"state\":\"BROADCAST,MULTICAST,UP,LOWER_UP\"},{\"cidrs\":[],\"mac\":\"fe:16:a0:ea:b3:0b\",\"mtu\":1500,\"name\":\"vnet1\",\"state\":\"BROADCAST,MULTICAST,UP,LOWER_UP\"}]}" + updateProgress(*host.ID, clusterID, models.HostStageStartingInstallation) + host = getHost(clusterID, *host.ID) + Expect(host.Progress.CurrentStage).Should(Equal(models.HostStageStartingInstallation)) + time.Sleep(time.Second * 3) + updateProgress(*host.ID, clusterID, models.HostStageInstalling) + host = getHost(clusterID, *host.ID) + Expect(host.Progress.CurrentStage).Should(Equal(models.HostStageInstalling)) + time.Sleep(time.Second * 3) + updateProgress(*host.ID, clusterID, models.HostStageWritingImageToDisk) + host = getHost(clusterID, 
*host.ID) + Expect(host.Progress.CurrentStage).Should(Equal(models.HostStageWritingImageToDisk)) + time.Sleep(time.Second * 3) + updateProgress(*host.ID, clusterID, models.HostStageRebooting) + host = getHost(clusterID, *host.ID) + Expect(host.Progress.CurrentStage).Should(Equal(models.HostStageRebooting)) + time.Sleep(time.Second * 3) + updateProgress(*host.ID, clusterID, models.HostStageConfiguring) + host = getHost(clusterID, *host.ID) + Expect(host.Progress.CurrentStage).Should(Equal(models.HostStageConfiguring)) + time.Sleep(time.Second * 3) + updateProgress(*host.ID, clusterID, models.HostStageDone) + host = getHost(clusterID, *host.ID) + Expect(host.Progress.CurrentStage).Should(Equal(models.HostStageDone)) + time.Sleep(time.Second * 3) + }) + + It("installation_error_reply", func() { + host := registerHost(clusterID) + Expect(db.Model(host).Update("status", "installing").Error).NotTo(HaveOccurred()) + Expect(db.Model(host).UpdateColumn("inventory", defaultInventory()).Error).NotTo(HaveOccurred()) + Expect(db.Model(host).Update("role", "worker").Error).NotTo(HaveOccurred()) _, err := bmclient.Installer.PostStepReply(ctx, &installer.PostStepReplyParams{ ClusterID: clusterID, HostID: *host.ID, Reply: &models.StepReply{ - ExitCode: 0, - Output: extraHwInfo, - StepID: string(models.StepTypeHardwareInfo), + ExitCode: 137, + Output: "Failed to install", + StepType: models.StepTypeInstall, + StepID: "installCmd-" + string(models.StepTypeExecute), }, }) - Expect(err).NotTo(HaveOccurred()) + Expect(err).Should(HaveOccurred()) host = getHost(clusterID, *host.ID) - Expect(host.HardwareInfo).Should(Equal(hwInfo)) + Expect(swag.StringValue(host.Status)).Should(Equal("error")) + Expect(swag.StringValue(host.StatusInfo)).Should(Equal("installation command failed")) - _, err = bmclient.Installer.PostStepReply(ctx, &installer.PostStepReplyParams{ - ClusterID: clusterID, - HostID: *host.ID, - Reply: &models.StepReply{ - ExitCode: 0, - Output: "not a json", - StepID: 
string(models.StepTypeHardwareInfo), - }, - }) - Expect(err).To(HaveOccurred()) - host = getHost(clusterID, *host.ID) - Expect(host.HardwareInfo).Should(Equal(hwInfo)) }) It("connectivity_report_store_only_relevant_reply", func() { @@ -133,6 +219,7 @@ var _ = Describe("Host tests", func() { ExitCode: 0, Output: extraConnectivity, StepID: string(models.StepTypeConnectivityCheck), + StepType: models.StepTypeConnectivityCheck, }, }) Expect(err).NotTo(HaveOccurred()) @@ -146,6 +233,7 @@ var _ = Describe("Host tests", func() { ExitCode: 0, Output: "not a json", StepID: string(models.StepTypeConnectivityCheck), + StepType: models.StepTypeConnectivityCheck, }, }) Expect(err).To(HaveOccurred()) @@ -169,6 +257,80 @@ var _ = Describe("Host tests", func() { }) + It("free addresses report", func() { + h := registerHost(clusterID) + + free_addresses_report := "[{\"free_addresses\":[\"10.0.0.0\",\"10.0.0.1\"],\"network\":\"10.0.0.0/24\"},{\"free_addresses\":[\"10.0.1.0\"],\"network\":\"10.0.1.0/24\"}]" + + _, err := bmclient.Installer.PostStepReply(ctx, &installer.PostStepReplyParams{ + ClusterID: clusterID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: 0, + Output: free_addresses_report, + StepID: string(models.StepTypeFreeNetworkAddresses), + StepType: models.StepTypeFreeNetworkAddresses, + }, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(db.Model(h).UpdateColumn("status", host.StateInsufficient).Error).NotTo(HaveOccurred()) + h = getHost(clusterID, *h.ID) + Expect(h.FreeAddresses).Should(Equal(free_addresses_report)) + + freeAddressesReply, err := bmclient.Installer.GetFreeAddresses(ctx, &installer.GetFreeAddressesParams{ + ClusterID: clusterID, + Network: "10.0.0.0/24", + }) + Expect(err).ToNot(HaveOccurred()) + Expect(freeAddressesReply.Payload).To(HaveLen(2)) + Expect(freeAddressesReply.Payload[0]).To(Equal(strfmt.IPv4("10.0.0.0"))) + Expect(freeAddressesReply.Payload[1]).To(Equal(strfmt.IPv4("10.0.0.1"))) + + freeAddressesReply, err = 
bmclient.Installer.GetFreeAddresses(ctx, &installer.GetFreeAddressesParams{ + ClusterID: clusterID, + Network: "10.0.1.0/24", + }) + Expect(err).ToNot(HaveOccurred()) + Expect(freeAddressesReply.Payload).To(HaveLen(1)) + Expect(freeAddressesReply.Payload[0]).To(Equal(strfmt.IPv4("10.0.1.0"))) + + freeAddressesReply, err = bmclient.Installer.GetFreeAddresses(ctx, &installer.GetFreeAddressesParams{ + ClusterID: clusterID, + Network: "10.0.2.0/24", + }) + Expect(err).NotTo(HaveOccurred()) + Expect(freeAddressesReply.Payload).To(BeEmpty()) + + _, err = bmclient.Installer.PostStepReply(ctx, &installer.PostStepReplyParams{ + ClusterID: clusterID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: 0, + Output: "not a json", + StepID: string(models.StepTypeFreeNetworkAddresses), + StepType: models.StepTypeFreeNetworkAddresses, + }, + }) + Expect(err).To(HaveOccurred()) + h = getHost(clusterID, *h.ID) + Expect(h.FreeAddresses).Should(Equal(free_addresses_report)) + + //exit code is not 0 + _, err = bmclient.Installer.PostStepReply(ctx, &installer.PostStepReplyParams{ + ClusterID: clusterID, + HostID: *h.ID, + Reply: &models.StepReply{ + ExitCode: -1, + Error: "some error", + Output: "not a json", + StepID: string(models.StepTypeFreeNetworkAddresses), + }, + }) + Expect(err).To(HaveOccurred()) + h = getHost(clusterID, *h.ID) + Expect(h.FreeAddresses).Should(Equal(free_addresses_report)) + }) + It("disable enable", func() { host := registerHost(clusterID) _, err := bmclient.Installer.DisableHost(ctx, &installer.DisableHostParams{ @@ -178,7 +340,7 @@ var _ = Describe("Host tests", func() { Expect(err).NotTo(HaveOccurred()) host = getHost(clusterID, *host.ID) Expect(*host.Status).Should(Equal("disabled")) - Expect(len(getNextSteps(clusterID, *host.ID))).Should(Equal(0)) + Expect(len(getNextSteps(clusterID, *host.ID).Instructions)).Should(Equal(0)) _, err = bmclient.Installer.EnableHost(ctx, &installer.EnableHostParams{ ClusterID: clusterID, @@ -187,7 +349,7 @@ var _ = 
Describe("Host tests", func() { Expect(err).NotTo(HaveOccurred()) host = getHost(clusterID, *host.ID) Expect(*host.Status).Should(Equal("discovering")) - Expect(len(getNextSteps(clusterID, *host.ID))).ShouldNot(Equal(0)) + Expect(len(getNextSteps(clusterID, *host.ID).Instructions)).ShouldNot(Equal(0)) }) It("debug", func() { @@ -241,8 +403,8 @@ var _ = Describe("Host tests", func() { cluster2, err := bmclient.Installer.RegisterCluster(ctx, &installer.RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ - Name: swag.String("another cluster"), - OpenshiftVersion: swag.String("4.4"), + Name: swag.String("another-cluster"), + OpenshiftVersion: swag.String("4.5"), }, }) Expect(err).NotTo(HaveOccurred()) @@ -282,21 +444,3 @@ var _ = Describe("Host tests", func() { Expect(swag.StringValue(h.Status)).Should(Equal("discovering")) }) }) - -func getStepInList(steps models.Steps, sType models.StepType) (*models.Step, bool) { - for _, step := range steps { - if step.StepType == sType { - return step, true - } - } - return nil, false -} - -func getNextSteps(clusterID, hostID strfmt.UUID) models.Steps { - steps, err := bmclient.Installer.GetNextSteps(context.Background(), &installer.GetNextStepsParams{ - ClusterID: clusterID, - HostID: hostID, - }) - Expect(err).NotTo(HaveOccurred()) - return steps.GetPayload() -} diff --git a/subsystem/image_test.go b/subsystem/image_test.go index 8d69ac154..133a81a14 100644 --- a/subsystem/image_test.go +++ b/subsystem/image_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io/ioutil" - "log" "os" "reflect" "strings" @@ -23,6 +22,7 @@ var _ = Describe("system-test image tests", func() { ctx := context.Background() var cluster *installer.RegisterClusterCreated var clusterID strfmt.UUID + pullSecret := "{\"auths\":{\"cloud.openshift.com\":{\"auth\":\"dXNlcjpwYXNzd29yZAo=\",\"email\":\"r@r.com\"}}}" // #nosec AfterEach(func() { clearDB() @@ -32,15 +32,16 @@ var _ = Describe("system-test image tests", func() { var err error 
cluster, err = bmclient.Installer.RegisterCluster(ctx, &installer.RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ - Name: swag.String("test cluster"), - OpenshiftVersion: swag.String("4.4"), + Name: swag.String("test-cluster"), + OpenshiftVersion: swag.String("4.5"), + PullSecret: pullSecret, }, }) Expect(err).NotTo(HaveOccurred()) clusterID = *cluster.GetPayload().ID }) - It("create_and_get_image", func() { + It("[only_k8s]create_and_get_image", func() { file, err := ioutil.TempFile("", "tmp") if err != nil { log.Fatal(err) @@ -97,11 +98,11 @@ var _ = Describe("image tests", func() { Expect(err).Should(HaveOccurred()) }) - It("download_non_existing_image", func() { + It("[only_k8s]download_non_existing_image", func() { cluster, err := bmclient.Installer.RegisterCluster(ctx, &installer.RegisterClusterParams{ NewClusterParams: &models.ClusterCreateParams{ - Name: swag.String("test cluster"), - OpenshiftVersion: swag.String("4.4"), + Name: swag.String("test-cluster"), + OpenshiftVersion: swag.String("4.5"), }, }) Expect(err).NotTo(HaveOccurred()) diff --git a/subsystem/subsystem_suite_test.go b/subsystem/subsystem_suite_test.go index a9545755b..663cfe7ec 100644 --- a/subsystem/subsystem_suite_test.go +++ b/subsystem/subsystem_suite_test.go @@ -2,13 +2,12 @@ package subsystem import ( "fmt" - "log" "net/url" "testing" "github.com/filanov/bm-inventory/client" "github.com/jinzhu/gorm" - _ "github.com/jinzhu/gorm/dialects/mysql" + _ "github.com/jinzhu/gorm/dialects/postgres" "github.com/kelseyhightower/envconfig" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -17,6 +16,7 @@ import ( var db *gorm.DB var bmclient *client.AssistedInstall +var log *logrus.Logger var Options struct { DBHost string `envconfig:"DB_HOST"` @@ -26,6 +26,8 @@ var Options struct { func init() { var err error + log = logrus.New() + log.SetReportCaller(true) err = envconfig.Process("subsystem", &Options) if err != nil { log.Fatal(err.Error()) @@ -39,8 +41,8 @@ func init() { }, }) - db, err = gorm.Open("mysql", - fmt.Sprintf("admin:admin@tcp(%s:%s)/installer?charset=utf8&parseTime=True&loc=Local", + db, err = gorm.Open("postgres", + fmt.Sprintf("host=%s port=%s user=admin dbname=installer password=admin sslmode=disable", Options.DBHost, Options.DBPort)) if err != nil { logrus.Fatal("Fail to connect to DB, ", err) diff --git a/subsystem/test_kubeconfig b/subsystem/test_kubeconfig new file mode 100644 index 000000000..625470617 --- /dev/null +++ b/subsystem/test_kubeconfig @@ -0,0 +1,17 @@ +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURRRENDQWlpZ0F3SUJBZ0lJZGd5RFlHbkRoVkl3RFFZSktvWklodmNOQVFFTEJRQXdQakVTTUJBR0ExVUUKQ3hNSmIzQmxibk5vYVdaME1TZ3dKZ1lEVlFRREV4OXJkV0psTFdGd2FYTmxjblpsY2kxc2IyTmhiR2h2YzNRdApjMmxuYm1WeU1CNFhEVEl3TURVeU5qRTFNRFV5TjFvWERUTXdNRFV5TkRFMU1EVXlOMW93UGpFU01CQUdBMVVFCkN4TUpiM0JsYm5Ob2FXWjBNU2d3SmdZRFZRUURFeDlyZFdKbExXRndhWE5sY25abGNpMXNiMk5oYkdodmMzUXQKYzJsbmJtVnlNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTNzdnNGbVlYd3pNMApIcDNDY1p3Z2RBSnJWMXRocDNxMzcyMFV1RkdIUitvSXhhNkkwQXBVcnlxalhnY3JBSXRSTzBOZlkzMGUvaXdICmZ6bXI0MTZ4YVg0S3JXNUl1SkIyZkQ1OVFUQVBPRUJJSWhrRkV2Z0cxMitQaGlOVmhsbGt2V0oyTGVIL1V3cjMKN1J3eHlERFFrQ3ZDR1o5Ky83djlmSmVVVnJhSW5sL1dYTGp0c2orRXowRm1Ucks3bXpKaVFQcGs1RjNmUHpyMwpHQlh6REZQSU9JeHR6TTdsSng2dFNlaG5hZlByTHMxb3lXTm1HdXRNK0trYzE1dUROWUhybUo4Q1ZqZm83a2Q0CnQ3SXR6NUN5TGxwRFdwVEdEdmVoOXdMRnpRVjN2NHA0amJNZlZVTVJiV1BFRHp4cWphVGIwalVvVUFBRlBicTkKUTMxNG1zWlF3d0lEQVFBQm8wSXdRREFPQmdOVkhROEJBZjhFQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQgovekFkQmdOVkhR
NEVGZ1FVWkNGdmF3QWdWSnZtcGhIT20vbEdlZXdjZzI4d0RRWUpLb1pJaHZjTkFRRUxCUUFECmdnRUJBTmNxY0ZaMEJnZmM0TnlhclMwNXRWeGkzbzU2Qi9KMmpwNnVRVUtvZlJyMGhraUhYVDd0QnZubEppUSsKeSt5OHBEb29pVHpJOHMrMFd1dEN3S1d5dHdXNnNIczd4Zk9MUEtncFZXZWgyekRyZDhHY29hbXhlMzRmalRwdgppeUxDeWN2TkIzb1FpWkcyWkU1SDRXTmMwUmZXU0ZHUDcweG1JKzVVY2RvZTR6R3lEQU43bm5CYmZtU2xIK0kwCkkzWC9FaS9lRnZka081eG9kYVlUbFNWV2hTOHdib3FDVjF0SEN0YmIyQjYvY1VWTTRXUHg2Nm81Q05PMmYzMHoKcWJMU3F4ZzhYUXhjYW5FTGsxaGhNTjVlZDZkanc2YVFZUWxGSXBlcklPVVVTNDNJMGpRZTBYdmsrWWJhZGJscgpUVVR2VVV6RTdTNW5MUGRKK0hYZ2hZdFBHR1k9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURURENDQWpTZ0F3SUJBZ0lJSXFpUUtKdUhNY293RFFZSktvWklodmNOQVFFTEJRQXdSREVTTUJBR0ExVUUKQ3hNSmIzQmxibk5vYVdaME1TNHdMQVlEVlFRREV5VnJkV0psTFdGd2FYTmxjblpsY2kxelpYSjJhV05sTFc1bApkSGR2Y21zdGMybG5ibVZ5TUI0WERUSXdNRFV5TmpFMU1EVXlOMW9YRFRNd01EVXlOREUxTURVeU4xb3dSREVTCk1CQUdBMVVFQ3hNSmIzQmxibk5vYVdaME1TNHdMQVlEVlFRREV5VnJkV0psTFdGd2FYTmxjblpsY2kxelpYSjIKYVdObExXNWxkSGR2Y21zdGMybG5ibVZ5TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQwpBUUVBM3FDYjEycHFMcEt3M1dTVU1MclhTdFcza2ltYllVWGk1R1AyeXJhZjFYOE5pMlZwZnZHcUxtaWFpNFhVCjF2WkJhdEhYSE1SS1VteDczUTA3ZWVKUnQzNTdSUU00ZW9tbVhqQUloQjhPRU1JV0gzTTN1dTFnVlUvZkpNb2wKTlozY3JvN1BjUGFtVmRzdzIwYjhSTEIwcW55NW9FSWdLU2F5YkdGWlF6V2t4WitaR2NlNWxLbmZ0OGZpci9Fegp3NkpOK0lxbW1YOTFtVXR6T25CaFZNNFB5RzFHbnUyZ1g1SXNUOWFHbGdwOWlTSENJMnZyMEtORXZVZFJaY0k2Ck9SM0ZQUSs1WnQ5c2hPOHhYZlJpMFF3SmN2bG5xdCtDS2N6RFcwelUwc1BhVzJ5VlJzUmIyQjZFVHExMGxpQXoKN3NKRzdFc3U4TW5JVnQ1dFpqMmoxSkpUMVFJREFRQUJvMEl3UURBT0JnTlZIUThCQWY4RUJBTUNBcVF3RHdZRApWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVXZ4RGJ5NnJaNWRkczBTOEtDR3dNcVRhWitFb3dEUVlKCktvWklodmNOQVFFTEJRQURnZ0VCQUtIdmxZR2N2NTFyQW1QalR2dWFIZUpaQjhoRmlsTkdHdnIxK1QxajZIY2YKcndPYzYxSCtGUjJtenNqRFBMSWhBSjV1WWlidUZEQVB3eldDUXpJT0lxeDZhanhaZlhaSTNBcDdibDB1TlBtUwpEeW1XdnJVemx2MGZoMHVwUHE1RGFiNXFPMnRPTWJxTjNjVDQxNXU0YldWaThnVmZ2Mk9SWFUxVEhpWkliVUVJCkVNbE1vRStSRU5WQzZlU0U3Rmd3S3lUQ2M2cmZ4ZGtyMU8yOEhYOFNOOGhydW9WYXdJajN6Z1k5ZWVlZUpx
dE0KSmdPVVRwWThKSWIzRzcwZjRId01xQ3hrcERqUWROVTVMYTR1OUlBN0NlQldiemdrTVBiMmI4bmRMUngzQ0Z2MAovcmF1U21GY2l0MnVid1BYbWIzalBFcVhuWXh1ckp0YWtvVTAvQnRraGdrPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlETWpDQ0FocWdBd0lCQWdJSUhTRXB2RFlvcHBnd0RRWUpLb1pJaHZjTkFRRUxCUUF3TnpFU01CQUdBMVVFCkN4TUpiM0JsYm5Ob2FXWjBNU0V3SHdZRFZRUURFeGhyZFdKbExXRndhWE5sY25abGNpMXNZaTF6YVdkdVpYSXcKSGhjTk1qQXdOVEkyTVRVd05USTNXaGNOTXpBd05USTBNVFV3TlRJM1dqQTNNUkl3RUFZRFZRUUxFd2x2Y0dWdQpjMmhwWm5ReElUQWZCZ05WQkFNVEdHdDFZbVV0WVhCcGMyVnlkbVZ5TFd4aUxYTnBaMjVsY2pDQ0FTSXdEUVlKCktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1nREt3Ni9ONkY2UXBWZEJRdXpHSy95RFhkUGhtRXgKNWNiK2h6ZEw2RDFPNWZVRHBjZitHT1FEdjB2dUZrNXcveDlVRmxoRldRTkNHOEtQbWxvSFhkMHIxQllWYWc0VgpYcGFYYklGUHJmMzVGeFhzZk1HS0JUQllaRXNiKzJRTWFNci9Ec0g0cTQ5Z294NDJ5U24rV2prbnNCWnZ4Ukd0CndBT2t0b1VWUVloaWxrWDg5dHl1VjJlTU1CbmVEZmdyazZ1WEt4YjIzTWg2WEZvNHAvMW9MV1VrVmxQQ3hlM2MKNWI2VWNON29NdTI3M016enJnZzVDOXdmbGVycWFoamxBRXFHY0JMQ3NrT0NBbGhMWGRvMW5FZ09lM0ZMUHJFeApiNW93UTJkNnQvbEFlellhbU5lTUl4bTN1OWx6djJXVytLUm4rRmcxci9UZWt4Qi9ZUUVkNHhjQ0F3RUFBYU5DCk1FQXdEZ1lEVlIwUEFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZHN0QKOXc2UEloYUxEYTRIaU05MVNMdUw3bGlUTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBV2JZeENVUzF2SStEUQpvK1JJZmxRSU5KZFcwZkF5WEFtVUdPZGdiVW8zL2R5UldnY0RDcFdCbVhMWlhkaVJwd3R5MWV2NTNzM1VrM0ZrCldoRGJSZVFPb29nSW1RMmVZdnZxWjFlbHhGeHdYdHZtVGw1cGZ1aFdMSDI4UC8rZnZFQW04T1dpTmtSUmpsNWYKMkhLUEU1MzBMQmtsSGx0OE9naUh0bS9XOGRUcTk3Y3lXaVlsdlREeFRkREFPRTVLZERrNXpxbXRBWUpWRlhnWApyajR0UUthTkphRWdyK2JkamhKbUIyUjU4b3RxMExXeS9uNnBjTWNmSFk2Q2k5dWhwT0Y3R3RWYjdTcUNGd0tPClMvdlVORFBsK2QyRjdLMDQweUVRUExQZHI5cEF5QlNWSkZyL2lnRkJycHJJVEdkYUZ1dFhQUHYybjBtTHBvS0kKVTlIV2FJcTgKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server: https://api.test-infra-cluster.redhat:6443 + name: test-infra-cluster +contexts: +- context: + cluster: test-infra-cluster + user: admin + name: admin +current-context: admin +preferences: {} +users: +- name: admin + user: + 
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURaekNDQWsrZ0F3SUJBZ0lJUnljNWdMZE1sMjB3RFFZSktvWklodmNOQVFFTEJRQXdOakVTTUJBR0ExVUUKQ3hNSmIzQmxibk5vYVdaME1TQXdIZ1lEVlFRREV4ZGhaRzFwYmkxcmRXSmxZMjl1Wm1sbkxYTnBaMjVsY2pBZQpGdzB5TURBMU1qWXhOVEExTWpaYUZ3MHpNREExTWpReE5UQTFNamRhTURBeEZ6QVZCZ05WQkFvVERuTjVjM1JsCmJUcHRZWE4wWlhKek1SVXdFd1lEVlFRREV3eHplWE4wWlcwNllXUnRhVzR3Z2dFaU1BMEdDU3FHU0liM0RRRUIKQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURXR0h2ak13Tzh6SGRPTGtJcmVYMFYvdXptMjZ6dlJYQWJ2K3R6YU42dgpXbFByTFZUSkRwVHJwUVpmbnI4ZlBXQ0xSazRmRlNTZlZlR2EvRi80MzNUNDdTWGpQcHhYWEV3Y1VhYi8yRjhhCnlmdlphVGRXRHBNd3NFNzYzeXBEODhvalNQQVlRdDRqSGhRcEV3TExqVVR6VXdkTGJjdnREVEQyWmtwTE1oZFcKMmhrNGU1eVZXSzhrUDRsSUxYbnJMbFJxR2VUNGRtbmN6NHdMcTZSWXgrR0hUaFhNUzlTZEhkZ2V1dHlVZGlVMQpPNFRIOGNEZklrRnQ5Y3lkSmNZajdmREh6WWRQemlVdmtrcFJ0eHNuamlFTzVwM0h0cDAyL3kvUmlpN2t5TjByCndlNGM2Y29LZzRXZzYrUzVzaEpZV3d0ZGsrTmkxYU1BTnVIOExMbGhaUnlEQWdNQkFBR2pmekI5TUE0R0ExVWQKRHdFQi93UUVBd0lGb0RBZEJnTlZIU1VFRmpBVUJnZ3JCZ0VGQlFjREFRWUlLd1lCQlFVSEF3SXdEQVlEVlIwVApBUUgvQkFJd0FEQWRCZ05WSFE0RUZnUVVqWWlDdGo4blg2dHIzTHFCTURmSHU0aDdERUF3SHdZRFZSMGpCQmd3CkZvQVUxYkQ2VUUwN3cxUFBHMzZKWjJSZHJLL2lQUEl3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUhHUWpSRUoKU3NsbFM1YkdHYkxmNGFGdVIxbDdiVGh0UTBLTFl3bkowQmtKNDBnVVZUcHdmMEFpcFRVanU1UTkxNzZaNEJWUQpleFB4U1BuRngwSGxoQXhUYmJQaWQ2Y25leVlId2I1Vi95R3I2M0p5cWcxR2pScVJ5djBLR3k0bHRnWGRKNmQyCktZaVFlT2pYN3B2SXVSMjE2T2ZNMWdnYUdOUkFzSDZwYjlEMU5ENmoyRnVlUkcrVFN1MUxzRlJXWUZsVHk0aDMKc014RElWblY3L0R1T1M5QTJCVm80alpYL0pmbzFId2ExL3BaVkJ1YVU5UHJlemNOblBkZ09TRmxVc0tBbUNjMgpCb3dES0RPbkZZMm9IZTc5ZjF6NlU1a1Buby80ZGlBT0FIQ2dPY0lPbk9ubUNOR1d3R0tLU2tGc05lYUtnVVltCjVYVkFqNVd3amNTdmhqMD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBMWhoNzR6TUR2TXgzVGk1Q0szbDlGZjdzNXR1czcwVndHNy9yYzJqZXIxcFQ2eTFVCnlRNlU2NlVHWDU2L0h6MWdpMFpPSHhVa24xWGhtdnhmK045MCtPMGw0ejZjVjF4TUhGR20vOWhmR3NuNzJXazMKVmc2VE1MQk8rdDhxUS9QS0kwandHRUxlSXg0VUtSTUN5NDFFODFNSFMyM0w3UTB3OW1aS1N6SVhWdG9aT0h1YwpsVml2SkQrSlNDMTU2eTVVYWhuaytIWnAzTStNQzZ1a1dNZmhoMDRWekV2VW5SM1lIcnJjbEhZbE5UdUV4L0hBCjN5SkJiZlhNblNYR0krM3d4ODJIVDg0bEw1SktVYmNiSjQ0aER1YWR4N2FkTnY4djBZb3U1TWpkSzhIdUhPbksKQ29PRm9Pdmt1YklTV0ZzTFhaUGpZdFdqQURiaC9DeTVZV1VjZ3dJREFRQUJBb0lCQUFRRHo0YnlOUGE4YXR4WApkN3d5K2dxSWprN0IvZHM2MVNCZ0YvMUJFVFArb0tZL1ltQ20ybG9VN1NxcjRtK21pZ0h5bnBKc3BoUXEyeUU1CjdGN1JhZk1sRjFuTW1jZjFuaVBGMERqcUNOYUt4U05ObXRFTlV1dE4weDFYUkFha01yMDRwKy84aVFmbGo0RTUKcndxOEtuZlpyY0JYWGNTalE3RExPRWR5dUFkVDVPN0lqRjV3dENNNUw0Tlc5K0dkOFg4VnpxaTNndWlkdzUwRQphSjlndjR2Nnp3RDAydFNSZHNoUlBBbU9oVzBCVDl5bkZCNEFrdlg3b1psVHR2YU5QMVF3S0RkQXd0cmF5K2ZXCjRMcmY0ZldEWDIzNERqS01sOUN3cjhnTmNja0FoejVjN09HQ0dzeU9xc3JBTmllNFB6VWtmc0xhM1ZQQ0FsTHoKV2s1dEs0a0NnWUVBNVBMM3hzMzB2Y1NPcjhjV2R4eElCTE1pTzNnRldBb05qeEJkQ2ExNEZCYjlGaVMxK1NNSgpQTkZNTU1rT2VUMGZPamx1dVpyTGVueDIrUFZNVnhrb29kYXNmcGxtNGI3RHk1RkZzcGhUajlOOXJJdmIvWWpJCk51SDdKNkgzVEZJTzhDVE5GK1FwSmIza2pNZUhRcWM0S0cwSFBCeTlpLytvUExkOGdxZ1JCczhDZ1lFQTcyUSsKT3prYlZaUzlad25YcEZPZmQwa2hZQkkzUFlvaElockJjOEw3Q2xHZlVwdFRQdlMxSWVqUjlMNHFiM1BsSHB6bAptaG5wQXRGTFFYQXJCdFk4UmErZkxlbW1Xdzc2ZndrdlFraXlLblUvZ0s1UlVkdmdQbkxrSCtVSmJJTmwzcjgyCkRzQzB0bWJJL1QrblpObEJHdVFkQm1US3hkZ1R1MXRBZ3dIcy9BMENnWUVBMlZISDMrMmZZb0l3N3FrTHFnUXUKV0VleE5zRzJVTnM2QTVLRXZhcnJVQ2FDRllMRE9Ma0pDN0dmb0s4NERkejJ4MDI4ekhFaXRDRnd6T0FLbHFKSwo3MVBXYUZVMFV4UEF4bm9lcm1mbzZaeldyZklUMzVUMmR5SUtSSlI1S1BpN05UZTVkZlFkR3JZbE8zd3A2QnJTCk00MUtVTVQzSnV5RnhSeG1FNTkwaWdFQ2dZQjU5bHRTTnVUN00vMU8rbyszczdiaHdndFQ4OVBhOFgyeDcybXgKdlp2Q2hSVWpzK2kwZ1YycStmL0ZyZ0RXcVhnSW9hekVWd0VFbzNhd3p5SE1xT2NxSmJCMlpyeVBWZEUvV1lHUApScFFtMTNkVDZ2dVpOZWxJUjZaN3JXZWd0a3ozTC9tdGlIWkpHNUs0bTI2QURjT0NuTWRBMDZjUEp1ZmVvejM1CndNaHBIUUtCZ0E1WVpYeWE1azUrcGFRd1RaWTFKb2ZsZ2FjNlRR
Z2IzOTJDWStZR1RjNHdDVVlNNWVVazg0TCsKRFM4bTYwc0liL0g3UVBOa2RBTXZobFNPMk9McFVEbGtWUnhrUXFvckxWRDRRQUc0SFBMU1VLVzI0eFNqeXpZNApncXQ4b3VmcEFIV0pMLzZpZEthUzhvVjVlc2NyV0Y5clJ6bWliUGx4V1JIZ3ZzdVJJSEhjCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/subsystem/utils_test.go b/subsystem/utils_test.go index b0056b1cf..2045b7759 100644 --- a/subsystem/utils_test.go +++ b/subsystem/utils_test.go @@ -2,6 +2,7 @@ package subsystem import ( "context" + "time" "github.com/filanov/bm-inventory/client/installer" "github.com/filanov/bm-inventory/models" @@ -10,6 +11,11 @@ import ( . "github.com/onsi/gomega" ) +const ( + defaultWaitForHostStateTimeout = 20 * time.Second + defaultWaitForClusterStateTimeout = 30 * time.Second +) + func clearDB() { db.Delete(&models.Host{}) db.Delete(&models.Cluster{}) @@ -39,3 +45,41 @@ func getHost(clusterID, hostID strfmt.UUID) *models.Host { Expect(err).NotTo(HaveOccurred()) return host.GetPayload() } + +func getStepInList(steps models.Steps, sType models.StepType) (*models.Step, bool) { + for _, step := range steps.Instructions { + if step.StepType == sType { + return step, true + } + } + return nil, false +} + +func getNextSteps(clusterID, hostID strfmt.UUID) models.Steps { + steps, err := bmclient.Installer.GetNextSteps(context.Background(), &installer.GetNextStepsParams{ + ClusterID: clusterID, + HostID: hostID, + }) + Expect(err).NotTo(HaveOccurred()) + return *steps.GetPayload() +} + +func updateProgress(hostID strfmt.UUID, clusterID strfmt.UUID, current_step models.HostStage) { + updateProgressWithInfo(hostID, clusterID, current_step, "") +} + +func updateProgressWithInfo(hostID strfmt.UUID, clusterID strfmt.UUID, current_step models.HostStage, info string) { + ctx := context.Background() + + installProgress := &models.HostProgress{ + CurrentStage: current_step, + ProgressInfo: info, + } + updateReply, err := bmclient.Installer.UpdateHostInstallProgress(ctx, &installer.UpdateHostInstallProgressParams{ + ClusterID: clusterID, + 
HostProgress: installProgress, + HostID: hostID, + }) + Expect(err).ShouldNot(HaveOccurred()) + Expect(updateReply).Should(BeAssignableToTypeOf(installer.NewUpdateHostInstallProgressOK())) +} diff --git a/subsystem/versions_test.go b/subsystem/versions_test.go new file mode 100644 index 000000000..94f678b39 --- /dev/null +++ b/subsystem/versions_test.go @@ -0,0 +1,18 @@ +package subsystem + +import ( + "context" + + "github.com/filanov/bm-inventory/client/versions" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("test versions", func() { + It("get versions list", func() { + reply, err := bmclient.Versions.ListComponentVersions(context.Background(), + &versions.ListComponentVersionsParams{}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(reply.GetPayload().Versions)).To(Equal(6)) + }) +}) diff --git a/swagger.yaml b/swagger.yaml index 693b56912..0ad44a9d5 100644 --- a/swagger.yaml +++ b/swagger.yaml @@ -182,6 +182,8 @@ paths: $ref: '#/definitions/error' 409: description: Error. + schema: + $ref: '#/definitions/error' 500: description: Error. schema: @@ -237,7 +239,7 @@ paths: - in: query name: file_name type: string - enum: [bootstrap.ign, master.ign, metadata.json, worker.ign, kubeadmin-password, kubeconfig] + enum: [bootstrap.ign, master.ign, metadata.json, worker.ign, kubeadmin-password, kubeconfig, kubeconfig-noingress, install-config.yaml] required: true responses: 200: @@ -286,6 +288,72 @@ paths: schema: $ref: '#/definitions/error' + /clusters/{cluster_id}/downloads/kubeconfig: + get: + tags: + - installer + summary: Downloads the kubeconfig file for this cluster. + operationId: DownloadClusterKubeconfig + produces: + - application/octet-stream + parameters: + - in: path + name: cluster_id + type: string + format: uuid + required: true + responses: + 200: + description: Success. + schema: + type: string + format: binary + 404: + description: Error. + schema: + $ref: '#/definitions/error' + 409: + description: Error. 
+ schema: + $ref: '#/definitions/error' + 500: + description: Error. + schema: + $ref: '#/definitions/error' + + /clusters/{cluster_id}/uploads/ingress-cert: + post: + tags: + - installer + summary: Transfer the ingress certificate for the cluster. + operationId: UploadClusterIngressCert + parameters: + - in: path + name: cluster_id + type: string + format: uuid + required: true + - in: body + name: ingress-cert-params + required: true + schema: + $ref: '#/definitions/ingress-cert-params' + responses: + 201: + description: Success. + 400: + description: Error. + schema: + $ref: '#/definitions/error' + 404: + description: Error. + schema: + $ref: '#/definitions/error' + 500: + description: Error. + schema: + $ref: '#/definitions/error' + /clusters/{cluster_id}/actions/install: post: tags: @@ -320,6 +388,101 @@ paths: schema: $ref: '#/definitions/error' + /clusters/{cluster_id}/actions/cancel: + post: + tags: + - installer + summary: Cancels an ongoing installation. + operationId: CancelInstallation + parameters: + - in: path + name: cluster_id + type: string + format: uuid + required: true + responses: + 202: + description: Success. + schema: + $ref: '#/definitions/cluster' + 404: + description: Error. + schema: + $ref: '#/definitions/error' + 409: + description: Error. + schema: + $ref: '#/definitions/error' + 500: + description: Error. + schema: + $ref: '#/definitions/error' + + /clusters/{cluster_id}/actions/reset: + post: + tags: + - installer + summary: Resets a failed installation. + operationId: ResetCluster + parameters: + - in: path + name: cluster_id + type: string + format: uuid + required: true + responses: + 202: + description: Success. + schema: + $ref: '#/definitions/cluster' + 404: + description: Error. + schema: + $ref: '#/definitions/error' + 409: + description: Error. + schema: + $ref: '#/definitions/error' + 500: + description: Error. 
+ schema: + $ref: '#/definitions/error' + + /clusters/{cluster_id}/actions/complete_installation: + post: + tags: + - installer + summary: Agent API to mark a finalizing installation as complete. + operationId: CompleteInstallation + parameters: + - in: path + name: cluster_id + type: string + format: uuid + required: true + - in: body + name: completion-params + required: true + schema: + $ref: '#/definitions/completion-params' + responses: + 202: + description: Success. + schema: + $ref: '#/definitions/cluster' + 404: + description: Error. + schema: + $ref: '#/definitions/error' + 409: + description: Error. + schema: + $ref: '#/definitions/error' + 500: + description: Error. + schema: + $ref: '#/definitions/error' + /clusters/{cluster_id}/hosts: post: tags: @@ -346,6 +509,14 @@ paths: description: Error. schema: $ref: '#/definitions/error' + 403: + description: Error. + schema: + $ref: '#/definitions/error' + 404: + description: Error. + schema: + $ref: '#/definitions/error' 500: description: Error. schema: @@ -435,7 +606,7 @@ paths: schema: $ref: '#/definitions/error' - /clusters/{clusterId}/hosts/{hostId}/progress: + /clusters/{cluster_id}/hosts/{host_id}/progress: put: tags: - installer @@ -443,26 +614,34 @@ paths: operationId: UpdateHostInstallProgress parameters: - in: path - name: clusterId + name: cluster_id description: The ID of the cluster to retrieve type: string format: uuid required: true - in: path - name: hostId + name: host_id description: The ID of the host to retrieve type: string format: uuid required: true - in: body - name: host-install-progress-params + name: host-progress description: New progress value required: true schema: - $ref: '#/definitions/host-install-progress-params' + $ref: '#/definitions/host-progress' responses: 200: description: Update install progress + 404: + description: Error. + schema: + $ref: '#/definitions/error' + 500: + description: Error. 
+ schema: + $ref: '#/definitions/error' /clusters/{cluster_id}/hosts/{host_id}/actions/debug: post: @@ -516,8 +695,10 @@ paths: format: uuid required: true responses: - 204: + 200: description: Success. + schema: + $ref: '#/definitions/host' 404: description: Error. schema: @@ -548,8 +729,10 @@ paths: format: uuid required: true responses: - 204: + 200: description: Success. + schema: + $ref: '#/definitions/host' 404: description: Error. schema: @@ -630,6 +813,77 @@ paths: schema: $ref: '#/definitions/error' + # The following API call should be admin only + /clusters/{cluster_id}/free_addresses: + get: + tags: + - installer + summary: Retrieves the free address list for a network. + operationId: GetFreeAddresses + parameters: + - in: path + name: cluster_id + type: string + format: uuid + required: true + - in: query + name: network + type: string + pattern: '^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$' + required: true + - in: query + name: limit + type: integer + minimum: 1 + maximum: 8000 + default: 8000 + required: false + - in: query + name: prefix + type: string + required: false + responses: + 200: + description: Success + schema: + $ref: '#/definitions/free-addresses-list' + 404: + description: Error. + schema: + $ref: '#/definitions/error' + 500: + description: Error. + schema: + $ref: '#/definitions/error' + + /domains: + get: + tags: + - managed_domains + summary: List of managed DNS domains + operationId: ListManagedDomains + responses: + 200: + description: Success. + schema: + $ref: '#/definitions/list-managed-domains' + 500: + description: Error. + schema: + $ref: '#/definitions/error' + + /component_versions: + get: + tags: + - versions + summary: List of componenets versions + operationId: ListComponentVersions + responses: + 200: + description: Success. 
+ schema: + $ref: '#/definitions/list-versions' + /events/{entity_id}: get: tags: @@ -654,6 +908,33 @@ paths: definitions: + list-managed-domains: + type: array + items: + $ref: '#/definitions/managed-domain' + + managed-domain: + type: object + properties: + domain: + type: string + provider: + type: string + enum: ['route53'] + + list-versions: + type: object + properties: + versions: + $ref: '#/definitions/versions' + release_tag: + type: string + + versions: + type: object + additionalProperties: + type: string + event-list: type: array items: @@ -663,6 +944,7 @@ definitions: type: object required: - entity_id + - severity - message - event_time properties: @@ -671,12 +953,16 @@ definitions: format: uuid description: Unique identifier of the object this event relates to. x-go-custom-tag: gorm:"index" + severity: + type: string + enum: [info, warning, error, critical] message: type: string + x-go-custom-tag: gorm:"type:varchar(4096)" event_time: type: string format: date-time - x-go-custom-tag: gorm:"type:datetime" + x-go-custom-tag: gorm:"type:timestamp with time zone" request_id: type: string format: uuid @@ -702,6 +988,8 @@ definitions: host_id: type: string format: uuid + discovery_agent_version: + type: string host: type: object @@ -737,48 +1025,99 @@ definitions: - disconnected - insufficient - disabled + - preparing-for-installation + - pending-for-input - installing - installing-in-progress + - installing-pending-user-action + - resetting-pending-user-action - installed - error + - resetting status_info: type: string x-go-custom-tag: gorm:"type:varchar(2048)" + validations_info: + type: string + description: Json formatted string containing the validations results for each validation id grouped by category (network, hardware, etc.) 
+ x-go-custom-tag: gorm:"type:varchar(2048)" + status_updated_at: + type: string + format: date-time + x-go-custom-tag: gorm:"type:timestamp with time zone" + description: The last time that the host status has been updated + progress: + $ref: '#/definitions/host-progress-info' + x-go-custom-tag: gorm:"embedded;embedded_prefix:progress_" + stage_started_at: + type: string + format: date-time + x-go-custom-tag: gorm:"type:timestamp with time zone" + description: Time at which the current progress stage started + stage_updated_at: + type: string + format: date-time + x-go-custom-tag: gorm:"type:timestamp with time zone" + description: Time at which the current progress stage was last updated + progress_stages: + type: array + items: + $ref: '#/definitions/host-stage' + x-go-custom-tag: gorm:"-" connectivity: x-go-custom-tag: gorm:"type:text" type: string - hardware_info: + inventory: x-go-custom-tag: gorm:"type:text" type: string - inventory: + free_addresses: x-go-custom-tag: gorm:"type:text" type: string role: - type: string - enum: ['undefined', 'master', 'worker'] + $ref: '#/definitions/host-role' bootstrap: type: boolean + installer_version: + type: string + description: Installer version updated_at: type: string format: date-time - x-go-custom-tag: gorm:"type:datetime" + x-go-custom-tag: gorm:"type:timestamp with time zone" created_at: type: string format: date-time - x-go-custom-tag: gorm:"type:datetime" + x-go-custom-tag: gorm:"type:timestamp with time zone" + checked_in_at: + type: string + format: date-time + x-go-custom-tag: gorm:"type:timestamp with time zone" + description: The last time the host's agent communicated with the service. 
+ discovery_agent_version: + type: string + requested_hostname: + type: string steps: - type: array - items: - $ref: '#/definitions/step' + type: object + properties: + next_instruction_seconds: + type: integer + instructions: + type: array + items: + $ref: '#/definitions/step' + step-type: type: string enum: - - hardware-info - connectivity-check - execute - inventory + - install + - free-network-addresses + - reset-installation step: type: object @@ -802,6 +1141,8 @@ definitions: step-reply: type: object properties: + step_type: + $ref: '#/definitions/step-type' step_id: type: string exit_code: @@ -855,7 +1196,7 @@ definitions: description: Name of the OpenShift cluster. openshift_version: type: string - enum: ['4.4', '4.5'] + enum: ['4.5'] description: Version of the OpenShift cluster. base_dns_domain: type: string @@ -864,26 +1205,21 @@ definitions: type: string description: IP address block from which Pod IPs are allocated This block must not overlap with existing physical networks. These IP addresses are used for the Pod network, and if you need to access the Pods from an external network, configure load balancers and routers to manage the traffic. pattern: '^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$' + default: "10.128.0.0/14" cluster_network_host_prefix: type: integer description: The subnet prefix length to assign to each individual node. For example, if clusterNetworkHostPrefix is set to 23, then each node is assigned a /23 subnet out of the given cidr (clusterNetworkCIDR), which allows for 510 (2^(32 - 23) - 2) pod IPs addresses. If you are required to provide access to nodes from an external network, configure load balancers and routers to manage the traffic. minimum: 1 maximum: 32 + default: 23 service_network_cidr: type: string description: The IP address pool to use for service IP addresses. You can enter only one IP address pool. 
If you need to access the services from an external network, configure load balancers and routers to manage the traffic. pattern: '^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$' - api_vip: - type: string - format: ipv4 - description: Virtual IP used to reach the OpenShift cluster API. - dns_vip: - type: string - format: ipv4 - description: Virtual IP used internally by the cluster for automating internal DNS requirements. + default: "172.30.0.0/16" ingress_vip: type: string - format: ipv4 + pattern: '^(([0-9]{1,3}\.){3}[0-9]{1,3})?$' description: Virtual IP used for cluster ingress traffic. pull_secret: type: string @@ -898,44 +1234,50 @@ definitions: name: type: string description: OpenShift cluster name + x-nullable: true base_dns_domain: type: string description: Base domain of the cluster. All DNS records must be sub-domains of this base and include the cluster name. + x-nullable: true cluster_network_cidr: type: string description: IP address block from which Pod IPs are allocated This block must not overlap with existing physical networks. These IP addresses are used for the Pod network, and if you need to access the Pods from an external network, configure load balancers and routers to manage the traffic. pattern: '^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$' + x-nullable: true cluster_network_host_prefix: type: integer description: The subnet prefix length to assign to each individual node. For example, if clusterNetworkHostPrefix is set to 23, then each node is assigned a /23 subnet out of the given cidr (clusterNetworkCIDR), which allows for 510 (2^(32 - 23) - 2) pod IPs addresses. If you are required to provide access to nodes from an external network, configure load balancers and routers to manage the traffic. minimum: 1 maximum: 32 + x-nullable: true service_network_cidr: type: string description: The IP address pool to use for service IP addresses. You can enter only one IP address pool. 
If you need to access the services from an external network, configure load balancers and routers to manage the traffic. pattern: '^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$' + x-nullable: true api_vip: type: string - format: ipv4 + pattern: '^(([0-9]{1,3}\.){3}[0-9]{1,3})?$' description: Virtual IP used to reach the OpenShift cluster API. - dns_vip: - type: string - format: ipv4 - description: Virtual IP used internally by the cluster for automating internal DNS requirements. + x-nullable: true ingress_vip: type: string - format: ipv4 + pattern: '^(([0-9]{1,3}\.){3}[0-9]{1,3})?$' description: Virtual IP used for cluster ingress traffic. + x-nullable: true pull_secret: type: string description: The pull secret that obtained from the Pull Secret page on the Red Hat OpenShift Cluster Manager site. + x-nullable: true ssh_public_key: type: string description: SSH public key for debugging OpenShift nodes. + x-nullable: true hosts_roles: type: array x-go-custom-tag: gorm:"type:varchar(64)[]" description: The desired role for hosts associated with the cluster. + x-nullable: true items: type: object properties: @@ -943,8 +1285,20 @@ definitions: type: string format: uuid role: + $ref: '#/definitions/host-role-update-params' + hosts_names: + type: array + x-go-custom-tag: gorm:"type:varchar(64)[]" + description: The desired hostname for hosts associated with the cluster. + x-nullable: true + items: + type: object + properties: + id: + type: string + format: uuid + hostname: type: string - enum: ['master', 'worker'] cluster: type: object @@ -971,9 +1325,13 @@ definitions: name: type: string description: Name of the OpenShift cluster. + user_id: + type: string + org_id: + type: string openshift_version: type: string - enum: ['4.4', '4.5'] + enum: ['4.5'] description: Version of the OpenShift cluster. 
image_info: $ref: '#/definitions/image_info' @@ -996,20 +1354,16 @@ definitions: pattern: '^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$' api_vip: type: string - format: ipv4 + pattern: '^(([0-9]{1,3}\.){3}[0-9]{1,3})?$' description: Virtual IP used to reach the OpenShift cluster API. - dns_vip: + machine_network_cidr: type: string - format: ipv4 - description: Virtual IP used internally by the cluster for automating internal DNS requirements. + description: A CIDR that all hosts belonging to the cluster should have an interfaces with IP address that belongs to this CIDR. The api_vip belongs to this CIDR. + pattern: '^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$' ingress_vip: type: string - format: ipv4 + pattern: '^(([0-9]{1,3}\.){3}[0-9]{1,3})?$' description: Virtual IP used for cluster ingress traffic. - pull_secret: - type: string - x-go-custom-tag: gorm:"type:varchar(4096)" - description: The pull secret that obtained from the Pull Secret page on the Red Hat OpenShift Cluster Manager site. ssh_public_key: type: string x-go-custom-tag: gorm:"type:varchar(1024)" @@ -1021,12 +1375,19 @@ definitions: - insufficient - ready - error + - preparing-for-installation - installing + - finalizing - installed status_info: type: string x-go-custom-tag: gorm:"type:varchar(2048)" description: Additional information pertaining to the status of the OpenShift cluster. + status_updated_at: + type: string + format: date-time + x-go-custom-tag: gorm:"type:timestamp with time zone" + description: The last time that the cluster status has been updated hosts: x-go-custom-tag: gorm:"foreignkey:ClusterID;association_foreignkey:ID" type: array @@ -1037,23 +1398,34 @@ definitions: updated_at: type: string format: date-time - x-go-custom-tag: gorm:"type:datetime" + x-go-custom-tag: gorm:"type:timestamp with time zone" description: The last time that this cluster was updated. 
created_at: type: string format: date-time - x-go-custom-tag: gorm:"type:datetime" + x-go-custom-tag: gorm:"type:timestamp with time zone" description: The time that this cluster was created. install_started_at: type: string format: date-time - x-go-custom-tag: gorm:"type:datetime;default:0" + x-go-custom-tag: gorm:"type:timestamp with time zone;default:'2000-01-01 00:00:00z'" description: The time that this cluster began installation. install_completed_at: type: string format: date-time - x-go-custom-tag: gorm:"type:datetime;default:0" + x-go-custom-tag: gorm:"type:timestamp with time zone;default:'2000-01-01 00:00:00z'" description: The time that this cluster completed installation. + host_networks: + type: array + items: + $ref: '#/definitions/host_network' + x-go-custom-tag: gorm:"-" + description: List of host networks to be filled during query. + pull_secret_set: + type: boolean + description: True if the pull-secret has been added to the cluster + ignition_generator_version: + type: string image_info: type: object @@ -1067,123 +1439,43 @@ definitions: type: string x-go-custom-tag: gorm:"type:varchar(1024)" description: SSH public key for debugging the installation + generator_version: + type: string + description: Image generator version created_at: type: string format: date-time - x-go-custom-tag: gorm:"type:datetime" + x-go-custom-tag: gorm:"type:timestamp with time zone" + + free-addresses-list: + type: array + items: + type: string + format: ipv4 cluster-list: type: array items: $ref: '#/definitions/cluster' - debug-step: - type: object - required: - - command - properties: - command: - type: string - - cpu_details: - type: object - properties: - architecture: - type: string - model_name: - type: string - cpus: - type: integer - threads_per_core: - type: integer - sockets: - type: integer - cpu_mhz: - type: number - - block-device: - type: object - properties: - name: - type: string - major_device_number: - type: integer - minor_device_number: - type: 
integer - removable_device: - type: integer - size: - type: integer - read_only: - type: boolean - device_type: - type: string - mountpoint: - type: string - fstype: - type: string - - memory_details: + host_network: type: object properties: - name: + cidr: type: string - total: - type: integer - used: - type: integer - free: - type: integer - shared: - type: integer - buff_cached: - type: integer - available: - type: integer - - cidr: - type: object - properties: - ip_address: - type: string - mask: - type: integer - - nic: - type: object - properties: - name: - type: string - state: - type: string - mtu: - type: integer - mac: - type: string - cidrs: + host_ids: type: array items: - $ref: '#/definitions/cidr' - + type: string + format: uuid - # Return value of hardware info - introspection: + debug-step: type: object + required: + - command properties: - cpu: - $ref: '#/definitions/cpu_details' - block_devices: - type: array - items: - $ref: '#/definitions/block-device' - memory: - type: array - items: - $ref: '#/definitions/memory_details' - nics: - type: array - items: - $ref: '#/definitions/nic' + command: + type: string l2-connectivity: type: object @@ -1233,9 +1525,68 @@ definitions: items: $ref: '#/definitions/connectivity-remote-host' - host-install-progress-params: + ingress-cert-params: type: string + completion-params: + type: object + required: + - is_success + properties: + is_success: + type: boolean + error_info: + type: string + + host-progress: + type: object + required: + - current_stage + properties: + current_stage: + type: string + $ref: '#/definitions/host-stage' + progress_info: + type: string + x-go-custom-tag: gorm:"type:varchar(2048)" + + host-progress-info: + type: object + required: + - current_stage + properties: + current_stage: + type: string + $ref: '#/definitions/host-stage' + progress_info: + type: string + x-go-custom-tag: gorm:"type:varchar(2048)" + stage_started_at: + type: string + format: date-time + x-go-custom-tag: 
gorm:"type:timestamp with time zone" + description: Time at which the current progress stage started + stage_updated_at: + type: string + format: date-time + x-go-custom-tag: gorm:"type:timestamp with time zone" + description: Time at which the current progress stage was last updated + + host-stage: + type: string + enum: + - Starting installation + - Waiting for control plane + - Start Waiting for control plane + - Installing + - Writing image to disk + - Rebooting + - Waiting for ignition + - Configuring + - Joined + - Done + - Failed + error: type: object required: @@ -1392,6 +1743,29 @@ definitions: cpu: $ref: '#/definitions/cpu' + free_network_addresses: + type: object + properties: + network: + type: string + pattern: '^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$' + free_addresses: + type: array + items: + type: string + format: ipv4 + + free_networks_addresses: + type: array + items: + $ref: '#/definitions/free_network_addresses' + + free_addresses_request: + type: array + items: + type: string + pattern: '^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]|[1-2][0-9]|3[0-2]?$' + credentials: type: object properties: @@ -1399,3 +1773,34 @@ definitions: type: string password: type: string + console_url: + type: string + + host-role-update-params: + type: string + enum: + - 'master' + - 'worker' + + host-role: + type: string + enum: + - 'master' + - 'worker' + - 'bootstrap' + + host-validation-id: + type: string + enum: + - 'connected' + - 'has-inventory' + - 'has-min-cpu-cores' + - 'has-min-valid-disks' + - 'has-min-memory' + - 'machine-cidr-defined' + - 'role-defined' + - 'has-cpu-cores-for-role' + - 'has-memory-for-role' + - 'hostname-unique' + - 'hostname-valid' + - 'belongs-to-machine-cidr' diff --git a/tools/clear_deployment.py b/tools/clear_deployment.py index fabc80525..3eda27a06 100644 --- a/tools/clear_deployment.py +++ b/tools/clear_deployment.py @@ -1,8 +1,15 @@ import utils +import argparse +import deployment_options def main(): - 
print(utils.check_output("kubectl delete all --all -n assisted-installer 1> /dev/null ; true")) - print(utils.check_output("kubectl delete namespace assisted-installer 1> /dev/null ; true")) + parser = argparse.ArgumentParser() + parser.add_argument("--delete-namespace", type=lambda x: (str(x).lower() == 'true'), default=True) + deploy_options = deployment_options.load_deployment_options(parser) + + print(utils.check_output(f"kubectl delete all --all -n {deploy_options.namespace} 1> /dev/null ; true")) + if deploy_options.delete_namespace is True: + print(utils.check_output(f"kubectl delete namespace {deploy_options.namespace} 1> /dev/null ; true")) if __name__ == "__main__": main() diff --git a/tools/create_default_s3_bucket.py b/tools/create_default_s3_bucket.py deleted file mode 100644 index d902d1f9e..000000000 --- a/tools/create_default_s3_bucket.py +++ /dev/null @@ -1,33 +0,0 @@ -import os -import utils -import boto3 -import sys -import time - -def get_s3_client(): - endpoint_url = utils.get_service_url("scality") - return boto3.client( - 's3', - aws_access_key_id="accessKey1", - aws_secret_access_key="verySecretKey1", - endpoint_url=endpoint_url - ) - -def main(): - retry = 20 - success = False - while retry > 0 and success == False: - try: - client = get_s3_client() - client.create_bucket(Bucket="test") - success = True - except Exception as e: - print(e) - retry -= 1 - time.sleep(5) - if retry == 0: - print("failed to create default s3 bucket") - sys.exit(1) - -if __name__ == "__main__": - main() diff --git a/tools/debug/minikube_postgres.sh b/tools/debug/minikube_postgres.sh new file mode 100755 index 000000000..320fca01c --- /dev/null +++ b/tools/debug/minikube_postgres.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# use mysql cli on a DB exposed as a service in minikube + + +function print_usage() { + [[ -n "$1" ]] && echo "$1" && echo + echo "usage: minikube_postgres.sh [-u ] [-p ] [-t ] " + echo + echo " -u - username" + echo " -p - password" + echo " -t - 
table" + exit 1 +} + +USER="admin" +PWD="admin" +TABLE="installer" + +while getopts ':t:u:p:h' flag; do + case "${flag}" in + t) TABLE=${OPTARG} ;; + p) PWD=${OPTARG} ;; + u) USER=${OPTARG} ;; + h) print_usage ;; + ?) print_usage "invalid flag ${OPTARG}" ;; + esac +done + + +DB_SERVICE=${@:$OPTIND:1} +[[ -z "${DB_SERVICE}" ]] && print_usage "pod-name-filter is missing" + +SERVICE_URL=$(minikube service list | grep ${DB_SERVICE} | awk -F"|" '{print $5}' | tr -d '[:space:]') +PORT=$(echo ${SERVICE_URL}| awk -F"://|:" '{print $3}') +SERVER=$(echo ${SERVICE_URL}| awk -F"://|:" '{print $2}') +PGPASSWORD=admin psql -U ${USER} --dbname ${TABLE} --host ${SERVER} --port ${PORT} -w diff --git a/tools/debug/pod_image_labels.sh b/tools/debug/pod_image_labels.sh new file mode 100755 index 000000000..dab5a994c --- /dev/null +++ b/tools/debug/pod_image_labels.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Get the image labels of a running pod. +# This could be important, for example, to get the git_revision label for the pod + +function print_usage() { + [[ -n "$1" ]] && echo "$1" && echo + echo "usage: pod_image_labels [-p] " + echo + echo " -p - Use podman instead of docker" + exit 1 +} + + +DOCKER_ENGINE="docker" +while getopts ':ph' flag; do + case "${flag}" in + p) DOCKER_ENGINE="podman" ;; + h) print_usage ;; + ?) 
print_usage "invalid flag ${OPTARG}" ;; + esac +done + + +POD_FILTER=${@:$OPTIND:1} +[[ -z "${POD_FILTER}" ]] && print_usage "pod-name-filter is missing" + +result=($(kubectl get pods --all-namespaces | grep ${POD_FILTER})) +NAMESPACE=${result[0]} +POD_NAME=${result[1]} + +result=$(kubectl get pods -n ${NAMESPACE} -o=jsonpath='{.status.containerStatuses[0].imageID}' ${POD_NAME}) +IMAGE=$(echo ${result} | awk -F"://" '{print $2}') + + +${DOCKER_ENGINE} pull ${IMAGE} +echo "image labels for pod ${POD_NAME}:" +${DOCKER_ENGINE} inspect ${IMAGE} | jq .[0].Config.Labels diff --git a/tools/debug/s3-list-cron.yaml b/tools/debug/s3-list-cron.yaml new file mode 100644 index 000000000..e91c74d6a --- /dev/null +++ b/tools/debug/s3-list-cron.yaml @@ -0,0 +1,23 @@ +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: s3-list + namespace: assisted-installer +spec: + schedule: "*/15 * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: s3-list + image: amazon/aws-cli + env: + - name: AWS_ACCESS_KEY_ID + value: "accessKey1" + - name: AWS_SECRET_ACCESS_KEY + value: "verySecretKey1" + command: ["aws"] + args: ["--endpoint-url", "http://cloudserver-front:8000", "s3api", "list-objects", "--bucket", "test"] + restartPolicy: OnFailure + backoffLimit: 3 diff --git a/tools/deploy_assisted_installer.py b/tools/deploy_assisted_installer.py index 67b291cd3..1e98738f0 100644 --- a/tools/deploy_assisted_installer.py +++ b/tools/deploy_assisted_installer.py @@ -1,17 +1,41 @@ import os -import sys import utils +import argparse +import yaml +import deployment_options + SRC_FILE = os.path.join(os.getcwd(), "deploy/bm-inventory.yaml") DST_FILE = os.path.join(os.getcwd(), "build/bm-inventory.yaml") +TEST_CLUSTER_MONITOR_INTERVAL = "1s" +TEST_HOST_MONITOR_INTERVAL = "1s" + def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--subsystem-test", help='deploy in subsystem mode', action='store_true') + deploy_options = 
deployment_options.load_deployment_options(parser) + with open(SRC_FILE, "r") as src: - with open(DST_FILE, "w+") as dst: - data = src.read() - data = data.replace("REPLACE_IMAGE", os.environ.get("SERVICE")) - print("Deploying {}:\n{}".format(DST_FILE, data)) - dst.write(data) + raw_data = src.read() + raw_data = raw_data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + + data = yaml.safe_load(raw_data) + + image_fqdn = deployment_options.get_image_override(deploy_options, "bm-inventory", "SERVICE") + data["spec"]["template"]["spec"]["containers"][0]["image"] = image_fqdn + if deploy_options.subsystem_test: + if data["spec"]["template"]["spec"]["containers"][0].get("env", None) is None: + data["spec"]["template"]["spec"]["containers"][0]["env"] = [] + data["spec"]["template"]["spec"]["containers"][0]["env"].append({'name':'CLUSTER_MONITOR_INTERVAL', 'value': TEST_CLUSTER_MONITOR_INTERVAL}) + data["spec"]["template"]["spec"]["containers"][0]["env"].append({'name':'HOST_MONITOR_INTERVAL', 'value': TEST_HOST_MONITOR_INTERVAL}) + data["spec"]["template"]["spec"]["containers"][0]["imagePullPolicy"] = "Never" + else: + data["spec"]["template"]["spec"]["containers"][0]["imagePullPolicy"] = "Always" + + with open(DST_FILE, "w+") as dst: + yaml.dump(data, dst, default_flow_style=False) + print("Deploying {}".format(DST_FILE)) utils.apply(DST_FILE) diff --git a/tools/deploy_assisted_installer_configmap.py b/tools/deploy_assisted_installer_configmap.py index 90c7bf1a6..5be5bb9d5 100644 --- a/tools/deploy_assisted_installer_configmap.py +++ b/tools/deploy_assisted_installer_configmap.py @@ -1,19 +1,74 @@ import os import utils +import argparse +import yaml +import deployment_options + SRC_FILE = os.path.join(os.getcwd(), "deploy/bm-inventory-configmap.yaml") DST_FILE = os.path.join(os.getcwd(), "build/bm-inventory-configmap.yaml") SERVICE = "bm-inventory" + +def get_deployment_tag(args): + if args.deploy_manifest_tag: + return args.deploy_manifest_tag + if 
args.deploy_tag: + return args.deploy_tag + + +def handle_arguments(): + parser = argparse.ArgumentParser() + parser.add_argument("--target") + parser.add_argument("--domain") + parser.add_argument("--base-dns-domains") + + return deployment_options.load_deployment_options(parser) + + def main(): - service_host = utils.get_service_host(SERVICE) - service_port = utils.get_service_port(SERVICE) + deploy_options = handle_arguments() + # TODO: delete once rename everything to assisted-installer + if deploy_options.target == "oc-ingress": + service_host = "assisted-installer.{}".format(utils.get_domain(deploy_options.domain)) + service_port = "80" + else: + service_host = utils.get_service_host(SERVICE, deploy_options.target, namespace=deploy_options.namespace) + service_port = utils.get_service_port(SERVICE, deploy_options.target, namespace=deploy_options.namespace) + with open(SRC_FILE, "r") as src: with open(DST_FILE, "w+") as dst: data = src.read() data = data.replace("REPLACE_URL", '"{}"'.format(service_host)) data = data.replace("REPLACE_PORT", '"{}"'.format(service_port)) - print("Deploying {}:\n{}".format(DST_FILE, data)) + data = data.replace("REPLACE_DOMAINS", '"{}"'.format(deploy_options.base_dns_domains)) + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + print("Deploying {}".format(DST_FILE)) + + versions = {"IMAGE_BUILDER": "installer-image-build", + "AGENT_DOCKER_IMAGE": "agent", + "KUBECONFIG_GENERATE_IMAGE": "ignition-manifests-and-kubeconfig-generate", + "INSTALLER_IMAGE": "assisted-installer", + "CONTROLLER_IMAGE": "assisted-installer-controller", + "CONNECTIVITY_CHECK_IMAGE": "connectivity_check", + "INVENTORY_IMAGE": "inventory"} + for env_var_name, image_short_name in versions.items(): + image_fqdn = deployment_options.get_image_override(deploy_options, image_short_name, env_var_name) + versions[env_var_name] = image_fqdn + + # Edge case for controller image override + if os.environ.get("INSTALLER_IMAGE") and not 
os.environ.get("CONTROLLER_IMAGE"): + versions["CONTROLLER_IMAGE"] = deployment_options.IMAGE_FQDN_TEMPLATE.format("assisted-installer-controller", + deployment_options.get_tag(versions["INSTALLER_IMAGE"])) + + versions["SELF_VERSION"] = deployment_options.get_image_override(deploy_options, "bm-inventory", "SERVICE") + deploy_tag = get_deployment_tag(deploy_options) + if deploy_tag: + versions["RELEASE_TAG"] = deploy_tag + + y = yaml.load(data) + y['data'].update(versions) + data = yaml.dump(y) dst.write(data) utils.apply(DST_FILE) diff --git a/tools/deploy_grafana.py b/tools/deploy_grafana.py new file mode 100644 index 000000000..7b971b9a5 --- /dev/null +++ b/tools/deploy_grafana.py @@ -0,0 +1,195 @@ +''' +This script deploy Grafana instance of it on K8s and OCP +n the OCP case, it will be integrated automatically with OCP oauth. +''' + +import os +import sys +from time import sleep +import argparse +import secrets +import utils +import deployment_options + +parser = argparse.ArgumentParser() +parser.add_argument("--target") +deploy_options = deployment_options.load_deployment_options(parser) + + +if deploy_options.target != "oc-ingress": + CMD_BIN = 'kubectl' +else: + CMD_BIN = 'oc' + +def deploy_oauth_reqs(): + '''oauth Integration in OCP''' + # Token generation for session_secret + session_secret = secrets.token_hex(43) + secret_name = 'grafana-proxy' + if not utils.check_if_exists('secret', secret_name, deploy_options.namespace): + cmd = "{} -n {} create secret generic {} --from-literal=session_secret={}"\ + .format(CMD_BIN, deploy_options.namespace, secret_name, session_secret) + utils.check_output(cmd) + + ## Create and Annotate Serviceaccount + sa_name = 'grafana' + if not utils.check_if_exists('sa', sa_name, deploy_options.namespace): + cmd = "{} -n {} create serviceaccount {} ".format(CMD_BIN, deploy_options.namespace, sa_name) + utils.check_output(cmd) + json_manifest = 
'{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"grafana"}}' + annotation_name = 'serviceaccounts.openshift.io/oauth-redirectreference.grafana' + cmd = "{} -n {} annotate serviceaccount {} --overwrite {}='{}'".format( + CMD_BIN, deploy_options.namespace, sa_name, annotation_name, json_manifest) + utils.check_output(cmd) + + # Get OCP Certificate + if not utils.check_if_exists('secret', 'openshift-custom-ca', deploy_options.namespace): + secret_name = 'router-certs-default' + namespace = 'openshift-ingress' + template = '{{index .data "tls.crt"}}' + cmd = "{} get secret {} --namespace={} --template '{}'".format(CMD_BIN, secret_name, namespace, template) + ca_cert = utils.check_output(cmd) + + # Renderized secret with CA Certificate of the OCP Cluster + src_file = os.path.join(os.getcwd(),\ + "deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-custom-ca.yaml") + dst_file = os.path.join(os.getcwd(),\ + "build/assisted-installer-ocp-prometheus-custom-ca.yaml") + topic = 'OCP Custom CA' + with open(src_file, "r") as src: + with open(dst_file, "w+") as dst: + data = src.read() + data = data.replace("BASE64_CERT", ca_cert) + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + print("Deploying {}: {}".format(topic, dst_file)) + dst.write(data) + utils.apply(dst_file) + + +def deployer(src_file, topic): + '''Wrapper for oc/kubectl apply -f''' + src_file = os.path.join(os.getcwd(), src_file) + dst_file = os.path.join(os.getcwd(), 'build', os.path.basename(src_file)) + with open(src_file) as fp: + data = fp.read() + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + with open(dst_file, 'w') as fp: + fp.write(data) + print("Deploying {}: {}".format(topic ,dst_file)) + utils.apply(dst_file) + + +def deploy_grafana_route(): + '''Deploy Grafana Route''' + topic = 'Grafana Route' + src_file = os.path.join(os.getcwd(),\ + "deploy/monitoring/grafana/assisted-installer-ocp-grafana-route.yaml") + 
dst_file = os.path.join(os.getcwd(),\ + "build/assisted-installer-ocp-grafana-route.yaml") + try: + # I have permissions + ingress_domain = utils.get_domain(namespace=deploy_options.namespace) + except: + # I have not permissions, yes it's ugly... + # This ingress should be there because of UI deployment + json_path_ingress = '{.spec.rules[0].host}' + cmd = "{} -n {} get ingress assisted-installer -o jsonpath='{}'".format( + CMD_BIN, deploy_options.namespace, json_path_ingress) + assisted_installer_ingress_domain = utils.check_output(cmd) + if assisted_installer_ingress_domain.split(".")[0] != 'assisted-installer': + print("Error recovering the ingress route") + sys.exit(1) + ingress_domain = assisted_installer_ingress_domain.split(".", maxsplit=1)[1] + + with open(src_file, "r") as src: + with open(dst_file, "w+") as dst: + data = src.read() + data = data.replace("INGRESS_DOMAIN", ingress_domain) + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + print("Deploying {}: {}".format(topic, dst_file)) + dst.write(data) + utils.apply(dst_file) + + +def deploy_grafana_ds(): + '''Deploy grafana daemonSet''' + secret_name = 'grafana-datasources' + src_file = os.path.join(os.getcwd(), "deploy/monitoring/grafana/prometheus.json") + dst_file = os.path.join(os.getcwd(), "build/prometheus.json") + with open(src_file) as fp: + data = fp.read() + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + with open(dst_file, 'w') as fp: + fp.write(data) + if not utils.check_if_exists('secret', secret_name, deploy_options.namespace): + print("Creating Grafana Datasource") + cmd = "{} create secret generic {} --namespace={} --from-file=prometheus.yaml={}".format(CMD_BIN, secret_name, deploy_options.namespace, dst_file) + utils.check_output(cmd) + + +def deploy_grafana_config(conf_file): + '''Deploy Grafana ConfigMap''' + secret_name = 'grafana-config' + src_file = os.path.join(os.getcwd(), "deploy/monitoring/grafana/" + conf_file) + dst_file = 
os.path.join(os.getcwd(), conf_file) + with open(src_file) as fp: + data = fp.read() + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + with open(dst_file, 'w') as fp: + fp.write(data) + if not utils.check_if_exists('secret', secret_name, deploy_options.namespace): + print("Creating Grafana Configuration") + cmd = "{} create secret generic {} --namespace={} --from-file=grafana.ini={}".format(CMD_BIN, secret_name, deploy_options.namespace, dst_file) + utils.check_output(cmd) + else: + print("Updating Grafana Configuration") + cmd = "{} delete secret {} --namespace={}".format(CMD_BIN, secret_name, deploy_options.namespace) + utils.check_output(cmd) + cmd = "{} create secret generic {} --namespace={} --from-file=grafana.ini={}".format(CMD_BIN, secret_name, deploy_options.namespace, dst_file) + utils.check_output(cmd) + + +def main(): + '''Deploy Grafana for Assisted Installer''' + if deploy_options.target != "oc-ingress": + # Deploy grafana configuration + grafana_conf_file = 'grafana-k8s.ini' + deploy_grafana_config(grafana_conf_file) + # Deploy grafana DS + deploy_grafana_ds() + # Deploy Dashboards + deployer('deploy/monitoring/grafana/grafana-dashboards.yaml', + 'Grafana Dashboards') + # Deploy Assisted Installer Dashboard + deployer('deploy/monitoring/grafana/assisted-installer-grafana-dashboard.yaml', + 'Grafana Assisted Installer Dashboard') + # Deploy Grafana + deployer('deploy/monitoring/grafana/assisted-installer-k8s-grafana.yaml', + 'Grafana Instance on K8s') + sleep(10) + utils.check_k8s_rollout('deployment', 'grafana', deploy_options.namespace) + else: + # Deploy Oauth Pre-reqs for OCP integration + deploy_oauth_reqs() + # Deploy grafana configuration + grafana_conf_file = 'grafana.ini' + deploy_grafana_config(grafana_conf_file) + # Deploy grafana DS + deploy_grafana_ds() + # Deploy Dashboards + deployer('deploy/monitoring/grafana/grafana-dashboards.yaml', + 'Grafana Dashboards') + # Deploy Assisted Installer Dashboard + 
deployer('deploy/monitoring/grafana/assisted-installer-grafana-dashboard.yaml', + 'Grafana Assisted Installer Dashboard') + # Deploy Grafana + deployer('deploy/monitoring/grafana/assisted-installer-ocp-grafana.yaml', + 'Grafana Instance on OCP') + sleep(10) + utils.check_k8s_rollout('deployment', 'grafana', deploy_options.namespace) + # Deploy grafana Route + deploy_grafana_route() + + +if __name__ == "__main__": + main() diff --git a/tools/deploy_inventory_service.py b/tools/deploy_inventory_service.py index ded0f6171..0ae4ed7be 100644 --- a/tools/deploy_inventory_service.py +++ b/tools/deploy_inventory_service.py @@ -1,18 +1,58 @@ +import argparse import os -import sys + +import deploy_tls_secret +import deployment_options import utils -SRC_FILE = os.path.join(os.getcwd(), "deploy/bm-inventory-service.yaml") -DST_FILE = os.path.join(os.getcwd(), "build/bm-inventory-service.yaml") def main(): - with open(SRC_FILE, "r") as src: - with open(DST_FILE, "w+") as dst: + parser = argparse.ArgumentParser() + parser.add_argument("--target") + parser.add_argument("--domain") + parser.add_argument('--enable-tls', action='store_true', default=False) + deploy_options = deployment_options.load_deployment_options(parser) + + src_file = os.path.join(os.getcwd(), "deploy/bm-inventory-service.yaml") + dst_file = os.path.join(os.getcwd(), "build/bm-inventory-service.yaml") + with open(src_file, "r") as src: + with open(dst_file, "w+") as dst: data = src.read() - print("Deploying {}:\n{}".format(DST_FILE, data)) + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + print("Deploying {}".format(dst_file)) dst.write(data) - utils.apply(DST_FILE) + utils.apply(dst_file) + + # in case of OpenShift deploy ingress as well + if deploy_options.target == "oc-ingress": + hostname = utils.get_service_host("assisted-installer", deploy_options.target, deploy_options.domain, + deploy_options.namespace) + + if deploy_options.enable_tls: + print("WARNING: To change TLS redirection 
behavior update " + "spec/tls/insecureEdgeTerminationPolicy (None|Allow|Redirect) " + "in the corresponding OpenShift route") + deploy_tls_secret.generate_secret(output_dir=os.path.join(os.getcwd(), "build"), + service="bm-inventory", san=hostname, namespace=deploy_options.namespace) + template = "assisted-installer-ingress-tls.yaml" + else: + template = "assisted-installer-ingress.yaml" + + deploy_ingress(hostname=hostname, namespace=deploy_options.namespace, template_file=template) + + +def deploy_ingress(hostname, namespace, template_file): + src_file = os.path.join(os.getcwd(), "deploy", template_file) + dst_file = os.path.join(os.getcwd(), "build", template_file) + with open(src_file, "r") as src: + with open(dst_file, "w+") as dst: + data = src.read() + data = data.replace('REPLACE_NAMESPACE', namespace) + data = data.replace("REPLACE_HOSTNAME", hostname) + print("Deploying {}".format(dst_file)) + dst.write(data) + utils.apply(dst_file) if __name__ == "__main__": diff --git a/tools/deploy_mariadb.py b/tools/deploy_mariadb.py index 125d174c4..74392ea1e 100644 --- a/tools/deploy_mariadb.py +++ b/tools/deploy_mariadb.py @@ -1,29 +1,50 @@ import os -import sys import utils - +import deployment_options def main(): - SRC_FILE = os.path.join(os.getcwd(), "deploy/mariadb/mariadb-configmap.yaml") - DST_FILE = os.path.join(os.getcwd(), "build/mariadb-configmap.yaml") - with open(SRC_FILE, "r") as src: - with open(DST_FILE, "w+") as dst: + deploy_options = deployment_options.load_deployment_options() + + src_file = os.path.join(os.getcwd(), "deploy/mariadb/mariadb-configmap.yaml") + dst_file = os.path.join(os.getcwd(), "build/mariadb-configmap.yaml") + with open(src_file, "r") as src: + with open(dst_file, "w+") as dst: data = src.read() - print("Deploying {}:\n{}".format(DST_FILE, data)) + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + print("Deploying {}".format(dst_file)) dst.write(data) - utils.apply(DST_FILE) + utils.apply(dst_file) + + 
src_file = os.path.join(os.getcwd(), "deploy/mariadb/mariadb-deployment.yaml") + dst_file = os.path.join(os.getcwd(), "build/mariadb-deployment.yaml") + with open(src_file, "r") as src: + with open(dst_file, "w+") as dst: + data = src.read() + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + print("Deploying {}".format(dst_file)) + dst.write(data) + utils.apply(dst_file) - SRC_FILE = os.path.join(os.getcwd(), "deploy/mariadb/mariadb-deployment.yaml") - DST_FILE = os.path.join(os.getcwd(), "build/mariadb-deployment.yaml") - with open(SRC_FILE, "r") as src: - with open(DST_FILE, "w+") as dst: + src_file = os.path.join(os.getcwd(), "deploy/mariadb/mariadb-storage.yaml") + dst_file = os.path.join(os.getcwd(), "build/mariadb-storage.yaml") + with open(src_file, "r") as src: + with open(dst_file, "w+") as dst: data = src.read() - print("Deploying {}:\n{}".format(DST_FILE, data)) + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + try: + size = utils.check_output( + f"kubectl -n {deploy_options.namespace} get persistentvolumeclaims mariadb-pv-claim " + + "-o=jsonpath='{.status.capacity.storage}'") + print("Using existing disk size", size) + except: + size = "10Gi" + print("Using default size", size) + data = data.replace("REPLACE_STORAGE", size) + print("Deploying {}".format(dst_file)) dst.write(data) - utils.apply("deploy/mariadb/mariadb-storage.yaml") - utils.apply(DST_FILE) + utils.apply(dst_file) if __name__ == "__main__": diff --git a/tools/deploy_namespace.py b/tools/deploy_namespace.py index 3768a927d..0a02d3532 100644 --- a/tools/deploy_namespace.py +++ b/tools/deploy_namespace.py @@ -1,16 +1,28 @@ import os import utils +import argparse +import deployment_options + def main(): - SRC_FILE = os.path.join(os.getcwd(), "deploy/namespace/namespace.yaml") - DST_FILE = os.path.join(os.getcwd(), "build/namespace.yaml") - with open(SRC_FILE, "r") as src: - with open(DST_FILE, "w+") as dst: + parser = argparse.ArgumentParser() + 
parser.add_argument("--deploy-namespace", type=lambda x: (str(x).lower() == 'true'), default=True) + deploy_options = deployment_options.load_deployment_options(parser) + + if deploy_options.deploy_namespace is False: + print("Not deploying namespace") + return + src_file = os.path.join(os.getcwd(), "deploy/namespace/namespace.yaml") + dst_file = os.path.join(os.getcwd(), "build/namespace.yaml") + with open(src_file, "r") as src: + with open(dst_file, "w+") as dst: data = src.read() - print("Deploying {}:\n{}".format(DST_FILE, data)) + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + print("Deploying {}".format(dst_file)) dst.write(data) - utils.apply(DST_FILE) + utils.apply(dst_file) + if __name__ == "__main__": main() diff --git a/tools/deploy_olm.py b/tools/deploy_olm.py new file mode 100644 index 000000000..eeb5de21e --- /dev/null +++ b/tools/deploy_olm.py @@ -0,0 +1,44 @@ +import os +import utils +import argparse +from urllib.request import urlretrieve +from urllib.parse import urlparse + +parser = argparse.ArgumentParser() +parser.add_argument("--target") +args = parser.parse_args() + +def check_deployment(): + # Checks + print("Checking OLM deployment") + deployments = ['olm-operator', 'catalog-operator', 'packageserver'] + for deployment in deployments: + utils.wait_for_rollout('deployment', deployment, namespace='olm') + + +def main(): + ## Main OLM Manifest for K8s + if args.target != "oc-ingress": + # K8s + deployed = utils.check_if_exists('namespace', 'olm', namespace='olm') + if not deployed: + olm_manifests = [ + "https://github.com/operator-framework/operator-lifecycle-manager/releases/download/0.15.1/crds.yaml", + "https://github.com/operator-framework/operator-lifecycle-manager/releases/download/0.15.1/olm.yaml" + ] + for manifest_url in olm_manifests: + file_name = "build/{}".format(os.path.basename(urlparse(manifest_url).path)) + dst_file = os.path.join(os.getcwd(), file_name) + print("Deploying {}".format(dst_file)) + 
urlretrieve(manifest_url, dst_file)
+                utils.apply(dst_file)
+
+        check_deployment()
+
+    else:
+        # OCP
+        print("OLM Deployment not necessary")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/deploy_postgres.py b/tools/deploy_postgres.py
new file mode 100644
index 000000000..f48f47e53
--- /dev/null
+++ b/tools/deploy_postgres.py
@@ -0,0 +1,52 @@
+import os
+import utils
+import deployment_options
+
+
+def main():
+    deploy_options = deployment_options.load_deployment_options()
+
+    src_file = os.path.join(os.getcwd(), "deploy/postgres/postgres-secret.yaml")
+    dst_file = os.path.join(os.getcwd(), "build/postgres-secret.yaml")
+    with open(src_file, "r") as src:
+        with open(dst_file, "w+") as dst:
+            data = src.read()
+            data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace)
+            print("Deploying {}".format(dst_file))
+            dst.write(data)
+
+    utils.apply(dst_file)
+
+    src_file = os.path.join(os.getcwd(), "deploy/postgres/postgres-deployment.yaml")
+    dst_file = os.path.join(os.getcwd(), "build/postgres-deployment.yaml")
+    with open(src_file, "r") as src:
+        with open(dst_file, "w+") as dst:
+            data = src.read()
+            data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace)
+            print("Deploying {}".format(dst_file))
+            dst.write(data)
+    utils.apply(dst_file)
+
+    src_file = os.path.join(os.getcwd(), "deploy/postgres/postgres-storage.yaml")
+    dst_file = os.path.join(os.getcwd(), "build/postgres-storage.yaml")
+    with open(src_file, "r") as src:
+        with open(dst_file, "w+") as dst:
+            data = src.read()
+            data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace)
+            try:
+                size = utils.check_output(
+                    f"kubectl -n {deploy_options.namespace} get persistentvolumeclaims postgres-pv-claim " +
+                    "-o=jsonpath='{.status.capacity.storage}'")
+                print("Using existing disk size", size)
+            except:
+                size = "10Gi"
+                print("Using default size", size)
+            data = data.replace("REPLACE_STORAGE", size)
+            print("Deploying {}".format(dst_file))
+            dst.write(data)
+
+    utils.apply(dst_file)
+
+ +if __name__ == "__main__": + main() diff --git a/tools/deploy_prometheus.py b/tools/deploy_prometheus.py new file mode 100644 index 000000000..bdfe009f3 --- /dev/null +++ b/tools/deploy_prometheus.py @@ -0,0 +1,193 @@ +''' +This script deploy Prometheus Operator and the instance of it on K8s and OCP +n the OCP case, it will be integrated automatically with OCP oauth. +''' + +import os +import sys +from time import sleep +import argparse +import secrets +import utils +import deployment_options + +parser = argparse.ArgumentParser() +parser.add_argument("--target") +deploy_options = deployment_options.load_deployment_options(parser) + + +if deploy_options.target != "oc-ingress": + CMD_BIN = 'kubectl' + OLM_NS = 'olm' + CAT_SRC = 'operatorhubio-catalog' +else: + CMD_BIN = 'oc' + OLM_NS = 'openshift-marketplace' + CAT_SRC = 'community-operators' + +def deploy_oauth_reqs(): + '''oauth Integration in OCP''' + ## Token generation for session_secret + session_secret = secrets.token_hex(43) + secret_name = 'prometheus-k8s-proxy' + if not utils.check_if_exists('secret', secret_name, deploy_options.namespace): + cmd = "{} -n {} create secret generic {} --from-literal=session_secret={}" \ + .format(CMD_BIN, deploy_options.namespace, secret_name, session_secret) + utils.check_output(cmd) + + ## Annotate Serviceaccount + json_manifest = '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus-assisted"}}' + sa_name = 'prometheus-k8s' + annotation_name = 'serviceaccounts.openshift.io/oauth-redirectreference.assisted-installer-prometheus' + cmd = "{} -n {} annotate serviceaccount {} --overwrite {}='{}'"\ + .format(CMD_BIN, deploy_options.namespace, sa_name, annotation_name, json_manifest) + utils.check_output(cmd) + + # Download OCP Certificate as a secret + cert_secret_name = 'openshift-custom-ca' + cmd = "{} -n {} get secret {} --no-headers".format(CMD_BIN, deploy_options.namespace, cert_secret_name) + cert_secret = 
utils.check_output(cmd)
+    if not cert_secret:
+        # Get OCP Certificate
+        secret_name = 'router-certs-default'
+        namespace = 'openshift-ingress'
+        template = '{{index .data "tls.crt"}}'
+        cmd = "{} get secret {} --namespace={} --template '{}'"\
+            .format(CMD_BIN, secret_name, namespace, template)
+        ca_cert = utils.check_output(cmd)
+
+        # Renderized secret with CA Certificate of the OCP Cluster
+        src_file = os.path.join(os.getcwd(), \
+            "deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-custom-ca.yaml")
+        dst_file = os.path.join(os.getcwd(), \
+            "build/assisted-installer-ocp-prometheus-custom-ca.yaml")
+        topic = 'OCP Custom CA'
+        with open(src_file, "r") as src:
+            with open(dst_file, "w+") as dst:
+                data = src.read()
+                data = data.replace("BASE64_CERT", ca_cert)
+                data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace)
+                print("Deploying {}: {}".format(topic, dst_file))
+                dst.write(data)
+        utils.apply(dst_file)
+    else:
+        print("Secret {} already exists".format(cert_secret_name))
+
+
+def deploy_prometheus_route():
+    '''Deploy Prometheus Route'''
+    topic = 'Prometheus Operator Route'
+    src_file = os.path.join(os.getcwd(),\
+        "deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-route.yaml")
+    dst_file = os.path.join(os.getcwd(),\
+        "build/assisted-installer-ocp-prometheus-route.yaml")
+    try:
+        # I have permissions
+        ingress_domain = utils.get_domain()
+    except:
+        # I have not permissions, yes it's ugly...
+ # This ingress should be there because of UI deployment + json_path_ingress = '{.spec.rules[0].host}' + cmd = "{} -n {} get ingress assisted-installer -o jsonpath='{}'".format( + CMD_BIN, deploy_options.namespace, json_path_ingress) + assisted_installer_ingress_domain = utils.check_output(cmd) + if assisted_installer_ingress_domain.split(".")[0] != 'assisted-installer': + print("Error recovering the ingress route") + sys.exit(1) + + ingress_domain = assisted_installer_ingress_domain.split(".", maxsplit=1)[1] + with open(src_file, "r") as src: + with open(dst_file, "w+") as dst: + data = src.read() + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + data = data.replace("INGRESS_DOMAIN", ingress_domain) + print("Deploying {}: {}".format(topic, dst_file)) + dst.write(data) + utils.apply(dst_file) + + +def deploy_prometheus_sub(olm_ns, cat_src): + '''Deploy Operator Subscription''' + topic = 'Prometheus Operator Subscription' + src_file = os.path.join(os.getcwd(),\ + "deploy/monitoring/prometheus/assisted-installer-operator-subscription.yaml") + dst_file = os.path.join(os.getcwd(),\ + "build/assisted-installer-operator-subscription.yaml") + with open(src_file, "r") as src: + with open(dst_file, "w+") as dst: + data = src.read() + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + data = data.replace("CAT_SRC", cat_src).replace("OLM_NAMESPACE", olm_ns) + print("Deploying {}: {}".format(topic, dst_file)) + dst.write(data) + utils.apply(dst_file) + utils.wait_for_rollout('deployment', 'prometheus-operator', deploy_options.namespace) + + +def deployer(src_file, topic): + '''Wrapper for oc/kubectl apply -f''' + src_file = os.path.join(os.getcwd(), src_file) + dst_file = os.path.join(os.getcwd(), 'build', os.path.basename(src_file)) + with open(src_file) as fp: + data = fp.read() + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + with open(dst_file, 'w') as fp: + fp.write(data) + print("Deploying {}: {}".format(topic 
,dst_file)) + utils.apply(dst_file) + + + +def main(): + '''Deploy Prometheus operator and Instance ''' + if deploy_options.target != "oc-ingress": + # Deploy Operator Group + deployer('deploy/monitoring/prometheus/assisted-installer-operator-group.yaml', + 'OperatorGroup') + # Deploy Subscription + deploy_prometheus_sub(OLM_NS, CAT_SRC) + # Deploy Prom svc + deployer('deploy/monitoring/prometheus/assisted-installer-k8s-prometheus-svc.yaml', + 'Prometheus Service') + # Deploy Prometheus Instance + deployer('deploy/monitoring/prometheus/assisted-installer-k8s-prometheus-subscription-instance.yaml', + 'Prometheus Instance on K8s') + sleep(10) + utils.check_k8s_rollout('statefulset', 'prometheus-assisted-installer-prometheus', deploy_options.namespace) + # Deploy Prom svc Monitor + deployer('deploy/monitoring/prometheus/assisted-installer-prometheus-svc-monitor.yaml', + 'Prometheus Service Monitor') + else: + # Deploy Operator Group + try: + deployer('deploy/monitoring/prometheus/assisted-installer-operator-group.yaml', + 'OperatorGroup') + except: + cmd = "{} -n {} get OperatorGroup --no-headers".format(CMD_BIN, deploy_options.namespace) + if not utils.check_output(cmd): + print("The creation of an OperatorGroup is Forbidden for you user please request a creation of one before execute this again, exiting...") + sys.exit(1) + else: + print("Another OperatorGroup exists, continuing") + # Deploy Subscription + deploy_prometheus_sub(OLM_NS, CAT_SRC) + # Deploy Oauth Pre-reqs for OCP integration + deploy_oauth_reqs() + # Deploy Prom svc; + # We create the service first in order to self-generate the secret prometheus-k8s-tls + deployer('deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-svc.yaml', + 'Prometheus Service on OCP') + # Deploy Prometheus Instance + deployer('deploy/monitoring/prometheus/assisted-installer-ocp-prometheus-subscription-instance.yaml', + 'Prometheus Instance on OCP') + sleep(10) + utils.check_k8s_rollout('statefulset', 
'prometheus-assisted-installer-prometheus', deploy_options.namespace)
+        # Deploy Prom svc Monitor
+        deployer('deploy/monitoring/prometheus/assisted-installer-prometheus-svc-monitor.yaml',
+                 'Prometheus Service Monitor')
+        # Deploy Prometheus Route
+        deploy_prometheus_route()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/deploy_role.py b/tools/deploy_role.py
index 24d9022b9..b50e48f6b 100644
--- a/tools/deploy_role.py
+++ b/tools/deploy_role.py
@@ -1,18 +1,22 @@
 import os
-import sys
 import utils
+import deployment_options
 
-SRC_FILE = os.path.join(os.getcwd(), "deploy/roles/role_binding.yaml")
-DST_FILE = os.path.join(os.getcwd(), "build/role_binding.yaml")
 
 def main():
-    with open(SRC_FILE, "r") as src:
-        with open(DST_FILE, "w+") as dst:
+    deploy_options = deployment_options.load_deployment_options()
+
+    src_file = os.path.join(os.getcwd(), "deploy/roles/default_role.yaml")
+    dst_file = os.path.join(os.getcwd(), "build/default_role.yaml")
+
+    with open(src_file, "r") as src:
+        with open(dst_file, "w+") as dst:
             data = src.read()
-            print("Deploying {}:\n{}".format(DST_FILE, data))
+            data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace)
+            print("Deploying {}".format(dst_file))
             dst.write(data)
-    utils.apply(DST_FILE)
+    utils.apply(dst_file)
 
 
 if __name__ == "__main__":
diff --git a/tools/deploy_route53.py b/tools/deploy_route53.py
new file mode 100644
index 000000000..d9e9026a9
--- /dev/null
+++ b/tools/deploy_route53.py
@@ -0,0 +1,34 @@
+import os
+import utils
+import argparse
+import deployment_options
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--secret")
+deploy_options = deployment_options.load_deployment_options(parser)
+
+
+def deploy_secret():
+    if not deploy_options.secret:
+        return
+
+    # Renderized secret with specified secret
+    src_file = os.path.join(os.getcwd(), "deploy/route53/route53-secret.yaml")
+    dst_file = os.path.join(os.getcwd(), "build/route53-secret.yaml")
+    topic = 'Route53 Secret'
+    with 
open(src_file, "r") as src: + with open(dst_file, "w+") as dst: + data = src.read() + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + data = data.replace("BASE64_CREDS", deploy_options.secret) + print("Deploying {}: {}".format(topic, dst_file)) + dst.write(data) + utils.apply(dst_file) + + +def main(): + deploy_secret() + + +if __name__ == "__main__": + main() diff --git a/tools/deploy_s3.py b/tools/deploy_s3.py index 6fa98c86f..523962b4d 100644 --- a/tools/deploy_s3.py +++ b/tools/deploy_s3.py @@ -1,18 +1,29 @@ import os -import sys import utils +import deployment_options def main(): - SRC_FILE = os.path.join(os.getcwd(), "deploy/s3/scality-deployment.yaml") - DST_FILE = os.path.join(os.getcwd(), "build/scality-deployment.yaml") - with open(SRC_FILE, "r") as src: - with open(DST_FILE, "w+") as dst: + deploy_options = deployment_options.load_deployment_options() + + src_file = os.path.join(os.getcwd(), "deploy/s3/scality-deployment.yaml") + dst_file = os.path.join(os.getcwd(), "build/scality-deployment.yaml") + with open(src_file, "r") as src: + with open(dst_file, "w+") as dst: data = src.read() - print("Deploying {}:\n{}".format(DST_FILE, data)) + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + print("Deploying {}".format(dst_file)) dst.write(data) + utils.apply(dst_file) - utils.apply("deploy/s3/scality-storage.yaml") - utils.apply(DST_FILE) + src_file = os.path.join(os.getcwd(), "deploy/s3/scality-storage.yaml") + dst_file = os.path.join(os.getcwd(), "build/scality-storage.yaml") + with open(src_file, "r") as src: + with open(dst_file, "w+") as dst: + data = src.read() + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + print("Deploying {}".format(dst_file)) + dst.write(data) + utils.apply(dst_file) if __name__ == "__main__": diff --git a/tools/deploy_s3_object_expirer.py b/tools/deploy_s3_object_expirer.py deleted file mode 100644 index f43d1f0de..000000000 --- a/tools/deploy_s3_object_expirer.py +++ 
/dev/null @@ -1,20 +0,0 @@ -import os -import sys -import utils - -SRC_FILE = os.path.join(os.getcwd(), "deploy/s3/s3-object-expirer-cron.yaml") -DST_FILE = os.path.join(os.getcwd(), "build/s3-object-expirer-cron.yaml") - -def main(): - with open(SRC_FILE, "r") as src: - with open(DST_FILE, "w+") as dst: - data = src.read() - data = data.replace("REPLACE_IMAGE", os.environ.get("OBJEXP")) - print("Deploying {}:\n{}".format(DST_FILE, data)) - dst.write(data) - - utils.apply(DST_FILE) - - -if __name__ == "__main__": - main() diff --git a/tools/deploy_scality_configmap.py b/tools/deploy_scality_configmap.py index d5e34c9c9..b04c4fd67 100644 --- a/tools/deploy_scality_configmap.py +++ b/tools/deploy_scality_configmap.py @@ -1,23 +1,21 @@ import os -import sys import utils - -SRC_FILE = os.path.join(os.getcwd(), "deploy/s3/scality-configmap.yaml") -DST_FILE = os.path.join(os.getcwd(), "build/scality-configmap.yaml") -SERVICE = 'scality' +import deployment_options def main(): - scality_url = utils.get_service_url(SERVICE) - scality_host = utils.get_service_host(SERVICE) - with open(SRC_FILE, "r") as src: - with open(DST_FILE, "w+") as dst: + deploy_options = deployment_options.load_deployment_options() + + src_file = os.path.join(os.getcwd(), "deploy/s3/scality-secret.yaml") + dst_file = os.path.join(os.getcwd(), "build/scality-secret.yaml") + scality_url = "http://cloudserver-front:8000" + with open(src_file, "r") as src: + with open(dst_file, "w+") as dst: data = src.read() - data = data.replace('REPLACE_URL', scality_url) - data = data.replace('REPLACE_HOST_NAME', scality_host) - print("Deploying {}:\n{}".format(DST_FILE, data)) + data = data.replace('REPLACE_NAMESPACE', deploy_options.namespace) + print("Deploying {}".format(dst_file)) dst.write(data) - utils.apply(DST_FILE) + utils.apply(dst_file) if __name__ == "__main__": diff --git a/tools/deploy_tls_secret.py b/tools/deploy_tls_secret.py new file mode 100644 index 000000000..e0f1e8212 --- /dev/null +++ 
b/tools/deploy_tls_secret.py @@ -0,0 +1,75 @@ +import argparse +import os +import textwrap + +import deployment_options +import utils + + +def get_ca(output_dir, force_replace=False): + ca_subject = "/CN=Assisted Installer" + ca_expiration = 365 + + ca_csr_path = os.path.join(output_dir, "ca.csr") + ca_key_path = os.path.join(output_dir, "ca-key.pem") + + if force_replace or not os.path.exists(ca_csr_path): + print(utils.check_output(f'openssl req -x509 -nodes -subj "{ca_subject}" -days {ca_expiration} ' + f'-newkey rsa:4096 -keyout "{ca_key_path}" -outform PEM -out "{ca_csr_path}"')) + + return ca_csr_path, ca_key_path + + +def generate_secret(output_dir, service, san, namespace, expiration=120, keep_files=False): + ca_csr_path, ca_key_path = get_ca(output_dir) + server_csr_path = os.path.join(output_dir, f'{service}.csr') + server_key_path = os.path.join(output_dir, f'{service}-key.pem') + + print(utils.check_output(f'openssl req -new -newkey rsa:2048 -nodes -subj "/CN={service}" ' + f'-keyout "{server_key_path}" -out "{server_csr_path}"')) + + server_cert_path = os.path.join(output_dir, f'{service}.crt') + ext_file = os.path.join(output_dir, f'{service}-tls-ext.conf') + with open(ext_file, "w") as f: + f.write(f'subjectAltName=DNS:{san}') + + print(utils.check_output(f'openssl x509 -req -days {expiration} ' + f'-extfile "{ext_file}" ' + f'-CAcreateserial -CA "{ca_csr_path}" -CAkey "{ca_key_path}" ' + f'-in "{server_csr_path}" -outform PEM -out "{server_cert_path}"')) + + secret_name = f'{service}-tls' + print(utils.check_output(textwrap.dedent(f""" + cat <