From b6c0d8c1e74d74ea007e11a4049d23b96d67bdb8 Mon Sep 17 00:00:00 2001 From: Alex Crawford Date: Wed, 26 Sep 2018 15:11:46 -0700 Subject: [PATCH] installer: remove package This is the last of the old installer code. It's no longer used and can be removed. --- .gitignore | 43 -- CONTRIBUTING.md | 1 - examples/aws.yaml | 234 ------- examples/libvirt.yaml | 103 --- hack/test-bazel-build-tarball.sh | 12 - installer/cmd/tectonic/main.go | 54 -- .../fixtures/generated/tls/aggregator-ca.crt | 20 - .../fixtures/kube-system.yaml | 109 ---- .../config-generator/fixtures/test-aws.yaml | 40 -- .../pkg/config-generator/fixtures/test.yaml | 11 - installer/pkg/config-generator/generator.go | 521 --------------- .../pkg/config-generator/generator_test.go | 171 ----- installer/pkg/config-generator/ignition.go | 146 ----- installer/pkg/config-generator/tls.go | 406 ------------ installer/pkg/copy/copy.go | 26 - installer/pkg/copy/copy_test.go | 38 -- installer/pkg/validate/fixtures/exists | 0 installer/pkg/validate/last_ip_test.go | 49 -- installer/pkg/validate/validate.go | 470 -------------- installer/pkg/validate/validate_test.go | 612 ------------------ installer/pkg/workflow/convert.go | 43 -- installer/pkg/workflow/destroy.go | 160 ----- .../pkg/workflow/fixtures/aws.basic.yaml | 39 -- .../pkg/workflow/fixtures/terraform.tfvars | 27 - installer/pkg/workflow/init.go | 122 ---- installer/pkg/workflow/init_test.go | 126 ---- installer/pkg/workflow/install.go | 92 --- installer/pkg/workflow/utils.go | 102 --- installer/pkg/workflow/workflow.go | 49 -- installer/pkg/workflow/workflow_test.go | 53 -- 30 files changed, 3879 deletions(-) delete mode 100644 examples/aws.yaml delete mode 100644 examples/libvirt.yaml delete mode 100755 hack/test-bazel-build-tarball.sh delete mode 100644 installer/cmd/tectonic/main.go delete mode 100644 installer/pkg/config-generator/fixtures/generated/tls/aggregator-ca.crt delete mode 100644 installer/pkg/config-generator/fixtures/kube-system.yaml delete mode 100644 installer/pkg/config-generator/fixtures/test-aws.yaml delete mode 100644 installer/pkg/config-generator/fixtures/test.yaml delete mode 100644 installer/pkg/config-generator/generator.go delete mode 100644 installer/pkg/config-generator/generator_test.go delete mode 100644 installer/pkg/config-generator/ignition.go delete mode 100644 installer/pkg/config-generator/tls.go delete mode 100644 installer/pkg/copy/copy.go delete mode 100644 installer/pkg/copy/copy_test.go delete mode 100644 installer/pkg/validate/fixtures/exists delete mode 100644 installer/pkg/validate/last_ip_test.go delete mode 100644 installer/pkg/validate/validate.go delete mode 100644 installer/pkg/validate/validate_test.go delete mode 100644 installer/pkg/workflow/convert.go delete mode 100644 installer/pkg/workflow/destroy.go delete mode 100644 installer/pkg/workflow/fixtures/aws.basic.yaml delete mode 100644 installer/pkg/workflow/fixtures/terraform.tfvars delete mode 100644 installer/pkg/workflow/init.go delete mode 100644 installer/pkg/workflow/init_test.go delete mode 100644 installer/pkg/workflow/install.go delete mode 100644 installer/pkg/workflow/utils.go delete mode 100644 installer/pkg/workflow/workflow.go delete mode 100644 installer/pkg/workflow/workflow_test.go diff --git a/.gitignore b/.gitignore index d719d0491d6..6c47db76625 100644 --- a/.gitignore +++ b/.gitignore @@ -16,46 +16,3 @@ bin_test/ matchbox/ /contrib/govcloud/vpn.conf tectonic-dev - -# non-default Bazel stuff -.build/ -.cache - -# Created by 
https://www.gitignore.io/api/go,bazel,terraform -# HOWEVER, I had to remove the ignore for `vendor/` - -### Bazel ### -/bazel-* - -### Go ### -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -### Terraform ### -# Local .terraform directories -**/.terraform/* - -# .tfstate files -*.tfstate -*.tfstate.* - -# Crash log files -crash.log - -# Ignore any .tfvars files that are generated automatically for each Terraform run. Most -# .tfvars files are managed as part of configuration and so should be included in -# version control. -# -# example.tfvars - -# End of https://www.gitignore.io/api/go,bazel,terraform diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 24ee0b39fc6..e9e9029e5a5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -40,7 +40,6 @@ For contributors who want to work up pull requests, the workflow is roughly: hack/go-lint.sh $(go list -f '{{ .ImportPath }}' ./...) hack/go-vet.sh ./... hack/shellcheck.sh - hack/test-bazel-build-tarball.sh hack/tf-fmt.sh -list -check hack/tf-lint.sh hack/yaml-lint.sh diff --git a/examples/aws.yaml b/examples/aws.yaml deleted file mode 100644 index 676d7f52dec..00000000000 --- a/examples/aws.yaml +++ /dev/null @@ -1,234 +0,0 @@ -admin: - email: "a@b.c" - password: "verysecure" - sshKey: "ssh-ed25519 AAAA..." -aws: - # (optional) AMI override for all nodes. Example: `ami-foobar123`. - # ec2AMIOverride: - - external: - # (optional) List of subnet IDs within an existing VPC to deploy master nodes into. - # Required to use an existing VPC and the list must match the AZ count. - # - # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]` - # masterSubnetIDs: - - # (optional) If set, the given Route53 zone ID will be used as the internal (private) zone. - # This zone will be used to create etcd DNS records as well as internal API and internal Ingress records. - # If set, no additional private zone will be created. - # - # Example: `"Z1ILINNUJGTAO1"` - # privateZone: - - # (optional) ID of an existing VPC to launch nodes into. - # If unset a new VPC is created. - # - # Example: `vpc-123456` - # vpcID: - - # (optional) List of subnet IDs within an existing VPC to deploy worker nodes into. - # Required to use an existing VPC and the list must match the AZ count. - # - # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]` - # workerSubnetIDs: - - # (optional) Extra AWS tags to be applied to created resources. - # - # Example: `{ "key" = "value", "foo" = "bar" }` - # extraTags: - - # (optional) Name of IAM role to use to access AWS in order to deploy the Tectonic Cluster. - # The name is also the full role's ARN. - # - # Example: - # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer - # installerRole: - - master: - # (optional) This configures master availability zones and their corresponding subnet CIDRs directly. - # - # Example: - # `{ eu-west-1a = "10.0.0.0/20", eu-west-1b = "10.0.16.0/20" }` - # customSubnets: - - # Instance size for the master node(s). Example: `t2.medium`. - ec2Type: t2.medium - - # (optional) List of additional security group IDs for master nodes. - # - # Example: `["sg-51530134", "sg-b253d7cc"]` - # extraSGIDs: - - # (optional) Name of IAM role to use for the instance profiles of master nodes. - # The name is also the last part of a role's ARN. 
- # - # Example: - # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer - # * Role Name = tectonic-installer - # iamRoleName: - - rootVolume: - # The amount of provisioned IOPS for the root block device of master nodes. - # Ignored if the volume type is not io1. - iops: 100 - - # The size of the volume in gigabytes for the root block device of master nodes. - size: 30 - - # The type of volume for the root block device of master nodes. - type: gp2 - - # (optional) If set to true, create private-facing ingress resources (ELB, A-records). - # If set to false, no private-facing ingress resources will be provisioned and all DNS records will be created in the public Route53 zone. - # privateEndpoints: true - - # (optional) This declares the AWS credentials profile to use. - # profile: default - - # (optional) If set to true, create public-facing ingress resources (ELB, A-records). - # If set to false, no public-facing ingress resources will be created. - # publicEndpoints: true - - # The target AWS region for the cluster. - region: us-east-1 - - # Block of IP addresses used by the VPC. - # This should not overlap with any other networks, such as a private datacenter connected via Direct Connect. - vpcCIDRBlock: 10.0.0.0/16 - - worker: - # (optional) This configures worker availability zones and their corresponding subnet CIDRs directly. - # - # Example: `{ eu-west-1a = "10.0.64.0/20", eu-west-1b = "10.0.80.0/20" }` - # customSubnets: - - # Instance size for the worker node(s). Example: `t2.medium`. - ec2Type: t2.medium - - # (optional) List of additional security group IDs for worker nodes. - # - # Example: `["sg-51530134", "sg-b253d7cc"]` - # extraSGIDs: - - # (optional) Name of IAM role to use for the instance profiles of worker nodes. - # The name is also the last part of a role's ARN. - # - # Example: - # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer - # * Role Name = tectonic-installer - # iamRoleName: - - # (optional) List of ELBs to attach all worker instances to. - # This is useful for exposing NodePort services via load-balancers managed separately from the cluster. - # - # Example: - # * `["ingress-nginx"]` - # loadBalancers: - - rootVolume: - # The amount of provisioned IOPS for the root block device of worker nodes. - # Ignored if the volume type is not io1. - iops: 100 - - # The size of the volume in gigabytes for the root block device of worker nodes. - size: 30 - - # The type of volume for the root block device of worker nodes. - type: gp2 - -# The base DNS domain of the cluster. It must NOT contain a trailing period. Some -# DNS providers will automatically add this if necessary. -# -# Example: `openshift.example.com`. -# -# Note: This field MUST be set manually prior to creating the cluster. -# This applies only to cloud platforms. -# -# For AWS, this must be a previously-existing public Route 53 zone. -# You can check for any already in your account with: -# -# $ aws route53 list-hosted-zones --query 'HostedZones[? !(Config.PrivateZone)].Name' --output text -baseDomain: - -ca: - # (optional) The content of the PEM-encoded CA certificate, used to generate Tectonic Console's server certificate. - # If left blank, a CA certificate will be automatically generated. - # cert: - - # (optional) The content of the PEM-encoded CA key, used to generate Tectonic Console's server certificate. - # This field is mandatory if `ca_cert` is set. - # key: - - # (optional) The algorithm used to generate ca_key. - # The default value is currently recommended. 
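For readers skimming the commented schema above, here is a minimal sketch of loading a config file in this shape with github.com/ghodss/yaml, the same YAML library the removed generator imports further down. The struct is a hypothetical, trimmed stand-in for the installer's real config types and covers only a few illustrative fields:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/ghodss/yaml" // same YAML library the removed generator used
)

// clusterConfig is a hypothetical, trimmed-down stand-in for the
// installer's real config types; only a few fields from examples/aws.yaml
// are modeled here for illustration.
type clusterConfig struct {
	Name       string `json:"name"`
	BaseDomain string `json:"baseDomain"`
	Platform   string `json:"platform"`
	AWS        struct {
		Region       string `json:"region"`
		VPCCIDRBlock string `json:"vpcCIDRBlock"`
	} `json:"aws"`
}

func main() {
	data, err := ioutil.ReadFile("examples/aws.yaml")
	if err != nil {
		log.Fatal(err)
	}
	var cfg clusterConfig
	// ghodss/yaml converts YAML to JSON first, so `json` tags drive decoding.
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cluster %q on %s (%s)\n", cfg.Name, cfg.Platform, cfg.AWS.Region)
}
```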
- # This field is mandatory if `ca_cert` is set. - # keyAlg: RSA - -iscsi: - # (optional) Start iscsid.service to enable iscsi volume attachment. - # enabled: false - -master: - # The name of the node pool(s) to use for master nodes - nodePools: - - master - -# The name of the cluster. -# If used in a cloud-environment, this will be prepended to `baseDomain` resulting in the URL to the Tectonic console. -# -# Note: This field MUST be set manually prior to creating the cluster. -# Warning: Special characters in the name like '.' may cause errors on OpenStack platforms due to resource name constraints. -name: - -networking: - # (optional) This declares the MTU used by Calico. - # mtu: - - # This declares the IP range to assign Kubernetes pod IPs in CIDR notation. - podCIDR: 10.2.0.0/16 - - # This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation. - # The maximum size of this IP range is /12 - serviceCIDR: 10.3.0.0/16 - - # (optional) Configures the network to be used in Tectonic. One of the following values can be used: - # - # - "flannel": enables overlay networking only. This is implemented by flannel using VXLAN. - # - # - "canal": enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico. - # - # - "calico-ipip": [ALPHA] enables BGP based networking. Routing and network policy is implemented by Calico. Note this has been tested on baremetal installations only. - # - # - "none": disables the installation of any Pod level networking layer provided by Tectonic. By setting this value, users are expected to deploy their own solution to enable network connectivity for Pods and Services. - # type: flannel - -nodePools: - # The number of master nodes to be created. - # This applies only to cloud platforms. - - count: 1 - name: master - - # The number of worker nodes to be created. - # This applies only to cloud platforms. - - count: 3 - name: worker - -# The platform used for deploying. -platform: aws - -# The pull secret in JSON format. -# This is known to be a "Docker pull secret" as produced by the docker login [1] command. -# A sample JSON content is shown in [2]. -# You can download the pull secret from your Account overview page at [3]. -# -# [1] https://docs.docker.com/engine/reference/commandline/login/ -# -# [2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup -# -# [3] https://account.coreos.com/overview -pullSecret: '{"auths": {}}' - -worker: - # The name of the node pool(s) to use for workers - nodePools: - - worker diff --git a/examples/libvirt.yaml b/examples/libvirt.yaml deleted file mode 100644 index 7065c8ad9ae..00000000000 --- a/examples/libvirt.yaml +++ /dev/null @@ -1,103 +0,0 @@ -admin: - email: a@b.c - password: verysecure - sshKey: "ssh-ed25519 AAAA..." -# The base DNS domain of the cluster. It must NOT contain a trailing period. Some -# DNS providers will automatically add this if necessary. -# -# Example: `openshift.example.com`. -# -# Note: This field MUST be set manually prior to creating the cluster. -baseDomain: - -libvirt: - # You must specify an IP address here that libvirtd is listening on, - # and that the cluster-api controller pod will be able to connect - # to. Often 192.168.122.1 is the default for the virbr0 interface. 
- uri: qemu+tcp://192.168.122.1/system - network: - name: tectonic - ifName: tt0 - ipRange: 192.168.124.0/24 - image: http://aos-ostree.rhev-ci-vms.eng.rdu2.redhat.com/rhcos/images/cloud/latest/rhcos-qemu.qcow2.gz - -ca: - # (optional) The content of the PEM-encoded CA certificate, used to generate Tectonic Console's server certificate. - # If left blank, a CA certificate will be automatically generated. - # cert: - - # (optional) The content of the PEM-encoded CA key, used to generate Tectonic Console's server certificate. - # This field is mandatory if `ca_cert` is set. - # key: - - # (optional) The algorithm used to generate ca_key. - # The default value is currently recommended. - # This field is mandatory if `ca_cert` is set. - # keyAlg: RSA - -iscsi: - # (optional) Start iscsid.service to enable iscsi volume attachment. - # enabled: false - -master: - nodePools: - - master - -# The name of the cluster. -# If used in a cloud-environment, this will be prepended to `baseDomain` resulting in the URL to the Tectonic console. -# -# Note: This field MUST be set manually prior to creating the cluster. -# Warning: Special characters in the name like '.' may cause errors on OpenStack platforms due to resource name constraints. -name: - -networking: - # (optional) This declares the MTU used by Calico. - # mtu: - - # (optional) This declares the IP range to assign Kubernetes pod IPs in CIDR notation. - podCIDR: 10.2.0.0/16 - - # (optional) This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation. - # The maximum size of this IP range is /12 - serviceCIDR: 10.3.0.0/16 - - # (optional) Configures the network to be used in Tectonic. One of the following values can be used: - # - # - "flannel": enables overlay networking only. This is implemented by flannel using VXLAN. - # - # - "canal": enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico. - # - # - "calico-ipip": [ALPHA] enables BGP based networking. Routing and network policy is implemented by Calico. Note this has been tested on baremetal installations only. - # - # - "none": disables the installation of any Pod level networking layer provided by Tectonic. By setting this value, users are expected to deploy their own solution to enable network connectivity for Pods and Services. - # type: flannel - -nodePools: - # The number of master nodes to be created. - # This applies only to cloud platforms. - - count: 1 - name: master - - # The number of worker nodes to be created. - # This applies only to cloud platforms. - - count: 2 - name: worker - -# The platform used for deploying. -platform: libvirt - -# The pull secret in JSON format. -# This is known to be a "Docker pull secret" as produced by the docker login [1] command. -# A sample JSON content is shown in [2]. -# You can download the pull secret from your Account overview page at [3]. 
-# -# [1] https://docs.docker.com/engine/reference/commandline/login/ -# -# [2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup -# -# [3] https://account.coreos.com/overview -pullSecret: '{"auths": {}}' - -worker: - nodePools: - - worker diff --git a/hack/test-bazel-build-tarball.sh b/hack/test-bazel-build-tarball.sh deleted file mode 100755 index 763cb41ebe1..00000000000 --- a/hack/test-bazel-build-tarball.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh -if [ "$IS_CONTAINER" != "" ]; then - set -x - bazel --output_base=/tmp build "$@" tarball -else - podman run --rm \ - --env IS_CONTAINER=TRUE \ - --volume "${PWD}:${PWD}:z" \ - --workdir "${PWD}" \ - quay.io/coreos/tectonic-builder:bazel-v0.3 \ - ./hack/test-bazel-build-tarball.sh -fi diff --git a/installer/cmd/tectonic/main.go b/installer/cmd/tectonic/main.go deleted file mode 100644 index b880720c0eb..00000000000 --- a/installer/cmd/tectonic/main.go +++ /dev/null @@ -1,54 +0,0 @@ -package main - -import ( - "os" - - log "github.com/sirupsen/logrus" - "gopkg.in/alecthomas/kingpin.v2" - - "github.com/openshift/installer/installer/pkg/workflow" -) - -var ( - clusterInitCommand = kingpin.Command("init", "Initialize a new Tectonic cluster") - clusterInitConfigFlag = clusterInitCommand.Flag("config", "Cluster specification file").Required().ExistingFile() - - clusterInstallCommand = kingpin.Command("install", "Create a new Tectonic cluster") - clusterInstallDirFlag = clusterInstallCommand.Flag("dir", "Cluster directory").Default(".").ExistingDir() - - clusterDestroyCommand = kingpin.Command("destroy", "Destroy an existing Tectonic cluster") - clusterDestroyDirFlag = clusterDestroyCommand.Flag("dir", "Cluster directory").Default(".").ExistingDir() - clusterDestroyContOnErr = clusterDestroyCommand.Flag("continue-on-error", "Log errors, but attempt to continue cleaning up the cluster. THIS MAY LEAK RESOURCES, because you may not have enough state left after a partial removal to be able to perform a second destroy.").Default("false").Bool() - - convertCommand = kingpin.Command("convert", "Convert a tfvars.json to a Tectonic config.yaml") - convertConfigFlag = convertCommand.Flag("config", "tfvars.json file").Required().ExistingFile() - - logLevel = kingpin.Flag("log-level", "log level (e.g. \"debug\")").Default("info").Enum("debug", "info", "warn", "error", "fatal", "panic") -) - -func main() { - var w workflow.Workflow - - switch kingpin.Parse() { - case clusterInitCommand.FullCommand(): - w = workflow.InitWorkflow(*clusterInitConfigFlag) - case clusterInstallCommand.FullCommand(): - w = workflow.InstallWorkflow(*clusterInstallDirFlag) - case clusterDestroyCommand.FullCommand(): - w = workflow.DestroyWorkflow(*clusterDestroyDirFlag, *clusterDestroyContOnErr) - case convertCommand.FullCommand(): - w = workflow.ConvertWorkflow(*convertConfigFlag) - } - - l, err := log.ParseLevel(*logLevel) - if err != nil { - // By definition we should never enter this condition since kingpin should be guarding against incorrect values. 
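On the guard here: kingpin validates Enum flags during Parse, so the ParseLevel error path is effectively unreachable as long as the Enum list mirrors logrus's level names. A standalone, illustrative sketch of that interplay (a reconstruction, not part of the removed file):

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
	"gopkg.in/alecthomas/kingpin.v2"
)

var logLevel = kingpin.Flag("log-level", "log level").
	Default("info").
	Enum("debug", "info", "warn", "error", "fatal", "panic")

func main() {
	// kingpin validates Enum flags during Parse; an invalid --log-level
	// exits with a usage error before this line is ever reached.
	kingpin.Parse()

	// Because the Enum above mirrors logrus's level names, ParseLevel
	// should only fail if the two lists drift apart.
	level, err := log.ParseLevel(*logLevel)
	if err != nil {
		log.Fatalf("invalid log-level: %v", err)
	}
	log.SetLevel(level)
	fmt.Println("logging at", level)
}
```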
- log.Fatalf("invalid log-level: %v", err) - } - log.SetLevel(l) - - if err := w.Execute(); err != nil { - log.Fatal(err) - os.Exit(1) - } -} diff --git a/installer/pkg/config-generator/fixtures/generated/tls/aggregator-ca.crt b/installer/pkg/config-generator/fixtures/generated/tls/aggregator-ca.crt deleted file mode 100644 index 27630f2cc4b..00000000000 --- a/installer/pkg/config-generator/fixtures/generated/tls/aggregator-ca.crt +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDMzCCAhugAwIBAgIISI9lAF1J/fMwDQYJKoZIhvcNAQELBQAwJjESMBAGA1UE -CxMJb3BlbnNoaWZ0MRAwDgYDVQQDEwdyb290LWNhMB4XDTE4MDkwNzIxNTIyMVoX -DTI4MDkwNDIxNTIyMlowKDERMA8GA1UECxMIYm9vdGt1YmUxEzARBgNVBAMTCmFn -Z3JlZ2F0b3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNm3Hre4oy -+J31X3i4dxy8hejzBISY8TSeoXMP9AZczQM1oyekSgf5EMSfh9DrDcKmPuTMhl6a -w6ZSjPDbrFlKrF2oIpq04Zn160i95QLC7zI8WzU/cNDS22J9pk8k9K47c/hZKRrd -AjT0rNI4qpVsDv43O1H2s5M6HiXB62rwakEALoATeufaPJEcAgP31nH9FhHft3uO -QkOur6iuXBDtv/FtPEFhmR5rDBYvUxaKXfB5c+TCevTZtjmP2bdRrvyHdWWapPtJ -auoGcV5s4Skp3tcEy24Qrl8FxR+Rsy7V+eYUPIWG1mDugmwpgq4SrB9idGoc/jqu -p6oDKRZUpxZdAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBRFMWpOb15dmp4aVFFdPtY/lrgkGTAfBgNVHSMEGDAWgBRF -MWpOb15dmp4aVFFdPtY/lrgkGTANBgkqhkiG9w0BAQsFAAOCAQEAjzLnsIZ+Jwqr -8h4TucqabLYkW1b/mk3k2GDg46xTOGpJo8UGesboclDxK/rtmDObgMLh2I9ekkPY -Yr1QNE5W4vqDOsmBm2MkqDVUg/EuOdSiNmWcJs/du452AVtk80yXTSaBQuW85SAe -AlG2CXadwTkxtLypLsUeOriKDRCV3hLCwd8lXwAHsjoU5aBLgin7lsXItoiM19LP -eJ06zH3FaOc4Kowf8JllJXel414DBsP8nX23snETIotxPXFol9xQIkjHCWaxKQSc -FWlmnA2exJprQHrt28C5W9x6odc27zKxS2D06IzETE4BtinwYhepb7P/qTAo+MX5 -FYZx86N7eg== ------END CERTIFICATE----- diff --git a/installer/pkg/config-generator/fixtures/kube-system.yaml b/installer/pkg/config-generator/fixtures/kube-system.yaml deleted file mode 100644 index 0c2934533c6..00000000000 --- a/installer/pkg/config-generator/fixtures/kube-system.yaml +++ /dev/null @@ -1,109 +0,0 @@ -apiVersion: v1 -data: - install-config: | - admin: - email: test@coreos.com - password: asd123 - baseDomain: cluster.com - clusterID: "" - machines: - - name: master - platform: - aws: - iamRoleName: "" - rootVolume: - iops: 100 - size: 30 - type: gp2 - type: t2.medium - replicas: 3 - - name: worker - platform: - aws: - iamRoleName: "" - rootVolume: - iops: 100 - size: 30 - type: gp2 - type: t2.medium - replicas: 3 - metadata: - creationTimestamp: null - name: test - networking: - podCIDR: 10.2.0.0/16 - serviceCIDR: 10.3.0.0/16 - type: canal - platform: - aws: - region: us-east-1 - vpcCIDRBlock: 10.0.0.0/16 - vpcID: "" - pullSecret: '{"auths": {}}' - kco-config: | - apiVersion: v1 - authConfig: - oidc_client_id: tectonic-kubectl - oidc_groups_claim: groups - oidc_issuer_url: https://test.cluster.com/identity - oidc_username_claim: email - cloudProviderConfig: - cloud_config_path: "" - cloud_provider_profile: aws - clusterConfig: - apiserver_url: https://test-api.cluster.com:6443 - dnsConfig: - clusterIP: 10.3.0.10 - kind: KubeCoreOperatorConfig - networkConfig: - advertise_address: 0.0.0.0 - cluster_cidr: 10.2.0.0/16 - etcd_servers: https://test-etcd-0.cluster.com:2379,https://test-etcd-1.cluster.com:2379,https://test-etcd-2.cluster.com:2379 - service_cidr: 10.3.0.0/16 - routingConfig: - subdomain: test.cluster.com - mao-config: | - apiServiceCA: | - -----BEGIN CERTIFICATE----- - MIIDMzCCAhugAwIBAgIISI9lAF1J/fMwDQYJKoZIhvcNAQELBQAwJjESMBAGA1UE - CxMJb3BlbnNoaWZ0MRAwDgYDVQQDEwdyb290LWNhMB4XDTE4MDkwNzIxNTIyMVoX - DTI4MDkwNDIxNTIyMlowKDERMA8GA1UECxMIYm9vdGt1YmUxEzARBgNVBAMTCmFn - 
Z3JlZ2F0b3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNm3Hre4oy - +J31X3i4dxy8hejzBISY8TSeoXMP9AZczQM1oyekSgf5EMSfh9DrDcKmPuTMhl6a - w6ZSjPDbrFlKrF2oIpq04Zn160i95QLC7zI8WzU/cNDS22J9pk8k9K47c/hZKRrd - AjT0rNI4qpVsDv43O1H2s5M6HiXB62rwakEALoATeufaPJEcAgP31nH9FhHft3uO - QkOur6iuXBDtv/FtPEFhmR5rDBYvUxaKXfB5c+TCevTZtjmP2bdRrvyHdWWapPtJ - auoGcV5s4Skp3tcEy24Qrl8FxR+Rsy7V+eYUPIWG1mDugmwpgq4SrB9idGoc/jqu - p6oDKRZUpxZdAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTAD - AQH/MB0GA1UdDgQWBBRFMWpOb15dmp4aVFFdPtY/lrgkGTAfBgNVHSMEGDAWgBRF - MWpOb15dmp4aVFFdPtY/lrgkGTANBgkqhkiG9w0BAQsFAAOCAQEAjzLnsIZ+Jwqr - 8h4TucqabLYkW1b/mk3k2GDg46xTOGpJo8UGesboclDxK/rtmDObgMLh2I9ekkPY - Yr1QNE5W4vqDOsmBm2MkqDVUg/EuOdSiNmWcJs/du452AVtk80yXTSaBQuW85SAe - AlG2CXadwTkxtLypLsUeOriKDRCV3hLCwd8lXwAHsjoU5aBLgin7lsXItoiM19LP - eJ06zH3FaOc4Kowf8JllJXel414DBsP8nX23snETIotxPXFol9xQIkjHCWaxKQSc - FWlmnA2exJprQHrt28C5W9x6odc27zKxS2D06IzETE4BtinwYhepb7P/qTAo+MX5 - FYZx86N7eg== - -----END CERTIFICATE----- - apiVersion: v1 - aws: - availabilityZone: "" - clusterID: "" - clusterName: test - image: ami-0af8953af3ec06b7c - region: us-east-1 - replicas: 3 - kind: machineAPIOperatorConfig - libvirt: null - provider: aws - targetNamespace: openshift-cluster-api - network-config: | - apiVersion: v1 - calicoConfig: - mtu: "1480" - kind: TectonicNetworkOperatorConfig - networkProfile: canal - podCIDR: 10.2.0.0/16 -kind: ConfigMap -metadata: - name: cluster-config-v1 - namespace: kube-system diff --git a/installer/pkg/config-generator/fixtures/test-aws.yaml b/installer/pkg/config-generator/fixtures/test-aws.yaml deleted file mode 100644 index 21fd9ed9130..00000000000 --- a/installer/pkg/config-generator/fixtures/test-aws.yaml +++ /dev/null @@ -1,40 +0,0 @@ -name: test -baseDomain: cluster.com -platform: aws -networking: - type: canal - mtu: 1480 - podCIDR: 10.2.0.0/16 - serviceCIDR: 10.3.0.0/16 -master: - nodePools: - - master -worker: - nodePools: - - worker -pullSecret: '{"auths": {}}' -admin: - email: test@coreos.com - password: asd123 -aws: - ec2AMIOverride: ami-0af8953af3ec06b7c - region: us-east-1 - sshKey: tectonic - vpcCIDRBlock: 10.0.0.0/16 - master: - ec2Type: t2.medium - rootVolume: - iops: 100 - size: 30 - type: gp2 - worker: - ec2Type: t2.medium - rootVolume: - iops: 100 - size: 30 - type: gp2 -nodePools: - - name: master - count: 3 - - name: worker - count: 3 diff --git a/installer/pkg/config-generator/fixtures/test.yaml b/installer/pkg/config-generator/fixtures/test.yaml deleted file mode 100644 index e8784ac3f2c..00000000000 --- a/installer/pkg/config-generator/fixtures/test.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: test -platform: aws -baseDomain: cluster.com -master: - nodePools: - - master -nodePools: - - name: master - count: 3 -aws: - ec2AMIOverride: ami-0af8953af3ec06b7c diff --git a/installer/pkg/config-generator/generator.go b/installer/pkg/config-generator/generator.go deleted file mode 100644 index 68e9278437b..00000000000 --- a/installer/pkg/config-generator/generator.go +++ /dev/null @@ -1,521 +0,0 @@ -package configgenerator - -import ( - "context" - "crypto/rand" - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "io/ioutil" - "net" - "path/filepath" - "strings" - - "github.com/apparentlymart/go-cidr/cidr" - "github.com/coreos/tectonic-config/config/kube-addon" - "github.com/coreos/tectonic-config/config/kube-core" - "github.com/coreos/tectonic-config/config/tectonic-network" - "github.com/coreos/tectonic-config/config/tectonic-utility" - "github.com/ghodss/yaml" - metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/openshift/installer/pkg/ipnet" - "github.com/openshift/installer/pkg/rhcos" - "github.com/openshift/installer/pkg/types" - "github.com/openshift/installer/pkg/types/config" -) - -const ( - authConfigOIDCClientID = "tectonic-kubectl" - authConfigOIDCGroupsClaim = "groups" - authConfigOIDCUsernameClaim = "email" - networkConfigAdvertiseAddress = "0.0.0.0" - identityConfigConsoleClientID = "tectonic-console" - identityConfigKubectlClientID = "tectonic-kubectl" - ingressConfigIngressKind = "haproxy-router" - certificatesStrategy = "userProvidedCA" - identityAPIService = "tectonic-identity-api.tectonic-system.svc.cluster.local" - maoTargetNamespace = "openshift-cluster-api" -) - -// ConfigGenerator defines the cluster config generation for a cluster. -type ConfigGenerator struct { - config.Cluster -} - -type configurationObject struct { - metav1.TypeMeta - - Metadata metadata `json:"metadata,omitempty"` - Data data `json:"data,omitempty"` -} - -type data map[string]string - -type metadata struct { - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` -} - -type genericData map[string]interface{} - -// New returns a ConfigGenerator for a cluster. -func New(cluster config.Cluster) ConfigGenerator { - return ConfigGenerator{ - Cluster: cluster, - } -} - -// maoOperatorConfig contains configuration for mao managed stack -// TODO(enxebre): move up to "github.com/coreos/tectonic-config -type maoOperatorConfig struct { - metav1.TypeMeta `json:",inline"` - TargetNamespace string `json:"targetNamespace"` - APIServiceCA string `json:"apiServiceCA"` - Provider string `json:"provider"` - AWS *awsConfig `json:"aws"` - Libvirt *libvirtConfig `json:"libvirt"` -} - -type libvirtConfig struct { - ClusterName string `json:"clusterName"` - URI string `json:"uri"` - NetworkName string `json:"networkName"` - IPRange string `json:"iprange"` - Replicas int `json:"replicas"` -} - -type awsConfig struct { - ClusterName string `json:"clusterName"` - ClusterID string `json:"clusterID"` - Region string `json:"region"` - AvailabilityZone string `json:"availabilityZone"` - Image string `json:"image"` - Replicas int `json:"replicas"` -} - -func (c *ConfigGenerator) maoConfig(clusterDir string) (*maoOperatorConfig, error) { - cfg := maoOperatorConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "machineAPIOperatorConfig", - }, - - TargetNamespace: maoTargetNamespace, - } - - ca, err := ioutil.ReadFile(filepath.Join(clusterDir, aggregatorCACertPath)) - if err != nil { - return nil, fmt.Errorf("could not read aggregator CA: %v", err) - } - - cfg.APIServiceCA = string(ca) - cfg.Provider = tectonicCloudProvider(c.Platform) - - switch c.Platform { - case config.PlatformAWS: - var ami string - - if c.AWS.EC2AMIOverride != "" { - ami = c.AWS.EC2AMIOverride - } else { - ami, err = rhcos.AMI(context.TODO(), rhcos.DefaultChannel, c.Region) - if err != nil { - return nil, fmt.Errorf("failed to lookup RHCOS AMI: %v", err) - } - } - - cfg.AWS = &awsConfig{ - ClusterName: c.Name, - ClusterID: c.ClusterID, - Region: c.Region, - AvailabilityZone: "", - Image: ami, - Replicas: c.NodeCount(c.Worker.NodePools), - } - - case config.PlatformLibvirt: - cfg.Libvirt = &libvirtConfig{ - ClusterName: c.Name, - URI: c.Libvirt.URI, - NetworkName: c.Libvirt.Network.Name, - IPRange: c.Libvirt.IPRange, - Replicas: c.NodeCount(c.Worker.NodePools), - } - - default: - return nil, fmt.Errorf("unknown provider for machine-api-operator: %v", cfg.Provider) - } - - 
return &cfg, nil -} - -// KubeSystem returns, if successful, a yaml string for the kube-system. -func (c *ConfigGenerator) KubeSystem(clusterDir string) (string, error) { - coreConfig, err := c.coreConfig() - if err != nil { - return "", err - } - installConfig, err := c.installConfig() - if err != nil { - return "", err - } - maoConfig, err := c.maoConfig(clusterDir) - if err != nil { - return "", err - } - - return configMap("kube-system", genericData{ - "kco-config": coreConfig, - "network-config": c.networkConfig(), - "install-config": installConfig, - "mao-config": maoConfig, - }) -} - -// TectonicSystem returns, if successful, a yaml string for the tectonic-system. -func (c *ConfigGenerator) TectonicSystem() (string, error) { - utilityConfig, err := c.utilityConfig() - if err != nil { - return "", err - } - addonConfig, err := c.addonConfig() - if err != nil { - return "", err - } - return configMap("tectonic-system", genericData{ - "addon-config": addonConfig, - "utility-config": utilityConfig, - }) -} - -// InstallConfig returns a YAML-rendered Kubernetes object with the user-supplied cluster configuration. -func (c *ConfigGenerator) InstallConfig() (string, error) { - ic, err := c.installConfig() - if err != nil { - return "", err - } - return marshalYAML(ic) -} - -func (c *ConfigGenerator) installConfig() (*types.InstallConfig, error) { - _, podCIDR, err := net.ParseCIDR(c.Networking.PodCIDR) - if err != nil { - return nil, err - } - _, serviceCIDR, err := net.ParseCIDR(c.Networking.ServiceCIDR) - if err != nil { - return nil, err - } - - var ( - platform types.Platform - masterPlatform types.MachinePoolPlatform - workerPlatform types.MachinePoolPlatform - ) - switch c.Platform { - case config.PlatformAWS: - platform.AWS = &types.AWSPlatform{ - Region: c.Region, - VPCID: c.VPCID, - VPCCIDRBlock: c.VPCCIDRBlock, - } - masterPlatform.AWS = &types.AWSMachinePoolPlatform{ - InstanceType: c.AWS.Master.EC2Type, - IAMRoleName: c.AWS.Master.IAMRoleName, - EC2RootVolume: types.EC2RootVolume{ - IOPS: c.AWS.Master.MasterRootVolume.IOPS, - Size: c.AWS.Master.MasterRootVolume.Size, - Type: c.AWS.Master.MasterRootVolume.Type, - }, - } - workerPlatform.AWS = &types.AWSMachinePoolPlatform{ - InstanceType: c.AWS.Worker.EC2Type, - IAMRoleName: c.AWS.Worker.IAMRoleName, - EC2RootVolume: types.EC2RootVolume{ - IOPS: c.AWS.Worker.WorkerRootVolume.IOPS, - Size: c.AWS.Worker.WorkerRootVolume.Size, - Type: c.AWS.Worker.WorkerRootVolume.Type, - }, - } - case config.PlatformLibvirt: - platform.Libvirt = &types.LibvirtPlatform{ - URI: c.URI, - Network: types.LibvirtNetwork{ - Name: c.Network.Name, - IfName: c.Network.IfName, - IPRange: c.Network.IPRange, - }, - } - masterPlatform.Libvirt = &types.LibvirtMachinePoolPlatform{ - ImagePool: "default", - ImageVolume: "coreos_base", - } - workerPlatform.Libvirt = &types.LibvirtMachinePoolPlatform{ - ImagePool: "default", - ImageVolume: "coreos_base", - } - default: - return nil, fmt.Errorf("installconfig: invalid platform %s", c.Platform) - } - masterCount := int64(c.NodeCount(c.Master.NodePools)) - workerCount := int64(c.NodeCount(c.Worker.NodePools)) - - return &types.InstallConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.Name, - }, - ClusterID: c.ClusterID, - Admin: types.Admin{ - Email: c.Admin.Email, - Password: c.Admin.Password, - SSHKey: c.Admin.SSHKey, - }, - BaseDomain: c.BaseDomain, - PullSecret: c.PullSecret, - Networking: types.Networking{ - Type: types.NetworkType(string(c.Networking.Type)), - ServiceCIDR: ipnet.IPNet{IPNet: *serviceCIDR}, - 
PodCIDR: ipnet.IPNet{IPNet: *podCIDR}, - }, - Platform: platform, - Machines: []types.MachinePool{{ - Name: "master", - Replicas: &masterCount, - Platform: masterPlatform, - }, { - Name: "worker", - Replicas: &workerCount, - Platform: workerPlatform, - }}, - }, nil -} - -// CoreConfig returns, if successful, a yaml string for the on-disk kco-config. -func (c *ConfigGenerator) CoreConfig() (string, error) { - coreConfig, err := c.coreConfig() - if err != nil { - return "", err - } - return marshalYAML(coreConfig) -} - -func (c *ConfigGenerator) addonConfig() (*kubeaddon.OperatorConfig, error) { - addonConfig := kubeaddon.OperatorConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: kubeaddon.APIVersion, - Kind: kubeaddon.Kind, - }, - } - addonConfig.CloudProvider = tectonicCloudProvider(c.Platform) - addonConfig.ClusterConfig.APIServerURL = c.getAPIServerURL() - registrySecret, err := generateRandomID(16) - if err != nil { - return nil, err - } - addonConfig.RegistryHTTPSecret = registrySecret - return &addonConfig, nil -} - -func (c *ConfigGenerator) coreConfig() (*kubecore.OperatorConfig, error) { - coreConfig := kubecore.OperatorConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: kubecore.APIVersion, - Kind: kubecore.Kind, - }, - } - coreConfig.ClusterConfig.APIServerURL = c.getAPIServerURL() - coreConfig.AuthConfig.OIDCClientID = authConfigOIDCClientID - coreConfig.AuthConfig.OIDCIssuerURL = c.getOicdIssuerURL() - coreConfig.AuthConfig.OIDCGroupsClaim = authConfigOIDCGroupsClaim - coreConfig.AuthConfig.OIDCUsernameClaim = authConfigOIDCUsernameClaim - - cidrhost, err := cidrhost(c.Cluster.Networking.ServiceCIDR, 10) - if err != nil { - return nil, err - } - coreConfig.DNSConfig.ClusterIP = cidrhost - - coreConfig.CloudProviderConfig.CloudConfigPath = "" - coreConfig.CloudProviderConfig.CloudProviderProfile = k8sCloudProvider(c.Cluster.Platform) - - coreConfig.RoutingConfig.Subdomain = c.getBaseAddress() - - coreConfig.NetworkConfig.ClusterCIDR = c.Cluster.Networking.PodCIDR - coreConfig.NetworkConfig.ServiceCIDR = c.Cluster.Networking.ServiceCIDR - coreConfig.NetworkConfig.AdvertiseAddress = networkConfigAdvertiseAddress - coreConfig.NetworkConfig.EtcdServers = c.getEtcdServersURLs() - - return &coreConfig, nil -} - -func (c *ConfigGenerator) networkConfig() *tectonicnetwork.OperatorConfig { - networkConfig := tectonicnetwork.OperatorConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: tectonicnetwork.APIVersion, - Kind: tectonicnetwork.Kind, - }, - } - - networkConfig.PodCIDR = c.Cluster.Networking.PodCIDR - networkConfig.CalicoConfig.MTU = c.Cluster.Networking.MTU - networkConfig.NetworkProfile = tectonicnetwork.NetworkType(c.Cluster.Networking.Type) - - return &networkConfig -} - -func (c *ConfigGenerator) utilityConfig() (*tectonicutility.OperatorConfig, error) { - utilityConfig := tectonicutility.OperatorConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: tectonicutility.APIVersion, - Kind: tectonicutility.Kind, - }, - } - - utilityConfig.TectonicConfigMapConfig.CertificatesStrategy = certificatesStrategy - utilityConfig.TectonicConfigMapConfig.ClusterID = c.Cluster.Internal.ClusterID - utilityConfig.TectonicConfigMapConfig.ClusterName = c.Cluster.Name - utilityConfig.TectonicConfigMapConfig.InstallerPlatform = tectonicCloudProvider(c.Platform) - utilityConfig.TectonicConfigMapConfig.KubeAPIServerURL = c.getAPIServerURL() - // TODO: Speficy what's a version in ut2 and set it here - utilityConfig.TectonicConfigMapConfig.TectonicVersion = "ut2" - - return &utilityConfig, nil -} - -func 
configMap(namespace string, unmarshaledData genericData) (string, error) { - data := make(data) - - for key, obj := range unmarshaledData { - str, err := marshalYAML(obj) - if err != nil { - return "", err - } - data[key] = str - } - - configurationObject := configurationObject{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ConfigMap", - }, - Metadata: metadata{ - Name: "cluster-config-v1", - Namespace: namespace, - }, - Data: data, - } - - str, err := marshalYAML(configurationObject) - if err != nil { - return "", err - } - return str, nil -} - -func marshalYAML(obj interface{}) (string, error) { - data, err := yaml.Marshal(&obj) - if err != nil { - return "", err - } - - return string(data), nil -} - -func (c *ConfigGenerator) getEtcdServersURLs() string { - etcdServers := make([]string, c.Cluster.NodeCount(c.Cluster.Master.NodePools)) - for i := range etcdServers { - etcdServers[i] = fmt.Sprintf("https://%s-etcd-%v.%s:2379", c.Cluster.Name, i, c.Cluster.BaseDomain) - } - return strings.Join(etcdServers, ",") -} - -func (c *ConfigGenerator) getAPIServerURL() string { - return fmt.Sprintf("https://%s-api.%s:6443", c.Cluster.Name, c.Cluster.BaseDomain) -} - -func (c *ConfigGenerator) getBaseAddress() string { - return fmt.Sprintf("%s.%s", c.Cluster.Name, c.Cluster.BaseDomain) -} - -func (c *ConfigGenerator) getOicdIssuerURL() string { - return fmt.Sprintf("https://%s.%s/identity", c.Cluster.Name, c.Cluster.BaseDomain) -} - -// generateRandomID reproduce tf random_id behaviour -// TODO: re-evaluate solution -func generateRandomID(byteLength int) (string, error) { - bytes := make([]byte, byteLength) - - n, err := rand.Reader.Read(bytes) - if n != byteLength { - return "", errors.New("generated insufficient random bytes") - } - if err != nil { - return "", err - } - - b64Str := base64.RawURLEncoding.EncodeToString(bytes) - - return b64Str, nil -} - -// GenerateClusterID reproduce tf cluster_id behaviour -// https://github.com/coreos/tectonic-installer/blob/master/modules/tectonic/assets.tf#L81 -// TODO: re-evaluate solution -func GenerateClusterID(byteLength int) (string, error) { - randomID, err := generateRandomID(byteLength) - if err != nil { - return "", err - } - bytes, err := base64.RawURLEncoding.DecodeString(randomID) - hexStr := hex.EncodeToString(bytes) - return fmt.Sprintf("%s-%s-%s-%s-%s", - hexStr[0:8], - hexStr[8:12], - hexStr[12:16], - hexStr[16:20], - hexStr[20:32]), nil -} - -// cidrhost takes an IP address range in CIDR notation -// and creates an IP address with the given host number. -// If given host number is negative, the count starts from the end of the range -// Reproduces tf behaviour. 
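A self-contained sketch of the ID shape GenerateClusterID above produces: 16 random bytes rendered as 8-4-4-4-12 hex groups, UUID-style. The removed code routes through base64 only because generateRandomID is shared with the registry HTTP secret; this illustrative version goes straight to hex:

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"log"
)

// clusterID mirrors the shape produced by the removed GenerateClusterID:
// 16 random bytes rendered as 8-4-4-4-12 hex groups, e.g.
// "3f9a2c1e-7b4d-4e2a-9c0f-1a2b3c4d5e6f".
func clusterID() (string, error) {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	h := hex.EncodeToString(b) // 32 hex characters
	return fmt.Sprintf("%s-%s-%s-%s-%s",
		h[0:8], h[8:12], h[12:16], h[16:20], h[20:32]), nil
}

func main() {
	id, err := clusterID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id)
}
```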
-// TODO: re-evaluate solution -func cidrhost(iprange string, hostNum int) (string, error) { - _, network, err := net.ParseCIDR(iprange) - if err != nil { - return "", fmt.Errorf("invalid CIDR expression (%s): %s", iprange, err) - } - - ip, err := cidr.Host(network, hostNum) - if err != nil { - return "", err - } - - return ip.String(), nil -} - -// Converts a platform to the cloudProvider that k8s understands -func k8sCloudProvider(platform config.Platform) string { - switch platform { - case config.PlatformAWS: - return "aws" - case config.PlatformLibvirt: - return "" - } - panic("invalid platform") -} - -// Converts a platform to the cloudProvider that Tectonic understands -func tectonicCloudProvider(platform config.Platform) string { - switch platform { - case config.PlatformAWS: - return "aws" - case config.PlatformLibvirt: - return "libvirt" - } - panic("invalid platform") -} diff --git a/installer/pkg/config-generator/generator_test.go b/installer/pkg/config-generator/generator_test.go deleted file mode 100644 index eb5e67b89fa..00000000000 --- a/installer/pkg/config-generator/generator_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package configgenerator - -import ( - "crypto/x509" - "crypto/x509/pkix" - "io/ioutil" - "os" - "testing" - - "github.com/openshift/installer/pkg/asset/tls" - "github.com/openshift/installer/pkg/types/config" - "github.com/stretchr/testify/assert" -) - -func initConfig(t *testing.T, file string) ConfigGenerator { - cluster, err := config.ParseConfigFile("./fixtures/" + file) - if err != nil { - t.Fatalf("Test case TestUrlFunctions: failed to parse test config, %s", err) - } - - return ConfigGenerator{ - *cluster, - } -} -func TestUrlFunctions(t *testing.T) { - config := initConfig(t, "test.yaml") - - testCases := []struct { - test string - got string - expected string - }{ - { - test: "getAPIServerURL", - got: config.getAPIServerURL(), - expected: "https://test-api.cluster.com:6443", - }, - { - test: "getBaseAddress", - got: config.getBaseAddress(), - expected: "test.cluster.com", - }, - { - test: "getOicdIssuerURL", - got: config.getOicdIssuerURL(), - expected: "https://test.cluster.com/identity", - }, - } - for _, tc := range testCases { - assert.Equal(t, tc.expected, tc.got) - } -} - -func TestGetEtcdServersURLs(t *testing.T) { - testCases := []struct { - test string - configFile string - expected string - }{ - { - test: "No ExternalServers", - configFile: "test.yaml", - expected: "https://test-etcd-0.cluster.com:2379,https://test-etcd-1.cluster.com:2379,https://test-etcd-2.cluster.com:2379", - }, - } - for _, tc := range testCases { - - config := initConfig(t, tc.configFile) - got := config.getEtcdServersURLs() - assert.Equal(t, tc.expected, got) - } -} - -func TestKubeSystem(t *testing.T) { - config := initConfig(t, "test-aws.yaml") - got, err := config.KubeSystem("./fixtures") - if err != nil { - t.Errorf("Test case TestKubeSystem: failed to get KubeSystem(): %s", err) - } - expected, err := ioutil.ReadFile("./fixtures/kube-system.yaml") - if err != nil { - t.Errorf("Test case TestKubeSystem: failed to ReadFile(): %s", err) - } - - assert.Equal(t, string(expected), got) -} - -func TestCIDRHost(t *testing.T) { - testCases := []struct { - test string - iprange string - hostNum int - expected string - }{ - { - test: "10.0.0.0/8", - iprange: "10.0.0.0/8", - hostNum: 8, - expected: "10.0.0.8", - }, - { - test: "10.3.0.0/16", - iprange: "10.3.0.0/16", - hostNum: 10, - expected: "10.3.0.10", - }, - } - for _, tc := range testCases { - got, err := 
cidrhost(tc.iprange, tc.hostNum) - if err != nil { - t.Errorf("Test case %s: failed to run cidrhost(): %s", tc.test, err) - } - assert.Equal(t, tc.expected, got) - } -} - -func TestGenerateCert(t *testing.T) { - caKey, err := tls.PrivateKey() - if err != nil { - t.Fatalf("Failed to generate Private Key: %v", err) - } - caCfg := &tls.CertCfg{ - Subject: pkix.Name{ - CommonName: "test-self-signed-ca", - OrganizationalUnit: []string{"openshift"}, - }, - Validity: tls.ValidityTenYears, - } - caCert, err := tls.SelfSignedCACert(caCfg, caKey) - if err != nil { - t.Fatalf("failed to generate self signed certificate: %v", err) - } - keyPath := "./test.key" - certPath := "./test.crt" - - cases := []struct { - cfg *tls.CertCfg - clusterDir string - err bool - }{ - { - cfg: &tls.CertCfg{ - Subject: pkix.Name{CommonName: "test-cert", OrganizationalUnit: []string{"test"}}, - KeyUsages: x509.KeyUsageKeyEncipherment, - DNSNames: []string{"test-api.kubernetes.default"}, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - Validity: tls.ValidityTenYears, - IsCA: false, - }, - clusterDir: "./", - err: false, - }, - } - for i, c := range cases { - _, _, err := generateCert(c.clusterDir, caKey, caCert, keyPath, certPath, c.cfg, false) - if err != nil { - no := "no" - if c.err { - no = "an" - } - t.Errorf("test case %d: expected %s error, got %v", i, no, err) - } - - if err := os.Remove(keyPath); err != nil { - t.Errorf("test case %d: failed to cleanup test key: %s, got %v", i, keyPath, err) - } - if err := os.Remove(certPath); err != nil { - t.Errorf("test case %d: failed to cleanup test certificate: %s, got %v", i, certPath, err) - } - } -} diff --git a/installer/pkg/config-generator/ignition.go b/installer/pkg/config-generator/ignition.go deleted file mode 100644 index a19a5ef49c9..00000000000 --- a/installer/pkg/config-generator/ignition.go +++ /dev/null @@ -1,146 +0,0 @@ -package configgenerator - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/url" - "path/filepath" - - ignconfig "github.com/coreos/ignition/config/v2_2" - ignconfigtypes "github.com/coreos/ignition/config/v2_2/types" - "github.com/openshift/installer/pkg/types/config" - "github.com/vincent-petithory/dataurl" -) - -const ( - caPath = "generated/tls/root-ca.crt" -) - -// GenerateIgnConfig generates Ignition configs for the workers and masters. -// It returns the content of the ignition files. -func (c *ConfigGenerator) GenerateIgnConfig(clusterDir string) (masterIgns []string, workerIgn string, err error) { - var masters config.NodePool - var workers config.NodePool - for _, pool := range c.NodePools { - switch pool.Name { - case "master": - masters = pool - case "worker": - workers = pool - default: - return nil, "", fmt.Errorf("unrecognized role: %s", pool.Name) - } - } - - ca, err := ioutil.ReadFile(filepath.Join(clusterDir, caPath)) - if err != nil { - return nil, "", err - } - - workerCfg, err := parseIgnFile(workers.IgnitionFile) - if err != nil { - return nil, "", fmt.Errorf("failed to parse Ignition config for workers: %v", err) - } - - // XXX(crawford): The SSH key should only be added to the bootstrap - // node. After that, MCO should be responsible for - // distributing SSH keys. 
- c.embedUserBlock(&workerCfg) - c.appendCertificateAuthority(&workerCfg, ca) - c.embedAppendBlock(&workerCfg, "worker", "") - - ign, err := json.Marshal(&workerCfg) - if err != nil { - return nil, "", fmt.Errorf("failed to marshal worker ignition: %v", err) - } - workerIgn = string(ign) - - masterCfg, err := parseIgnFile(masters.IgnitionFile) - if err != nil { - return nil, "", fmt.Errorf("failed to parse Ignition config for masters: %v", err) - } - - for i := 0; i < masters.Count; i++ { - ignCfg := masterCfg - - // XXX(crawford): The SSH key should only be added to the bootstrap - // node. After that, MCO should be responsible for - // distributing SSH keys. - c.embedUserBlock(&ignCfg) - c.appendCertificateAuthority(&ignCfg, ca) - c.embedAppendBlock(&ignCfg, "master", fmt.Sprintf("etcd_index=%d", i)) - - masterIgn, err := json.Marshal(&ignCfg) - if err != nil { - return nil, "", fmt.Errorf("failed to marshal master ignition: %v", err) - } - masterIgns = append(masterIgns, string(masterIgn)) - } - - return masterIgns, workerIgn, nil -} - -func parseIgnFile(filePath string) (ignconfigtypes.Config, error) { - if filePath == "" { - return ignconfigtypes.Config{ - Ignition: ignconfigtypes.Ignition{ - Version: ignconfigtypes.MaxVersion.String(), - }, - }, nil - } - - data, err := ioutil.ReadFile(filePath) - if err != nil { - return ignconfigtypes.Config{}, err - } - - cfg, rpt, _ := ignconfig.Parse(data) - if len(rpt.Entries) > 0 { - return ignconfigtypes.Config{}, fmt.Errorf("failed to parse ignition file %s: %s", filePath, rpt.String()) - } - - return cfg, nil -} - -func (c *ConfigGenerator) embedAppendBlock(ignCfg *ignconfigtypes.Config, role string, query string) { - appendBlock := ignconfigtypes.ConfigReference{ - Source: c.getMCSURL(role, query), - Verification: ignconfigtypes.Verification{Hash: nil}, - } - ignCfg.Ignition.Config.Append = append(ignCfg.Ignition.Config.Append, appendBlock) -} - -func (c *ConfigGenerator) appendCertificateAuthority(ignCfg *ignconfigtypes.Config, ca []byte) { - ignCfg.Ignition.Security.TLS.CertificateAuthorities = append(ignCfg.Ignition.Security.TLS.CertificateAuthorities, ignconfigtypes.CaReference{ - Source: dataurl.EncodeBytes(ca), - }) -} - -func (c *ConfigGenerator) embedUserBlock(ignCfg *ignconfigtypes.Config) { - userBlock := ignconfigtypes.PasswdUser{ - Name: "core", - SSHAuthorizedKeys: []ignconfigtypes.SSHAuthorizedKey{ - ignconfigtypes.SSHAuthorizedKey(c.SSHKey), - }, - } - - ignCfg.Passwd.Users = append(ignCfg.Passwd.Users, userBlock) -} - -func (c *ConfigGenerator) getMCSURL(role string, query string) string { - var u string - port := 49500 - - if role == "master" || role == "worker" { - u = func() *url.URL { - return &url.URL{ - Scheme: "https", - Host: fmt.Sprintf("%s-api.%s:%d", c.Name, c.BaseDomain, port), - Path: fmt.Sprintf("/config/%s", role), - RawQuery: query, - } - }().String() - } - return u -} diff --git a/installer/pkg/config-generator/tls.go b/installer/pkg/config-generator/tls.go deleted file mode 100644 index ee64315190c..00000000000 --- a/installer/pkg/config-generator/tls.go +++ /dev/null @@ -1,406 +0,0 @@ -package configgenerator - -import ( - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io/ioutil" - "net" - "path/filepath" - - "github.com/openshift/installer/installer/pkg/copy" - "github.com/openshift/installer/pkg/asset/tls" -) - -const ( - adminCertPath = "generated/tls/admin.crt" - adminKeyPath = "generated/tls/admin.key" - aggregatorCACertPath = "generated/tls/aggregator-ca.crt" - 
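A standalone sketch of the URL shape the getMCSURL helper above produces: the machine-config server is reached on the API load balancer at port 49500, with the node role in the path and an optional query such as the etcd index for masters. The cluster name and domain below are illustrative:

```go
package main

import (
	"fmt"
	"net/url"
)

// mcsURL mirrors the removed getMCSURL helper: the MCS is served on the
// API load balancer at port 49500, with the node role in the path and an
// optional query string (e.g. the etcd index for master nodes).
func mcsURL(name, baseDomain, role, query string) string {
	u := url.URL{
		Scheme:   "https",
		Host:     fmt.Sprintf("%s-api.%s:%d", name, baseDomain, 49500),
		Path:     fmt.Sprintf("/config/%s", role),
		RawQuery: query,
	}
	return u.String()
}

func main() {
	// Prints: https://test-api.cluster.com:49500/config/master?etcd_index=0
	fmt.Println(mcsURL("test", "cluster.com", "master", "etcd_index=0"))
}
```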
aggregatorCAKeyPath = "generated/tls/aggregator-ca.key" - apiServerCertPath = "generated/tls/apiserver.crt" - apiServerKeyPath = "generated/tls/apiserver.key" - apiServerProxyCertPath = "generated/tls/apiserver-proxy.crt" - apiServerProxyKeyPath = "generated/tls/apiserver-proxy.key" - etcdCACertPath = "generated/tls/etcd-ca.crt" - etcdCAKeyPath = "generated/tls/etcd-ca.key" - etcdClientCertPath = "generated/tls/etcd-client.crt" - etcdClientKeyPath = "generated/tls/etcd-client.key" - ingressCACertPath = "generated/tls/ingress-ca.crt" - ingressCertPath = "generated/tls/ingress.crt" - ingressKeyPath = "generated/tls/ingress.key" - kubeCACertPath = "generated/tls/kube-ca.crt" - kubeCAKeyPath = "generated/tls/kube-ca.key" - kubeletCertPath = "generated/tls/kubelet.crt" - kubeletKeyPath = "generated/tls/kubelet.key" - clusterAPIServerCertPath = "generated/tls/cluster-apiserver-ca.crt" - clusterAPIServerKeyPath = "generated/tls/cluster-apiserver-ca.key" - osAPIServerCertPath = "generated/tls/openshift-apiserver.crt" - osAPIServerKeyPath = "generated/tls/openshift-apiserver.key" - rootCACertPath = "generated/tls/root-ca.crt" - rootCAKeyPath = "generated/tls/root-ca.key" - serviceServingCACertPath = "generated/tls/service-serving-ca.crt" - serviceServingCAKeyPath = "generated/tls/service-serving-ca.key" - machineConfigServerCertPath = "generated/tls/machine-config-server.crt" - machineConfigServerKeyPath = "generated/tls/machine-config-server.key" - serviceAccountPubkeyPath = "generated/tls/service-account.pub" - serviceAccountPrivateKeyPath = "generated/tls/service-account.key" -) - -// GenerateTLSConfig fetches and validates the TLS cert files -// If no file paths were provided, the certs will be auto-generated -func (c *ConfigGenerator) GenerateTLSConfig(clusterDir string) error { - var caKey *rsa.PrivateKey - var caCert *x509.Certificate - var err error - - if c.CA.RootCAKeyPath == "" && c.CA.RootCACertPath == "" { - caCert, caKey, err = generateRootCert(clusterDir) - if err != nil { - return fmt.Errorf("failed to generate root CA certificate and key pair: %v", err) - } - } else { - // copy key and certificates - caCert, caKey, err = getCertFiles(clusterDir, c.CA.RootCACertPath, c.CA.RootCAKeyPath) - if err != nil { - return fmt.Errorf("failed to process CA certificate and key pair: %v", err) - } - } - - // generate kube CA - cfg := &tls.CertCfg{ - Subject: pkix.Name{CommonName: "kube-ca", OrganizationalUnit: []string{"bootkube"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: tls.ValidityTenYears, - IsCA: true, - } - kubeCAKey, kubeCACert, err := generateCert(clusterDir, caKey, caCert, kubeCAKeyPath, kubeCACertPath, cfg, false) - if err != nil { - return fmt.Errorf("failed to generate kubernetes CA: %v", err) - } - - // generate etcd CA - cfg = &tls.CertCfg{ - Subject: pkix.Name{CommonName: "etcd", OrganizationalUnit: []string{"etcd"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - IsCA: true, - Validity: tls.ValidityTenYears, - } - etcdCAKey, etcdCACert, err := generateCert(clusterDir, caKey, caCert, etcdCAKeyPath, etcdCACertPath, cfg, false) - if err != nil { - return fmt.Errorf("failed to generate etcd CA: %v", err) - } - - if err := copy.Copy(filepath.Join(clusterDir, etcdCAKeyPath), filepath.Join(clusterDir, "generated/tls/etcd-client-ca.key")); err != nil { - return fmt.Errorf("failed to import kube CA cert into ingress-ca.crt: %v", err) - } - if err := 
copy.Copy(filepath.Join(clusterDir, etcdCACertPath), filepath.Join(clusterDir, "generated/tls/etcd-client-ca.crt")); err != nil { - return fmt.Errorf("failed to import kube CA cert into ingress-ca.crt: %v", err) - } - - // generate etcd client certificate - cfg = &tls.CertCfg{ - Subject: pkix.Name{CommonName: "etcd", OrganizationalUnit: []string{"etcd"}}, - KeyUsages: x509.KeyUsageKeyEncipherment, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - Validity: tls.ValidityTenYears, - } - if _, _, err := generateCert(clusterDir, etcdCAKey, etcdCACert, etcdClientKeyPath, etcdClientCertPath, cfg, false); err != nil { - return fmt.Errorf("failed to generate etcd client certificate: %v", err) - } - - // generate aggregator CA - cfg = &tls.CertCfg{ - Subject: pkix.Name{CommonName: "aggregator", OrganizationalUnit: []string{"bootkube"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: tls.ValidityTenYears, - IsCA: true, - } - aggregatorCAKey, aggregatorCACert, err := generateCert(clusterDir, caKey, caCert, aggregatorCAKeyPath, aggregatorCACertPath, cfg, false) - if err != nil { - return fmt.Errorf("failed to generate aggregator CA: %v", err) - } - - // generate service-serving CA - cfg = &tls.CertCfg{ - Subject: pkix.Name{CommonName: "service-serving", OrganizationalUnit: []string{"bootkube"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: tls.ValidityTenYears, - IsCA: true, - } - if _, _, err := generateCert(clusterDir, caKey, caCert, serviceServingCAKeyPath, serviceServingCACertPath, cfg, false); err != nil { - return fmt.Errorf("failed to generate service-serving CA: %v", err) - } - - // Ingress certs - if err := copy.Copy(filepath.Join(clusterDir, kubeCACertPath), filepath.Join(clusterDir, ingressCACertPath)); err != nil { - return fmt.Errorf("failed to import kube CA cert into ingress-ca.crt: %v", err) - } - - baseAddress := c.getBaseAddress() - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - DNSNames: []string{ - baseAddress, - fmt.Sprintf("*.%s", baseAddress), - }, - Subject: pkix.Name{CommonName: baseAddress, Organization: []string{"ingress"}}, - Validity: tls.ValidityTenYears, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, kubeCAKey, kubeCACert, ingressKeyPath, ingressCertPath, cfg, true); err != nil { - return fmt.Errorf("failed to generate ingress CA: %v", err) - } - - // Kube admin certs - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - Subject: pkix.Name{CommonName: "system:admin", Organization: []string{"system:masters"}}, - Validity: tls.ValidityTenYears, - IsCA: false, - } - - if _, _, err = generateCert(clusterDir, kubeCAKey, kubeCACert, adminKeyPath, adminCertPath, cfg, false); err != nil { - return fmt.Errorf("failed to generate kube admin certificate: %v", err) - } - - // Kube API server certs - apiServerAddress, err := cidrhost(c.Cluster.Networking.ServiceCIDR, 1) - if err != nil { - return fmt.Errorf("can't resolve api server host address: %v", err) - } - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - 
Subject: pkix.Name{CommonName: "kube-apiserver", Organization: []string{"kube-master"}}, - DNSNames: []string{ - fmt.Sprintf("%s-api.%s", c.Name, c.BaseDomain), - "kubernetes", "kubernetes.default", - "kubernetes.default.svc", - "kubernetes.default.svc.cluster.local", - }, - Validity: tls.ValidityTenYears, - IPAddresses: []net.IP{net.ParseIP(apiServerAddress)}, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, kubeCAKey, kubeCACert, apiServerKeyPath, apiServerCertPath, cfg, true); err != nil { - return fmt.Errorf("failed to generate kube api server certificate: %v", err) - } - - // Kube API openshift certs - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - Subject: pkix.Name{CommonName: "openshift-apiserver", Organization: []string{"kube-master"}}, - DNSNames: []string{ - fmt.Sprintf("%s-api.%s", c.Name, c.BaseDomain), - "openshift-apiserver", - "openshift-apiserver.kube-system", - "openshift-apiserver.kube-system.svc", - "openshift-apiserver.kube-system.svc.cluster.local", - "localhost", "127.0.0.1"}, - Validity: tls.ValidityTenYears, - IPAddresses: []net.IP{net.ParseIP(apiServerAddress)}, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, aggregatorCAKey, aggregatorCACert, osAPIServerKeyPath, osAPIServerCertPath, cfg, true); err != nil { - return fmt.Errorf("failed to generate openshift api server certificate: %v", err) - } - - // Kube API proxy certs - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - Subject: pkix.Name{CommonName: "kube-apiserver-proxy", Organization: []string{"kube-master"}}, - Validity: tls.ValidityTenYears, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, aggregatorCAKey, aggregatorCACert, apiServerProxyKeyPath, apiServerProxyCertPath, cfg, false); err != nil { - return fmt.Errorf("failed to generate kube api proxy certificate: %v", err) - } - - // Kubelet certs - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - Subject: pkix.Name{CommonName: "system:serviceaccount:kube-system:default", Organization: []string{"system:serviceaccounts:kube-system"}}, - Validity: tls.ValidityThirtyMinutes, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, kubeCAKey, kubeCACert, kubeletKeyPath, kubeletCertPath, cfg, false); err != nil { - return fmt.Errorf("failed to generate kubelet certificate: %v", err) - } - - // MachineConfigServer certs - mcsDomain := fmt.Sprintf("%s-api.%s", c.Name, c.BaseDomain) - cfg = &tls.CertCfg{ - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - DNSNames: []string{mcsDomain}, - Subject: pkix.Name{CommonName: mcsDomain}, - Validity: tls.ValidityTenYears, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, caKey, caCert, machineConfigServerKeyPath, machineConfigServerCertPath, cfg, false); err != nil { - return fmt.Errorf("failed to generate machine-config-server certificate: %v", err) - } - - // Cluster API cert - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - Subject: pkix.Name{CommonName: "clusterapi", OrganizationalUnit: []string{"bootkube"}}, - DNSNames: []string{ - 
"clusterapi", - fmt.Sprintf("clusterapi.%s", maoTargetNamespace), - fmt.Sprintf("clusterapi.%s.svc", maoTargetNamespace), - fmt.Sprintf("clusterapi.%s.svc.cluster.local", maoTargetNamespace), - }, - Validity: tls.ValidityTenYears, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, aggregatorCAKey, aggregatorCACert, clusterAPIServerKeyPath, clusterAPIServerCertPath, cfg, true); err != nil { - return fmt.Errorf("failed to generate cluster-apiserver certificate: %v", err) - } - - // Service Account private and public key. - svcAccountPrivKey, err := generatePrivateKey(clusterDir, serviceAccountPrivateKeyPath) - if err != nil { - return fmt.Errorf("failed to generate service-account private key: %v", err) - } - - pubkeyPath := filepath.Join(clusterDir, serviceAccountPubkeyPath) - pubkeyData, err := tls.PublicKeyToPem(&svcAccountPrivKey.PublicKey) - if err != nil { - return fmt.Errorf("failed to generate service-account public key: %v", err) - } - if err := ioutil.WriteFile(pubkeyPath, []byte(pubkeyData), 0600); err != nil { - return fmt.Errorf("failed to write service-account public key: %v", err) - } - - return nil -} - -// generatePrivateKey generates and writes the private key to disk -func generatePrivateKey(clusterDir string, path string) (*rsa.PrivateKey, error) { - fileTargetPath := filepath.Join(clusterDir, path) - key, err := tls.PrivateKey() - if err != nil { - return nil, fmt.Errorf("error writing private key: %v", err) - } - if err := ioutil.WriteFile(fileTargetPath, []byte(tls.PrivateKeyToPem(key)), 0600); err != nil { - return nil, err - } - return key, nil -} - -// generateRootCert creates the rootCAKey and rootCACert -func generateRootCert(clusterDir string) (cert *x509.Certificate, key *rsa.PrivateKey, err error) { - targetKeyPath := filepath.Join(clusterDir, rootCAKeyPath) - targetCertPath := filepath.Join(clusterDir, rootCACertPath) - - cfg := &tls.CertCfg{ - Subject: pkix.Name{ - CommonName: "root-ca", - OrganizationalUnit: []string{"openshift"}, - }, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: tls.ValidityTenYears, - IsCA: true, - } - - caKey, caCert, err := tls.GenerateRootCertKey(cfg) - if err != nil { - return nil, nil, err - } - - if err := ioutil.WriteFile(targetKeyPath, []byte(tls.PrivateKeyToPem(caKey)), 0600); err != nil { - return nil, nil, err - } - - if err := ioutil.WriteFile(targetCertPath, []byte(tls.CertToPem(caCert)), 0666); err != nil { - return nil, nil, err - } - - return caCert, caKey, nil -} - -// getCertFiles copies the given cert/key files into the generated folder and returns their contents -func getCertFiles(clusterDir string, certPath string, keyPath string) (*x509.Certificate, *rsa.PrivateKey, error) { - keyDst := filepath.Join(clusterDir, rootCAKeyPath) - if err := copy.Copy(keyPath, keyDst); err != nil { - return nil, nil, fmt.Errorf("failed to write file: %v", err) - } - - certDst := filepath.Join(clusterDir, rootCACertPath) - if err := copy.Copy(certPath, certDst); err != nil { - return nil, nil, fmt.Errorf("failed to write file: %v", err) - } - // content validation occurs in pkg/config/validate.go - // if it fails here, something went wrong - certData, err := ioutil.ReadFile(certPath) - if err != nil { - panic(err) - } - certPem, _ := pem.Decode([]byte(string(certData))) - keyData, err := ioutil.ReadFile(keyPath) - if err != nil { - panic(err) - } - keyPem, _ := pem.Decode([]byte(string(keyData))) - key, err := x509.ParsePKCS1PrivateKey(keyPem.Bytes) - if err != 
nil { - return nil, nil, fmt.Errorf("failed to process private key: %v", err) - } - certs, err := x509.ParseCertificates(certPem.Bytes) - if err != nil { - return nil, nil, fmt.Errorf("failed to process certificate: %v", err) - } - - return certs[0], key, nil -} - -// generateCert creates a key, csr & a signed cert -// If appendCA is true, then also append the CA cert into the result cert. -// This is useful for apiserver and openshift-apiser cert which will be -// authenticated by the kubeconfig using root-ca. -func generateCert(clusterDir string, - caKey *rsa.PrivateKey, - caCert *x509.Certificate, - keyPath string, - certPath string, - cfg *tls.CertCfg, - appendCA bool) (*rsa.PrivateKey, *x509.Certificate, error) { - - targetKeyPath := filepath.Join(clusterDir, keyPath) - targetCertPath := filepath.Join(clusterDir, certPath) - - key, cert, err := tls.GenerateCert(caKey, caCert, cfg) - if err != nil { - return nil, nil, err - } - - if err := ioutil.WriteFile(targetKeyPath, []byte(tls.PrivateKeyToPem(key)), 0600); err != nil { - return nil, nil, err - } - - content := []byte(tls.CertToPem(cert)) - if appendCA { - content = append(content, '\n') - content = append(content, []byte(tls.CertToPem(caCert))...) - } - if err := ioutil.WriteFile(targetCertPath, content, 0666); err != nil { - return nil, nil, err - } - - return key, cert, nil -} diff --git a/installer/pkg/copy/copy.go b/installer/pkg/copy/copy.go deleted file mode 100644 index 9036adf9580..00000000000 --- a/installer/pkg/copy/copy.go +++ /dev/null @@ -1,26 +0,0 @@ -// Package copy supports copying a file to another path. -package copy - -import ( - "io" - "os" -) - -// Copy creates a new file at toFilePath with with mode 0666 (before -// umask) and the same content as fromFilePath. -func Copy(fromFilePath, toFilePath string) error { - from, err := os.Open(fromFilePath) - if err != nil { - return err - } - defer from.Close() - - to, err := os.OpenFile(toFilePath, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - return err - } - defer to.Close() - - _, err = io.Copy(to, from) - return err -} diff --git a/installer/pkg/copy/copy_test.go b/installer/pkg/copy/copy_test.go deleted file mode 100644 index 72b570f7063..00000000000 --- a/installer/pkg/copy/copy_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package copy - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestCopyFile(t *testing.T) { - dir, err := ioutil.TempDir("", "workflow-test-") - if err != nil { - t.Error(err) - } - defer os.RemoveAll(dir) - - sourcePath := filepath.Join(dir, "source") - sourceContent := []byte("Hello, World!\n") - err = ioutil.WriteFile(sourcePath, sourceContent, 0600) - if err != nil { - t.Error(err) - } - - targetPath := filepath.Join(dir, "target") - err = Copy(sourcePath, targetPath) - if err != nil { - t.Error(err) - } - - targetContent, err := ioutil.ReadFile(targetPath) - if err != nil { - t.Error(err) - } - - if string(targetContent) != string(sourceContent) { - t.Errorf("target %q != source %q", string(targetContent), string(sourceContent)) - } -} diff --git a/installer/pkg/validate/fixtures/exists b/installer/pkg/validate/fixtures/exists deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/installer/pkg/validate/last_ip_test.go b/installer/pkg/validate/last_ip_test.go deleted file mode 100644 index d4c8610c19e..00000000000 --- a/installer/pkg/validate/last_ip_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package validate - -import ( - "net" - "testing" -) - -func TestLastIP(t *testing.T) { - cases := []struct { - 
in net.IPNet - out net.IP - }{ - { - in: net.IPNet{ - IP: net.ParseIP("192.168.0.0").To4(), - Mask: net.CIDRMask(24, 32), - }, - out: net.ParseIP("192.168.0.255"), - }, - { - in: net.IPNet{ - IP: net.ParseIP("192.168.0.0").To4(), - Mask: net.CIDRMask(22, 32), - }, - out: net.ParseIP("192.168.3.255"), - }, - { - in: net.IPNet{ - IP: net.ParseIP("192.168.0.0").To4(), - Mask: net.CIDRMask(32, 32), - }, - out: net.ParseIP("192.168.0.0"), - }, - { - in: net.IPNet{ - IP: net.ParseIP("0.0.0.0").To4(), - Mask: net.CIDRMask(0, 32), - }, - out: net.ParseIP("255.255.255.255"), - }, - } - - var out net.IP - for i, c := range cases { - if out = lastIP(&c.in); out.String() != c.out.String() { - t.Errorf("test case %d: expected %s but got %s", i, c.out, out) - } - } -} diff --git a/installer/pkg/validate/validate.go b/installer/pkg/validate/validate.go deleted file mode 100644 index 416cc02392f..00000000000 --- a/installer/pkg/validate/validate.go +++ /dev/null @@ -1,470 +0,0 @@ -package validate - -import ( - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "net" - "os" - "regexp" - "strconv" - "strings" - "unicode/utf8" -) - -func isMatch(re string, v string) bool { - return regexp.MustCompile(re).MatchString(v) -} - -// PrefixError wraps an error with a prefix or returns nil if there was no error. -// This is useful for wrapping errors returned by generic error funcs like `NonEmpty` so that the error includes the offending field name. -func PrefixError(prefix string, err error) error { - if err != nil { - return fmt.Errorf("%s: %v", prefix, err) - } - return nil -} - -// JSONFile validates that the file at the given path is valid JSON. -func JSONFile(path string) error { - b, err := ioutil.ReadFile(path) - if err != nil { - return err - } - err = JSON(b) - if err != nil { - return fmt.Errorf("file %q contains invalid JSON: %v", path, err) - } - return nil -} - -// JSON validates that the given data is valid JSON. -func JSON(data []byte) error { - var dummy interface{} - return json.Unmarshal(data, &dummy) -} - -// FileExists validates a file exists at the given path. -func FileExists(path string) error { - _, err := os.Stat(path) - return err -} - -// NonEmpty checks if the given string contains at least one non-whitespace character and returns an error if not. -func NonEmpty(v string) error { - if utf8.RuneCountInString(strings.TrimSpace(v)) == 0 { - return errors.New("cannot be empty") - } - return nil -} - -// Int checks if the given string is a valid integer and returns an error if not. -func Int(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - if _, err := strconv.Atoi(v); err != nil { - return errors.New("invalid integer") - } - return nil -} - -// IntRange checks if the given string is a valid integer between `min` and `max` and returns an error if not. -func IntRange(v string, min int, max int) error { - i, err := strconv.Atoi(v) - if err != nil { - return Int(v) - } - if i < min { - return fmt.Errorf("cannot be less than %v", min) - } - if i > max { - return fmt.Errorf("cannot be greater than %v", max) - } - return nil -} - -// IntOdd checks if the given string is a valid integer and that it is odd and returns an error if not. -func IntOdd(v string) error { - i, err := strconv.Atoi(v) - if err != nil { - return Int(v) - } - if i%2 != 1 { - return errors.New("must be an odd integer") - } - return nil -} - -// ClusterName checks if the given string is a valid name for a cluster and returns an error if not. 
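For reviewers tracing what this deletion drops: the small validators above were written to compose, and ClusterName (below) builds on them. A minimal sketch of the observable behavior, with expected messages taken from the error strings in the source and the test table later in this patch; it assumes a pre-removal checkout, since this patch deletes the import path used here:

package main

import (
	"fmt"

	"github.com/openshift/installer/installer/pkg/validate"
)

func main() {
	// Valid: lower-case alphanumeric labels separated by dots.
	fmt.Println(validate.ClusterName("prod.example")) // <nil>

	// Upper case is rejected before any other check.
	fmt.Println(validate.ClusterName("Prod")) // must be lower case

	// Boundary characters must be [a-z0-9].
	fmt.Println(validate.ClusterName("-abc")) // must start and end with a lower case alphanumeric character [a-z0-9]
}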
-func ClusterName(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - if length := utf8.RuneCountInString(v); length < 1 || length > 253 { - return errors.New("must be between 1 and 253 characters") - } - - if strings.ToLower(v) != v { - return errors.New("must be lower case") - } - - if !isMatch("^[a-z0-9-.]*$", v) { - return errors.New("only lower case alphanumeric [a-z0-9], dashes and dots are allowed") - } - - isAlphaNum := regexp.MustCompile("^[a-z0-9]$").MatchString - - // If we got this far, we know the string is ASCII and has at least one character - if !isAlphaNum(v[:1]) || !isAlphaNum(v[len(v)-1:]) { - return errors.New("must start and end with a lower case alphanumeric character [a-z0-9]") - } - - for _, segment := range strings.Split(v, ".") { - // Each segment can have up to 63 characters - if utf8.RuneCountInString(segment) > 63 { - return errors.New("no segment between dots can be more than 63 characters") - } - if !isAlphaNum(segment[:1]) || !isAlphaNum(segment[len(segment)-1:]) { - return errors.New("segments between dots must start and end with a lower case alphanumeric character [a-z0-9]") - } - } - - return nil -} - -// AWSClusterName checks if the given string is a valid name for a cluster on AWS and returns an error if not. -// See AWS docs: -// http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-using-console-create-stack-parameters.html -// http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-loadbalancer.html#cfn-elasticloadbalancingv2-loadbalancer-name -func AWSClusterName(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - if length := utf8.RuneCountInString(v); length < 1 || length > 28 { - return errors.New("must be between 1 and 28 characters") - } - - if strings.ToLower(v) != v { - return errors.New("must be lower case") - } - - if strings.HasPrefix(v, "-") || strings.HasSuffix(v, "-") { - return errors.New("must not start or end with '-'") - } - - if !isMatch("^[a-z][-a-z0-9]*$", v) { - return errors.New("must be a lower case AWS Stack Name: [a-z][-a-z0-9]*") - } - - return nil -} - -// MAC checks if the given string is a valid MAC address and returns an error if not. -// Based on net.ParseMAC. -func MAC(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - if _, err := net.ParseMAC(v); err != nil { - return errors.New("invalid MAC Address") - } - return nil -} - -// IPv4 checks if the given string is a valid IP v4 address and returns an error if not. -// Based on net.ParseIP. -func IPv4(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - if ip := net.ParseIP(v); ip == nil || !strings.Contains(v, ".") { - return errors.New("invalid IPv4 address") - } - return nil -} - -// SubnetCIDR checks if the given string is a valid CIDR for a master nodes or worker nodes subnet and returns an error if not. 
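SubnetCIDR (below) layers several checks: netmask presence, IPv4 syntax, mask range, a final net.ParseCIDR pass, and a guard against the default Docker bridge network. A short sketch of the results, again assuming a pre-removal checkout; the expected messages in the comments come from the test table further down, and 10.2.0.0/16 is the pod CIDR used in the fixtures:

package main

import (
	"fmt"

	"github.com/openshift/installer/installer/pkg/validate"
)

func main() {
	for _, cidr := range []string{
		"10.2.0.0/16",   // <nil>
		"1.2.3.4",       // must provide a CIDR netmask (eg, /24)
		"172.17.1.2/20", // overlaps with default Docker Bridge subnet (172.17.0.0/16)
	} {
		fmt.Printf("%s: %v\n", cidr, validate.SubnetCIDR(cidr))
	}
}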
-func SubnetCIDR(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - split := strings.Split(v, "/") - - if len(split) == 1 { - return errors.New("must provide a CIDR netmask (eg, /24)") - } - - if len(split) != 2 { - return errors.New("invalid IPv4 address") - } - - ip := split[0] - - if err := IPv4(ip); err != nil { - return errors.New("invalid IPv4 address") - } - - if mask, err := strconv.Atoi(split[1]); err != nil || mask < 0 || mask > 32 { - return errors.New("invalid netmask size (must be between 0 and 32)") - } - - // Catch any invalid CIDRs not caught by the checks above - if _, _, err := net.ParseCIDR(v); err != nil { - return errors.New("invalid CIDR") - } - - if strings.HasPrefix(ip, "172.17.") { - return errors.New("overlaps with default Docker Bridge subnet (172.17.0.0/16)") - } - - return nil -} - -// AWSSubnetCIDR checks if the given string is a valid CIDR for a master nodes or worker nodes subnet in an AWS VPC and returns an error if not. -func AWSSubnetCIDR(v string) error { - if err := SubnetCIDR(v); err != nil { - return err - } - - _, network, err := net.ParseCIDR(v) - if err != nil { - return errors.New("invalid CIDR") - } - if mask, _ := network.Mask.Size(); mask < 16 || mask > 28 { - return errors.New("AWS subnets must be between /16 and /28") - } - - return nil -} - -// DomainName checks if the given string is a valid domain name and returns an error if not. -func DomainName(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - split := strings.Split(v, ".") - for i, segment := range split { - // Trailing dot is OK - if len(segment) == 0 && i == len(split)-1 { - continue - } - if !isMatch("^[a-zA-Z0-9-]{1,63}$", segment) { - return errors.New("invalid domain name") - } - } - return nil -} - -// Host checks if the given string is either a valid IPv4 address or a valid domain name and returns an error if not. -func Host(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - // Either a valid IP address or domain name - if IPv4(v) != nil && DomainName(v) != nil { - return errors.New("invalid host (must be a domain name or IP address)") - } - return nil -} - -// Port checks if the given string is a valid port number and returns an error if not. -func Port(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - if IntRange(v, 1, 65535) != nil { - return errors.New("invalid port number") - } - return nil -} - -// HostPort checks if the given string is valid : format and returns an error if not. -func HostPort(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - split := strings.Split(v, ":") - if len(split) != 2 { - return errors.New("must use : format") - } - if err := Host(split[0]); err != nil { - return err - } - return Port(split[1]) -} - -// Email checks if the given string is a valid email address and returns an error if not. -func Email(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - invalidError := errors.New("invalid email address") - - split := strings.Split(v, "@") - if len(split) != 2 { - return invalidError - } - localPart := split[0] - domain := split[1] - - if NonEmpty(localPart) != nil { - return invalidError - } - - // No whitespace allowed in local-part - if isMatch(`\s`, localPart) { - return invalidError - } - - return DomainName(domain) -} - -const base64RegExp = `[A-Za-z0-9+\/]+={0,2}` - -// Certificate checks if the given string is a valid certificate in PEM format and returns an error if not. 
-// Ignores leading and trailing whitespace. -func Certificate(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - trimmed := strings.TrimSpace(v) - - // Don't let users hang themselves - if err := PrivateKey(trimmed); err == nil { - return errors.New("invalid certificate (appears to be a private key)") - } - block, _ := pem.Decode([]byte(trimmed)) - if block == nil { - return errors.New("failed to parse certificate") - } - if _, err := x509.ParseCertificate(block.Bytes); err != nil { - return errors.New("invalid certificate") - } - return nil -} - -// PrivateKey checks if the given string is a valid private key in PEM format and returns an error if not. -// Ignores leading and trailing whitespace. -func PrivateKey(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - // try to decode the private key pem block - block, _ := pem.Decode([]byte(v)) - if block == nil { - return errors.New("failed to parse private key") - } - // if input can be decoded, let's verify the pem input is a key (and not a certificate) - if block.Type != "RSA PRIVATE KEY" { - return errors.New("invalid private key") - } - - return nil -} - -// OpenSSHPublicKey checks if the given string is a valid OpenSSH public key and returns an error if not. -// Ignores leading and trailing whitespace. -func OpenSSHPublicKey(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - trimmed := strings.TrimSpace(v) - - // Don't let users hang themselves - if isMatch(`-BEGIN [\w-]+ PRIVATE KEY-`, trimmed) { - return errors.New("invalid SSH public key (appears to be a private key)") - } - - if strings.Contains(trimmed, "\n") { - return errors.New("invalid SSH public key (should not contain any newline characters)") - } - - invalidError := errors.New("invalid SSH public key") - - keyParts := regexp.MustCompile(`\s+`).Split(trimmed, -1) - if len(keyParts) < 2 { - return invalidError - } - - keyType := keyParts[0] - keyBase64 := keyParts[1] - if !isMatch(`^[\w-]+$`, keyType) || !isMatch("^"+base64RegExp+"$", keyBase64) { - return invalidError - } - - return nil -} - -// CIDRsDontOverlap ensures two given CIDRs don't overlap -// with one another. CIDR starting IPs are canonicalized -// before being compared. -func CIDRsDontOverlap(acidr, bcidr string) error { - _, a, err := net.ParseCIDR(acidr) - if err != nil { - return fmt.Errorf("invalid CIDR %q: %v", acidr, err) - } - if err := CanonicalizeIP(&a.IP); err != nil { - return fmt.Errorf("invalid CIDR %q: %v", acidr, err) - } - _, b, err := net.ParseCIDR(bcidr) - if err != nil { - return fmt.Errorf("invalid CIDR %q: %v", bcidr, err) - } - if err := CanonicalizeIP(&b.IP); err != nil { - return fmt.Errorf("invalid CIDR %q: %v", bcidr, err) - } - err = fmt.Errorf("%q and %q overlap", acidr, bcidr) - // IPs are of different families. - if len(a.IP) != len(b.IP) { - return nil - } - if a.Contains(b.IP) { - return err - } - if a.Contains(lastIP(b)) { - return err - } - if b.Contains(a.IP) { - return err - } - if b.Contains(lastIP(a)) { - return err - } - return nil -} - -// CanonicalizeIP ensures that the given IP is in standard form -// and returns an error otherwise. 
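CIDRsDontOverlap (above) canonicalizes both networks and then checks containment in both directions, using the unexported lastIP helper (below) for each network's upper bound. A worked example, with results matching the test cases further down in this patch (pre-removal checkout assumed):

package main

import (
	"fmt"

	"github.com/openshift/installer/installer/pkg/validate"
)

func main() {
	// 192.168.0.0/30 spans 192.168.0.0-192.168.0.3, so a /30 containing .3 overlaps it...
	fmt.Println(validate.CIDRsDontOverlap("192.168.0.0/30", "192.168.0.3/30"))
	// "192.168.0.0/30" and "192.168.0.3/30" overlap

	// ...while the adjacent /30 starting at .4 does not.
	fmt.Println(validate.CIDRsDontOverlap("192.168.0.0/30", "192.168.0.4/30"))
	// <nil>
}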
-func CanonicalizeIP(ip *net.IP) error { - if ip.To4() != nil { - *ip = ip.To4() - return nil - } - if ip.To16() != nil { - *ip = ip.To16() - return nil - } - return fmt.Errorf("IP %q is of unknown type", ip) -} - -func lastIP(cidr *net.IPNet) net.IP { - var last net.IP - for i := 0; i < len(cidr.IP); i++ { - last = append(last, cidr.IP[i]|^cidr.Mask[i]) - } - return last -} diff --git a/installer/pkg/validate/validate_test.go b/installer/pkg/validate/validate_test.go deleted file mode 100644 index 8499be2b0fa..00000000000 --- a/installer/pkg/validate/validate_test.go +++ /dev/null @@ -1,612 +0,0 @@ -package validate_test - -import ( - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/openshift/installer/installer/pkg/validate" - "github.com/openshift/installer/pkg/asset/tls" -) - -const caseMsg = "must be lower case" -const emptyMsg = "cannot be empty" -const invalidDomainMsg = "invalid domain name" -const invalidHostMsg = "invalid host (must be a domain name or IP address)" -const invalidIPMsg = "invalid IPv4 address" -const invalidIntMsg = "invalid integer" -const invalidPortMsg = "invalid port number" -const noCIDRNetmaskMsg = "must provide a CIDR netmask (eg, /24)" - -type test struct { - in string - expected string -} - -type validator func(string) error - -func runTests(t *testing.T, funcName string, fn validator, tests []test) { - for _, test := range tests { - err := fn(test.in) - if (err == nil && test.expected != "") || (err != nil && err.Error() != test.expected) { - t.Errorf("For %s(%q), expected %q, got %q", funcName, test.in, test.expected, err) - } - } -} - -func TestNonEmpty(t *testing.T) { - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", ""}, - {".", ""}, - {"日本語", ""}, - } - runTests(t, "NonEmpty", validate.NonEmpty, tests) -} - -func TestInt(t *testing.T) { - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"2 3", invalidIntMsg}, - {"1.1", invalidIntMsg}, - {"abc", invalidIntMsg}, - {"日本語", invalidIntMsg}, - {"1 abc", invalidIntMsg}, - {"日本語2", invalidIntMsg}, - {"0", ""}, - {"1", ""}, - {"999999", ""}, - {"-1", ""}, - } - runTests(t, "Int", validate.Int, tests) -} - -func TestIntRange(t *testing.T) { - tests := []struct { - in string - min int - max int - expected string - }{ - {"", 4, 6, emptyMsg}, - {" ", 4, 6, emptyMsg}, - {"2 3", 1, 2, invalidIntMsg}, - {"1.1", 0, 0, invalidIntMsg}, - {"abc", -2, -1, invalidIntMsg}, - {"日本語", 99, 100, invalidIntMsg}, - {"5", 4, 6, ""}, - {"5", 5, 5, ""}, - {"5", 6, 8, "cannot be less than 6"}, - {"5", 6, 4, "cannot be less than 6"}, - {"5", 2, 4, "cannot be greater than 4"}, - } - - for _, test := range tests { - err := validate.IntRange(test.in, test.min, test.max) - if (err == nil && test.expected != "") || (err != nil && err.Error() != test.expected) { - t.Errorf("For IntRange(%q, %v, %v), expected %q, got %q", test.in, test.min, test.max, test.expected, err) - } - } -} - -func TestIntOdd(t *testing.T) { - notOddMsg := "must be an odd integer" - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"0", notOddMsg}, - {"1", ""}, - {"2", notOddMsg}, - {"99", ""}, - {"100", notOddMsg}, - {"abc", invalidIntMsg}, - {"1 abc", invalidIntMsg}, - {"日本語", invalidIntMsg}, - } - runTests(t, "IntOdd", validate.IntOdd, tests) -} - -func TestClusterName(t *testing.T) { - const charsMsg = "only lower case alphanumeric [a-z0-9], dashes and dots are allowed" - const lengthMsg = "must be between 1 and 253 characters" - const segmentLengthMsg = "no segment 
between dots can be more than 63 characters" - const startEndCharMsg = "must start and end with a lower case alphanumeric character [a-z0-9]" - const segmentStartEndCharMsg = "segments between dots must start and end with a lower case alphanumeric character [a-z0-9]" - - maxSizeName := strings.Repeat("123456789.", 25) + "123" - maxSizeSegment := strings.Repeat("1234567890", 6) + "123" - - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", ""}, - {"A", caseMsg}, - {"abc D", caseMsg}, - {"1", ""}, - {".", startEndCharMsg}, - {"a.", startEndCharMsg}, - {".a", startEndCharMsg}, - {"a.a", ""}, - {"-a", startEndCharMsg}, - {"a-", startEndCharMsg}, - {"a.-a", segmentStartEndCharMsg}, - {"a-.a", segmentStartEndCharMsg}, - {"a%a", charsMsg}, - {"日本語", charsMsg}, - {"a日本語a", charsMsg}, - {maxSizeName, ""}, - {maxSizeName + "a", lengthMsg}, - {maxSizeSegment + ".abc", ""}, - {maxSizeSegment + "a.abc", segmentLengthMsg}, - } - runTests(t, "ClusterName", validate.ClusterName, tests) -} - -func TestAWSClusterName(t *testing.T) { - const charsMsg = "must be a lower case AWS Stack Name: [a-z][-a-z0-9]*" - const lengthMsg = "must be between 1 and 28 characters" - const hyphenMsg = "must not start or end with '-'" - - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", ""}, - {"A", caseMsg}, - {"abc D", caseMsg}, - {"1", charsMsg}, - {".", charsMsg}, - {"a.", charsMsg}, - {".a", charsMsg}, - {"a.a", charsMsg}, - {"a%a", charsMsg}, - {"a-a", ""}, - {"-abc", hyphenMsg}, - {"abc-", hyphenMsg}, - {"日本語", charsMsg}, - {"a日本語a", charsMsg}, - {"a234567890123456789012345678", ""}, - {"12345678901234567890123456789", lengthMsg}, - {"A2345678901234567890123456789", lengthMsg}, - } - runTests(t, "AWSClusterName", validate.AWSClusterName, tests) -} - -func TestMAC(t *testing.T) { - const invalidMsg = "invalid MAC Address" - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"abc", invalidMsg}, - {"12:34:45:78:9A:BC", ""}, - {"12-34-45-78-9A-BC", ""}, - {"12:34:45:78:9a:bc", ""}, - {"12:34:45:78:9X:YZ", invalidMsg}, - {"12.34.45.78.9A.BC", invalidMsg}, - } - runTests(t, "MAC", validate.MAC, tests) -} - -func TestIPv4(t *testing.T) { - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"0.0.0.0", ""}, - {"1.2.3.4", ""}, - {"1.2.3.", invalidIPMsg}, - {"1.2.3.4.", invalidIPMsg}, - {"1.2.3.a", invalidIPMsg}, - {"255.255.255.255", ""}, - } - runTests(t, "IPv4", validate.IPv4, tests) -} - -func TestSubnetCIDR(t *testing.T) { - const netmaskSizeMsg = "invalid netmask size (must be between 0 and 32)" - - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"/16", invalidIPMsg}, - {"0.0.0.0/0", ""}, - {"0.0.0.0/32", ""}, - {"1.2.3.4", noCIDRNetmaskMsg}, - {"1.2.3.", noCIDRNetmaskMsg}, - {"1.2.3.4.", noCIDRNetmaskMsg}, - {"1.2.3.4/0", ""}, - {"1.2.3.4/1", ""}, - {"1.2.3.4/31", ""}, - {"1.2.3.4/32", ""}, - {"1.2.3./16", invalidIPMsg}, - {"1.2.3.4./16", invalidIPMsg}, - {"1.2.3.4/33", netmaskSizeMsg}, - {"1.2.3.4/-1", netmaskSizeMsg}, - {"1.2.3.4/abc", netmaskSizeMsg}, - {"172.17.1.2", noCIDRNetmaskMsg}, - {"172.17.1.2/", netmaskSizeMsg}, - {"172.17.1.2/33", netmaskSizeMsg}, - {"172.17.1.2/20", "overlaps with default Docker Bridge subnet (172.17.0.0/16)"}, - {"255.255.255.255/1", ""}, - {"255.255.255.255/32", ""}, - } - runTests(t, "SubnetCIDR", validate.SubnetCIDR, tests) -} - -func TestAWSsubnetCIDR(t *testing.T) { - const awsNetmaskSizeMsg = "AWS subnets must be between /16 and /28" - - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"/20", invalidIPMsg}, - {"1.2.3.4", 
noCIDRNetmaskMsg}, - {"1.2.3.4/15", awsNetmaskSizeMsg}, - {"1.2.3.4/16", ""}, - {"1.2.3.4/28", ""}, - {"1.2.3.4/29", awsNetmaskSizeMsg}, - } - runTests(t, "AWSSubnetCIDR", validate.AWSSubnetCIDR, tests) -} - -func TestDomainName(t *testing.T) { - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", ""}, - {".", invalidDomainMsg}, - {"日本語", invalidDomainMsg}, - {"日本語.com", invalidDomainMsg}, - {"abc.日本語.com", invalidDomainMsg}, - {"a日本語a.com", invalidDomainMsg}, - {"abc", ""}, - {"ABC", ""}, - {"ABC123", ""}, - {"ABC123.COM123", ""}, - {"1", ""}, - {"0.0", ""}, - {"1.2.3.4", ""}, - {"1.2.3.4.", ""}, - {"abc.", ""}, - {"abc.com", ""}, - {"abc.com.", ""}, - {"a.b.c.d.e.f", ""}, - {".abc", invalidDomainMsg}, - {".abc.com", invalidDomainMsg}, - {".abc.com", invalidDomainMsg}, - } - runTests(t, "DomainName", validate.DomainName, tests) -} - -func TestHost(t *testing.T) { - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", ""}, - {".", invalidHostMsg}, - {"日本語", invalidHostMsg}, - {"日本語.com", invalidHostMsg}, - {"abc.日本語.com", invalidHostMsg}, - {"a日本語a.com", invalidHostMsg}, - {"abc", ""}, - {"ABC", ""}, - {"ABC123", ""}, - {"ABC123.COM123", ""}, - {"1", ""}, - {"0.0", ""}, - {"1.2.3.4", ""}, - {"1.2.3.4.", ""}, - {"abc.", ""}, - {"abc.com", ""}, - {"abc.com.", ""}, - {"a.b.c.d.e.f", ""}, - {".abc", invalidHostMsg}, - {".abc.com", invalidHostMsg}, - {".abc.com", invalidHostMsg}, - } - runTests(t, "Host", validate.Host, tests) -} - -func TestPort(t *testing.T) { - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", invalidPortMsg}, - {".", invalidPortMsg}, - {"日本語", invalidPortMsg}, - {"0", invalidPortMsg}, - {"1", ""}, - {"123", ""}, - {"12345", ""}, - {"65535", ""}, - {"65536", invalidPortMsg}, - } - runTests(t, "Port", validate.Port, tests) -} - -func TestHostPort(t *testing.T) { - const invalidHostPortMsg = "must use : format" - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {".", invalidHostPortMsg}, - {"日本語", invalidHostPortMsg}, - {"abc.com", invalidHostPortMsg}, - {"abc.com:0", invalidPortMsg}, - {"abc.com:1", ""}, - {"abc.com:65535", ""}, - {"abc.com:65536", invalidPortMsg}, - {"abc.com:abc", invalidPortMsg}, - {"1.2.3.4:1234", ""}, - {"1.2.3.4:abc", invalidPortMsg}, - {"日本語:1234", invalidHostMsg}, - } - runTests(t, "HostPort", validate.HostPort, tests) -} - -func TestEmail(t *testing.T) { - const invalidMsg = "invalid email address" - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", invalidMsg}, - {".", invalidMsg}, - {"日本語", invalidMsg}, - {"a@abc.com", ""}, - {"A@abc.com", ""}, - {"1@abc.com", ""}, - {"a.B.1.あ@abc.com", ""}, - {"ア@abc.com", ""}, - {"中文@abc.com", ""}, - {"a@abc.com", ""}, - {"a@ABC.com", ""}, - {"a@123.com", ""}, - {"a@日本語.com", invalidDomainMsg}, - {"a@.com", invalidDomainMsg}, - {"@abc.com", invalidMsg}, - } - runTests(t, "Email", validate.Email, tests) -} - -func TestCertificate(t *testing.T) { - const invalidMsg = "invalid certificate" - const privateKeyMsg = "invalid certificate (appears to be a private key)" - const badPem = "failed to parse certificate" - - // throwaway rsa key - rsaKey, err := tls.PrivateKey() - if err != nil { - t.Fatalf("failed to generate private key: %v", err) - } - keyInBytes := x509.MarshalPKCS1PrivateKey(rsaKey) - keyinPem := pem.EncodeToMemory( - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: keyInBytes, - }, - ) - - // throwaway certificate - cfg := &tls.CertCfg{ - Subject: pkix.Name{ - CommonName: "test-ca", - OrganizationalUnit: []string{"openshift"}, - }, - } - cert, err := 
tls.SelfSignedCACert(cfg, rsaKey) - if err != nil { - t.Fatalf("failed to generate self signed certificate: %v", err) - } - certInPem := pem.EncodeToMemory( - &pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Raw, - }, - ) - - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", badPem}, - {".", badPem}, - {"日本語", badPem}, - {string(certInPem), ""}, - - {"-----BEGIN CERTIFICATE-----\na\n-----END CERTIFICATE-----", badPem}, - {"-----BEGIN CERTIFICATE-----\nabc\n-----END CERTIFICATE-----", badPem}, - {"-----BEGIN CERTIFICATE-----\nabc=\n-----END CERTIFICATE-----", invalidMsg}, - {"-----BEGIN CERTIFICATE-----\nabc===\n-----END CERTIFICATE-----", badPem}, - {"-----BEGIN CERTIFICATE-----\na%a\n-----END CERTIFICATE-----", badPem}, - {"-----BEGIN CERTIFICATE-----\n\nab\n-----END CERTIFICATE-----", badPem}, - {"-----BEGIN CERTIFICATE-----\nab\n\n-----END CERTIFICATE-----", badPem}, - {"-----BEGIN CERTIFICATE-----\na\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\na\n-----END CERTIFICATE-----", badPem}, - {string(keyinPem), privateKeyMsg}, - } - runTests(t, "Certificate", validate.Certificate, tests) -} - -func TestPrivateKey(t *testing.T) { - const invalidMsg = "failed to parse private key" - - // throw-away rsa key - rsaKey, err := tls.PrivateKey() - if err != nil { - t.Fatalf("failed to generate private key: %v", err) - } - keyInBytes := x509.MarshalPKCS1PrivateKey(rsaKey) - keyinPem := pem.EncodeToMemory( - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: keyInBytes, - }, - ) - - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", invalidMsg}, - {".", invalidMsg}, - {"日本語", invalidMsg}, - {string(keyinPem), ""}, - {"-----BEGIN RSA PRIVATE KEY-----\na\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\nabc\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\nabc==\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\nabc===\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN EC PRIVATE KEY-----\nabc\n-----END EC PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\na%a\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\n\nab\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\nab\n\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\na\n-----END RSA PRIVATE KEY-----\n-----BEGIN CERTIFICATE-----\na\n-----END CERTIFICATE-----", invalidMsg}, - {"-----BEGIN CERTIFICATE-----\na\n-----END CERTIFICATE-----", invalidMsg}, - } - runTests(t, "PrivateKey", validate.PrivateKey, tests) -} - -func TestOpenSSHPublicKey(t *testing.T) { - const invalidMsg = "invalid SSH public key" - const multiLineMsg = "invalid SSH public key (should not contain any newline characters)" - const privateKeyMsg = "invalid SSH public key (appears to be a private key)" - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", invalidMsg}, - {".", invalidMsg}, - {"日本語", invalidMsg}, - {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL", ""}, - {"ssh-rsa \t AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL", ""}, - {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL you@example.com", ""}, - {"\nssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL you@example.com", ""}, - {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL you@example.com\n", ""}, - {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL\nssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL", multiLineMsg}, - {"ssh-rsa\nAAAAB3NzaC1yc2EAAAADAQABAAACAQDxL you@example.com", multiLineMsg}, - {"ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL\nyou@example.com", multiLineMsg}, - {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL", ""}, - {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCt3BebCHqnSsgpLjo4kVvyfY/z2BS8t27r/7du+O2pb4xYkr7n+KFpbOz523vMTpQ+o1jY4u4TgexglyT9nqasWgLOvo1qjD1agHme8LlTPQSk07rXqOB85Uq5p7ig2zoOejF6qXhcc3n1c7+HkxHrgpBENjLVHOBpzPBIAHkAGaZcl07OCqbsG5yxqEmSGiAlh/IiUVOZgdDMaGjCRFy0wk0mQaGD66DmnFc1H5CzcPjsxr0qO65e7lTGsE930KkO1Vc+RHCVwvhdXs+c2NhJ2/3740Kpes9n1/YullaWZUzlCPDXtRuy6JRbFbvy39JUgHWGWzB3d+3f8oJ/N4qZ cardno:000603633110", ""}, - {"-----BEGIN CERTIFICATE-----abcd-----END CERTIFICATE-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\nabc\n-----END RSA PRIVATE KEY-----", privateKeyMsg}, - } - runTests(t, "OpenSSHPublicKey", validate.OpenSSHPublicKey, tests) -} - -func TestCIDRsDontOverlap(t *testing.T) { - cases := []struct { - a string - b string - err bool - }{ - { - a: "192.168.0.0/24", - b: "192.168.0.0/24", - err: true, - }, - { - a: "192.168.0.0/24", - b: "192.168.0.3/24", - err: true, - }, - { - a: "192.168.0.0/30", - b: "192.168.0.3/30", - err: true, - }, - { - a: "192.168.0.0/30", - b: "192.168.0.4/30", - err: false, - }, - { - a: "0.0.0.0/0", - b: "192.168.0.0/24", - err: true, - }, - } - - for i, c := range cases { - if err := validate.CIDRsDontOverlap(c.a, c.b); (err != nil) != c.err { - no := "no" - if c.err { - no = "an" - } - t.Errorf("test case %d: expected %s error, got %v", i, no, err) - } - } -} - -func TestJSONFile(t *testing.T) { - cases := []struct { - buf []byte - err bool - }{ - { - buf: []byte(""), - err: true, - }, - { - buf: []byte("[]"), - err: false, - }, - { - buf: []byte("foobar"), - err: true, - }, - { - buf: []byte("}}}}"), - err: true, - }, - { - buf: []byte("{}"), - err: false, - }, - { - buf: []byte(`{"foo": "bar"}`), - err: false, - }, - } - for i, c := range cases { - f, err := ioutil.TempFile("", "validate") - if err != nil { - t.Fatalf("test case %d: failed to create temporary file: %v", i, err) - } - if _, err := f.Write(c.buf); err != nil { - t.Errorf("test case %d: failed to write to temporary file: %v", i, err) - } - if err := validate.JSONFile(f.Name()); (err != nil) != c.err { - no := "no" - if c.err { - no = "an" - } - t.Errorf("test case %d: expected %s error, got %v", i, no, err) - } - f.Close() - os.Remove(f.Name()) - } -} - -func TestFileExists(t *testing.T) { - cases := []struct { - path string - err bool - }{ - { - path: "./fixtures/doesnotexist", - err: true, - }, - { - path: "./fixtures/exists", - err: false, - }, - } - for i, c := range cases { - if err := validate.FileExists(c.path); (err != nil) != c.err { - no := "no" - if c.err { - no = "an" - } - t.Errorf("test case %d: expected %s error, got %v", i, no, err) - } - } -} diff --git a/installer/pkg/workflow/convert.go b/installer/pkg/workflow/convert.go deleted file mode 100644 index 7be19738a94..00000000000 --- a/installer/pkg/workflow/convert.go +++ /dev/null @@ -1,43 +0,0 @@ -package workflow - -import ( - "encoding/json" - "fmt" - "io/ioutil" - - "github.com/openshift/installer/pkg/types/config" -) - -// ConvertWorkflow creates new instances of the 'convert' workflow, -// responsible for converting an old cluster config. 
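The convert workflow (below) is a two-step pipeline: parse the legacy terraform.tfvars JSON into config.Cluster, then print it back as YAML. A self-contained illustration of that translation using a hypothetical stand-in type (the real config.Cluster and its field tags live outside this patch; the key names and values here are taken from the fixture files elsewhere in this diff):

package main

import (
	"encoding/json"
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// miniCluster is a hypothetical stand-in for config.Cluster.
type miniCluster struct {
	Name       string `json:"tectonic_cluster_name" yaml:"name"`
	BaseDomain string `json:"tectonic_base_domain" yaml:"baseDomain"`
}

func main() {
	tfvars := []byte(`{"tectonic_cluster_name": "aws-basic", "tectonic_base_domain": "tectonic-ci.de"}`)

	// Step one: read the old JSON tfvars format.
	var c miniCluster
	if err := json.Unmarshal(tfvars, &c); err != nil {
		panic(err)
	}

	// Step two: emit the new YAML cluster config.
	out, err := yaml.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: aws-basic
	// baseDomain: tectonic-ci.de
}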
-func ConvertWorkflow(configFilePath string) Workflow { - return Workflow{ - metadata: metadata{configFilePath: configFilePath}, - steps: []step{ - readTFVarsConfigStep, - printYAMLConfigStep, - }, - } -} - -func readTFVarsConfigStep(m *metadata) error { - data, err := ioutil.ReadFile(m.configFilePath) - if err != nil { - return err - } - - m.cluster = config.Cluster{} - - return json.Unmarshal([]byte(data), &m.cluster) -} - -func printYAMLConfigStep(m *metadata) error { - yaml, err := m.cluster.YAML() - if err != nil { - return err - } - - fmt.Println(yaml) - - return nil -} diff --git a/installer/pkg/workflow/destroy.go b/installer/pkg/workflow/destroy.go deleted file mode 100644 index 3c7c6e2354b..00000000000 --- a/installer/pkg/workflow/destroy.go +++ /dev/null @@ -1,160 +0,0 @@ -package workflow - -import ( - "encoding/json" - "fmt" - "path/filepath" - "time" - - "github.com/openshift/installer/pkg/terraform" - log "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/clientcmd" - "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" -) - -const ( - machineSetNamespace = "openshift-cluster-api" - workerMachineSet = "worker" -) - -// DestroyWorkflow creates new instances of the 'destroy' workflow, -// responsible for running the actions required to remove resources -// of an existing cluster and clean up any remaining artefacts. -func DestroyWorkflow(clusterDir string, contOnErr bool) Workflow { - return Workflow{ - metadata: metadata{ - clusterDir: clusterDir, - contOnErr: contOnErr, - }, - steps: []step{ - readClusterConfigStep, - destroyWorkersStep, - destroyInfraStep, - destroyAssetsStep, - }, - } -} - -func destroyAssetsStep(m *metadata) error { - return runDestroyStep(m, terraform.AssetsStep) -} - -func destroyInfraStep(m *metadata) error { - return runDestroyStep(m, terraform.InfraStep) -} - -func destroyWorkersStep(m *metadata) error { - kubeconfig := filepath.Join(m.clusterDir, generatedPath, "auth", "kubeconfig") - - client, err := buildClusterClient(kubeconfig) - if err != nil { - return fmt.Errorf("failed to build cluster-api client: %v", err) - } - - if err := scaleDownWorkers(client); err != nil { - return fmt.Errorf("failed to scale worker MachineSet: %v", err) - } - - if err := waitForWorkers(client); err != nil { - return fmt.Errorf("worker MachineSet failed to scale down: %v", err) - } - - if err := deleteWorkerMachineSet(client); err != nil { - return fmt.Errorf("failed to delete worker MachineSet: %v", err) - } - - return nil -} - -func scaleDownWorkers(client *clientset.Clientset) error { - // Unfortunately, MachineSets don't yet support the scale - // subresource. So we have to patch the object to set the - // replicas to zero. - patch := []struct { - Op string `json:"op"` - Path string `json:"path"` - Value uint32 `json:"value"` - }{{ - Op: "replace", - Path: "/spec/replicas", - Value: 0, - }} - - patchBytes, err := json.Marshal(patch) - if err != nil { - return err - } - - _, err = client.ClusterV1alpha1(). - MachineSets(machineSetNamespace). - Patch(workerMachineSet, types.JSONPatchType, patchBytes) - - return err -} - -func waitForWorkers(client *clientset.Clientset) error { - interval := 3 * time.Second - timeout := 60 * time.Second - - log.Info("Waiting for worker MachineSet to scale down...") - - err := wait.PollImmediate(interval, timeout, func() (bool, error) { - machineSet, err := client.ClusterV1alpha1(). 
- MachineSets(machineSetNamespace). - Get(workerMachineSet, v1.GetOptions{}) - - if err != nil { - return false, err - } - - if machineSet.Status.Replicas > 0 { - return false, nil - } - - return true, nil - }) - - return err -} - -func deleteWorkerMachineSet(client *clientset.Clientset) error { - return client.ClusterV1alpha1(). - MachineSets(machineSetNamespace). - Delete(workerMachineSet, &v1.DeleteOptions{}) -} - -func runDestroyStep(m *metadata, step string, extraArgs ...string) error { - if !terraform.HasStateFile(m.clusterDir, step) { - // there is no statefile, therefore nothing to destroy for this step - return nil - } - - dir, err := terraform.BaseLocation() - if err != nil { - return err - } - - templateDir, err := terraform.FindStepTemplates(dir, step, m.cluster.Platform) - if err != nil { - return err - } - - return terraform.Destroy(m.clusterDir, step, templateDir, extraArgs...) -} - -func buildClusterClient(kubeconfig string) (*clientset.Clientset, error) { - config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) - if err != nil { - return nil, fmt.Errorf("failed to build config: %v", err) - } - - client, err := clientset.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("failed to build client: %v", err) - } - - return client, nil -} diff --git a/installer/pkg/workflow/fixtures/aws.basic.yaml b/installer/pkg/workflow/fixtures/aws.basic.yaml deleted file mode 100644 index a2f2ca7f46d..00000000000 --- a/installer/pkg/workflow/fixtures/aws.basic.yaml +++ /dev/null @@ -1,39 +0,0 @@ -admin: - email: fake-email@example.com - password: fake-password -aws: - ec2AMIOverride: ami-0af8953af3ec06b7c - master: - ec2Type: m4.large - rootVolume: - iops: 100 - size: 30 - type: gp2 - worker: - ec2Type: m4.large - rootVolume: - iops: 100 - size: 30 - type: gp2 -baseDomain: tectonic-ci.de -master: - nodePools: - - master -name: aws-basic -networking: - mtu: 1480 - podCIDR: 10.2.0.0/16 - serviceCIDR: 10.3.0.0/16 - type: canal -nodePools: - - name: master - count: 2 - - name: worker - count: 3 -platform: aws -pullSecret: '{"auths": {}}' -worker: - nodePools: - - worker -CA: - rootCAKeyAlg: DES diff --git a/installer/pkg/workflow/fixtures/terraform.tfvars b/installer/pkg/workflow/fixtures/terraform.tfvars deleted file mode 100644 index c427ff5be44..00000000000 --- a/installer/pkg/workflow/fixtures/terraform.tfvars +++ /dev/null @@ -1,27 +0,0 @@ -{ - "tectonic_admin_email": "fake-email@example.com", - "tectonic_admin_password": "fake-password", - "tectonic_aws_ec2_ami_override": "ami-0af8953af3ec06b7c", - "tectonic_aws_endpoints": "all", - "tectonic_aws_master_ec2_type": "m4.large", - "tectonic_aws_master_root_volume_iops": 100, - "tectonic_aws_master_root_volume_size": 30, - "tectonic_aws_master_root_volume_type": "gp2", - "tectonic_aws_profile": "default", - "tectonic_aws_region": "us-east-1", - "tectonic_aws_vpc_cidr_block": "10.0.0.0/16", - "tectonic_aws_worker_ec2_type": "m4.large", - "tectonic_aws_worker_root_volume_iops": 100, - "tectonic_aws_worker_root_volume_size": 30, - "tectonic_aws_worker_root_volume_type": "gp2", - "tectonic_base_domain": "tectonic-ci.de", - "tectonic_libvirt_network_if": "osbr0", - "tectonic_master_count": 2, - "tectonic_cluster_name": "aws-basic", - "tectonic_networking": "canal", - "tectonic_service_cidr": "10.3.0.0/16", - "tectonic_cluster_cidr": "10.2.0.0/16", - "tectonic_platform": "aws", - "tectonic_pull_secret": "{\"auths\": {}}", - "tectonic_worker_count": 3 -} diff --git a/installer/pkg/workflow/init.go b/installer/pkg/workflow/init.go 
deleted file mode 100644
index c778f6a0c1f..00000000000
--- a/installer/pkg/workflow/init.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package workflow
-
-import (
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-
-	yaml "gopkg.in/yaml.v2"
-
-	configgenerator "github.com/openshift/installer/installer/pkg/config-generator"
-	"github.com/openshift/installer/pkg/types/config"
-)
-
-const (
-	generatedPath = "generated"
-	kcoConfigFileName = "kco-config.yaml"
-	maoConfigFileName = "mao-config.yaml"
-	kubeSystemPath = "generated/manifests"
-	kubeSystemFileName = "cluster-config.yaml"
-	tectonicSystemPath = "generated/tectonic"
-	tlsPath = "generated/tls"
-	tectonicSystemFileName = "cluster-config.yaml"
-	terraformVariablesFileName = "terraform.tfvars"
-)
-
-// InitWorkflow creates new instances of the 'init' workflow,
-// responsible for initializing a new cluster.
-func InitWorkflow(configFilePath string) Workflow {
-	return Workflow{
-		metadata: metadata{configFilePath: configFilePath},
-		steps: []step{
-			prepareWorkspaceStep,
-		},
-	}
-}
-
-func buildInternalConfig(clusterDir string) error {
-	if clusterDir == "" {
-		return errors.New("no cluster dir given for building internal config")
-	}
-
-	// fill the internal struct
-	clusterID, err := configgenerator.GenerateClusterID(16)
-	if err != nil {
-		return err
-	}
-	internalCfg := config.Internal{
-		ClusterID: clusterID,
-	}
-
-	// store the content
-	yamlContent, err := yaml.Marshal(internalCfg)
-	if err != nil {
-		return err
-	}
-	internalFileContent := []byte("# Do not touch, auto-generated\n")
-	internalFileContent = append(internalFileContent, yamlContent...)
-	return ioutil.WriteFile(filepath.Join(clusterDir, internalFileName), internalFileContent, 0666)
-}
-
-func generateTerraformVariablesStep(m *metadata) error {
-	vars, err := m.cluster.TFVars()
-	if err != nil {
-		return err
-	}
-
-	terraformVariablesFilePath := filepath.Join(m.clusterDir, terraformVariablesFileName)
-	return ioutil.WriteFile(terraformVariablesFilePath, []byte(vars), 0666)
-}
-
-func prepareWorkspaceStep(m *metadata) error {
-	dir, err := os.Getwd()
-	if err != nil {
-		return fmt.Errorf("failed to get current directory: %v", err)
-	}
-
-	if m.configFilePath == "" {
-		return errors.New("a path to a config file is required")
-	}
-
-	// load initial cluster config to get cluster.Name
-	cluster, err := readClusterConfig(m.configFilePath, "")
-	if err != nil {
-		return fmt.Errorf("failed to get configuration from file %q: %v", m.configFilePath, err)
-	}
-
-	if err := cluster.ValidateAndLog(); err != nil {
-		return err
-	}
-
-	if cluster.Platform == config.PlatformLibvirt {
-		if err := cluster.Libvirt.UseCachedImage(); err != nil {
-			return err
-		}
-	}
-
-	// generate clusterDir folder
-	clusterDir := filepath.Join(dir, cluster.Name)
-	m.clusterDir = clusterDir
-	if stat, err := os.Stat(clusterDir); err == nil && stat.IsDir() {
-		return fmt.Errorf("cluster directory already exists at %q", clusterDir)
-	}
-
-	if err := os.MkdirAll(clusterDir, os.ModeDir|0755); err != nil {
-		return fmt.Errorf("failed to create cluster directory at %q", clusterDir)
-	}
-
-	// put config file under the clusterDir folder
-	configFilePath := filepath.Join(clusterDir, configFileName)
-	configContent, err := yaml.Marshal(cluster)
-	if err != nil {
-		return err
-	}
-	if err := ioutil.WriteFile(configFilePath, configContent, 0666); err != nil {
-		return fmt.Errorf("failed to create cluster config at %q: %v", clusterDir, err)
-	}
-
-	// generate the internal config file under the clusterDir folder
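	// For illustration: buildInternalConfig (above) writes internal.yaml with a
	// "# Do not touch, auto-generated" header followed by the marshaled
	// config.Internal. Assuming yaml.v2's default field naming (the real struct
	// tags live outside this patch), the file looks roughly like:
	//
	//   # Do not touch, auto-generated
	//   clusterid: 0N3rNbCkBqyqbVJD
	//
	// where the value is a GenerateClusterID(16) result (the sample shown is
	// hypothetical; per the test below it matches ^[a-zA-Z0-9_-]*$).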
-	return buildInternalConfig(clusterDir)
-}
diff --git a/installer/pkg/workflow/init_test.go b/installer/pkg/workflow/init_test.go
deleted file mode 100644
index 4501572c779..00000000000
--- a/installer/pkg/workflow/init_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package workflow
-
-import (
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"regexp"
-	"testing"
-
-	"github.com/openshift/installer/pkg/types/config"
-)
-
-func initTestCluster(cfg string) (*config.Cluster, error) {
-	testConfig, err := config.ParseConfigFile(cfg)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse test config: %v", err)
-	}
-	testConfig.PullSecret = "{\"auths\": {}}"
-	if len(testConfig.Validate()) != 0 {
-		return nil, errors.New("failed to validate test config")
-	}
-	return testConfig, nil
-}
-
-func TestGenerateTerraformVariablesStep(t *testing.T) {
-	expectedTfVarsFilePath := "./fixtures/terraform.tfvars"
-	clusterDir := "."
-	gotTfVarsFilePath := filepath.Join(clusterDir, terraformVariablesFileName)
-
-	// clean up
-	defer func() {
-		if err := os.Remove(gotTfVarsFilePath); err != nil {
-			t.Errorf("failed to clean up generated tf vars file: %v", err)
-		}
-	}()
-
-	cluster, err := initTestCluster("./fixtures/aws.basic.yaml")
-	if err != nil {
-		t.Fatalf("failed to init cluster: %v", err)
-	}
-
-	m := &metadata{
-		cluster: *cluster,
-		clusterDir: clusterDir,
-	}
-
-	generateTerraformVariablesStep(m)
-	gotData, err := ioutil.ReadFile(gotTfVarsFilePath)
-	if err != nil {
-		t.Errorf("failed to load generated tf vars file: %v", err)
-	}
-	got := string(gotData)
-
-	expectedData, err := ioutil.ReadFile(expectedTfVarsFilePath)
-	if err != nil {
-		t.Errorf("failed to load expected tf vars file: %v", err)
-	}
-	expected := string(expectedData)
-
-	if got+"\n" != expected {
-		t.Errorf("expected: %s, got: %s", expected, got)
-	}
-}
-
-func TestBuildInternalConfig(t *testing.T) {
-	testClusterDir := "."
- internalFilePath := filepath.Join(testClusterDir, internalFileName) - - // clean up - defer func() { - if err := os.Remove(internalFilePath); err != nil { - t.Errorf("failed to remove temp file: %v", err) - } - }() - - errorTestCases := []struct { - test string - got string - expected string - }{ - { - test: "no clusterDir exists", - got: buildInternalConfig("").Error(), - expected: "no cluster dir given for building internal config", - }, - } - - for _, tc := range errorTestCases { - if tc.got != tc.expected { - t.Errorf("test case %s: expected: %s, got: %s", tc.test, tc.expected, tc.got) - } - } - - if err := buildInternalConfig(testClusterDir); err != nil { - t.Errorf("failed to run buildInternalStep, %v", err) - } - - if _, err := os.Stat(internalFilePath); err != nil { - t.Errorf("failed to create internal file, %v", err) - } - - testInternal, err := config.ParseInternalFile(internalFilePath) - if err != nil { - t.Errorf("failed to parse internal file, %v", err) - } - testCases := []struct { - test string - got string - expected string - }{ - { - test: "clusterId", - got: testInternal.ClusterID, - expected: "^[a-zA-Z0-9_-]*$", - }, - } - - for _, tc := range testCases { - match, _ := regexp.MatchString(tc.expected, tc.got) - if !match { - t.Errorf("test case %s: expected: %s, got: %s", tc.test, tc.expected, tc.got) - } - } -} diff --git a/installer/pkg/workflow/install.go b/installer/pkg/workflow/install.go deleted file mode 100644 index 27f71bda063..00000000000 --- a/installer/pkg/workflow/install.go +++ /dev/null @@ -1,92 +0,0 @@ -package workflow - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/openshift/installer/installer/pkg/config-generator" - "github.com/openshift/installer/pkg/terraform" - "github.com/openshift/installer/pkg/types/config" -) - -// InstallWorkflow creates new instances of the 'install' workflow, -// responsible for running the actions necessary to install a new cluster. -func InstallWorkflow(clusterDir string) Workflow { - return Workflow{ - metadata: metadata{clusterDir: clusterDir}, - steps: []step{ - readClusterConfigStep, - generateTerraformVariablesStep, - generateTLSConfigStep, - generateClusterConfigMaps, - generateIgnConfigStep, - installAssetsStep, - installInfraStep, - }, - } -} - -func installAssetsStep(m *metadata) error { - return runInstallStep(m, terraform.AssetsStep) -} - -func installInfraStep(m *metadata) error { - return runInstallStep(m, terraform.InfraStep) -} - -func runInstallStep(m *metadata, step string, extraArgs ...string) error { - dir, err := terraform.BaseLocation() - if err != nil { - return err - } - templateDir, err := terraform.FindStepTemplates(dir, step, m.cluster.Platform) - if err != nil { - return err - } - if err := terraform.Init(m.clusterDir, templateDir); err != nil { - return err - } - _, err = terraform.Apply(m.clusterDir, step, templateDir, extraArgs...) 
- return err -} - -func generateIgnConfigStep(m *metadata) error { - c := configgenerator.New(m.cluster) - masterIgns, workerIgn, err := c.GenerateIgnConfig(m.clusterDir) - if err != nil { - return fmt.Errorf("failed to generate ignition configs: %v", err) - } - - terraformVariablesFilePath := filepath.Join(m.clusterDir, terraformVariablesFileName) - data, err := ioutil.ReadFile(terraformVariablesFilePath) - if err != nil { - return fmt.Errorf("failed to read terraform.tfvars: %v", err) - } - - var cluster config.Cluster - if err := json.Unmarshal(data, &cluster); err != nil { - return fmt.Errorf("failed to unmarshal terraform.tfvars: %v", err) - } - - cluster.IgnitionMasters = masterIgns - cluster.IgnitionWorker = workerIgn - - data, err = json.MarshalIndent(&cluster, "", " ") - if err != nil { - return fmt.Errorf("failed to marshal terraform.tfvars: %v", err) - } - - return ioutil.WriteFile(terraformVariablesFilePath, data, 0666) -} - -func generateTLSConfigStep(m *metadata) error { - if err := os.MkdirAll(filepath.Join(m.clusterDir, tlsPath), os.ModeDir|0755); err != nil { - return fmt.Errorf("failed to create TLS directory at %s", tlsPath) - } - - c := configgenerator.New(m.cluster) - return c.GenerateTLSConfig(m.clusterDir) -} diff --git a/installer/pkg/workflow/utils.go b/installer/pkg/workflow/utils.go deleted file mode 100644 index b0345bf656e..00000000000 --- a/installer/pkg/workflow/utils.go +++ /dev/null @@ -1,102 +0,0 @@ -package workflow - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - configgenerator "github.com/openshift/installer/installer/pkg/config-generator" - "github.com/openshift/installer/pkg/types/config" -) - -const ( - configFileName = "config.yaml" - internalFileName = "internal.yaml" -) - -func generateClusterConfigMaps(m *metadata) error { - clusterGeneratedPath := filepath.Join(m.clusterDir, generatedPath) - if err := os.MkdirAll(clusterGeneratedPath, os.ModeDir|0755); err != nil { - return fmt.Errorf("failed to create cluster generated directory at %s", clusterGeneratedPath) - } - - configGenerator := configgenerator.New(m.cluster) - - kcoConfig, err := configGenerator.CoreConfig() - if err != nil { - return err - } - - kcoConfigFilePath := filepath.Join(clusterGeneratedPath, kcoConfigFileName) - if err := ioutil.WriteFile(kcoConfigFilePath, []byte(kcoConfig), 0666); err != nil { - return err - } - - kubeSystem, err := configGenerator.KubeSystem(m.clusterDir) - if err != nil { - return err - } - - kubePath := filepath.Join(m.clusterDir, kubeSystemPath) - if err := os.MkdirAll(kubePath, os.ModeDir|0755); err != nil { - return fmt.Errorf("failed to create manifests directory at %s", kubePath) - } - - kubeSystemConfigFilePath := filepath.Join(kubePath, kubeSystemFileName) - if err := ioutil.WriteFile(kubeSystemConfigFilePath, []byte(kubeSystem), 0666); err != nil { - return err - } - - tectonicSystem, err := configGenerator.TectonicSystem() - if err != nil { - return err - } - - tectonicPath := filepath.Join(m.clusterDir, tectonicSystemPath) - if err := os.MkdirAll(tectonicPath, os.ModeDir|0755); err != nil { - return fmt.Errorf("failed to create tectonic directory at %s", tectonicPath) - } - - tectonicSystemConfigFilePath := filepath.Join(tectonicPath, tectonicSystemFileName) - return ioutil.WriteFile(tectonicSystemConfigFilePath, []byte(tectonicSystem), 0666) -} - -func readClusterConfig(configFilePath string, internalFilePath string) (*config.Cluster, error) { - cfg, err := config.ParseConfigFile(configFilePath) - if err != nil { - 
return nil, fmt.Errorf("%s is not a valid config file: %s", configFilePath, err)
-	}
-
-	if internalFilePath != "" {
-		internal, err := config.ParseInternalFile(internalFilePath)
-		if err != nil {
-			return nil, fmt.Errorf("%s is not a valid internal file: %s", internalFilePath, err)
-		}
-		cfg.Internal = *internal
-	}
-
-	return cfg, nil
-}
-
-func readClusterConfigStep(m *metadata) error {
-	if m.clusterDir == "" {
-		return errors.New("no cluster dir given for reading config")
-	}
-	configFilePath := filepath.Join(m.clusterDir, configFileName)
-	internalFilePath := filepath.Join(m.clusterDir, internalFileName)
-
-	cluster, err := readClusterConfig(configFilePath, internalFilePath)
-	if err != nil {
-		return err
-	}
-
-	if err := cluster.ValidateAndLog(); err != nil {
-		return err
-	}
-
-	m.cluster = *cluster
-
-	return nil
-}
diff --git a/installer/pkg/workflow/workflow.go b/installer/pkg/workflow/workflow.go
deleted file mode 100644
index f700533b8c8..00000000000
--- a/installer/pkg/workflow/workflow.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package workflow
-
-import (
-	"github.com/openshift/installer/pkg/types/config"
-	log "github.com/sirupsen/logrus"
-)
-
-// metadata is the state store of the current workflow execution.
-// It is meant to carry state from one step to another.
-// When creating a new workflow, initial state from external parameters
-// is also injected when initializing the metadata object.
-// Steps take their inputs from the metadata object and persist
-// results onto it for later consumption.
-type metadata struct {
-	cluster config.Cluster
-	configFilePath string
-	clusterDir string
-	contOnErr bool
-}
-
-// step is the entrypoint of a workflow step implementation.
-// To add a new step, put your logic in a function that matches this signature.
-// Next, add a reference to this new function in a Workflow's steps list.
-type step func(*metadata) error
-
-// Workflow is a high-level representation
-// of a set of actions performed in a predictable order.
-type Workflow struct {
-	metadata metadata
-	steps []step
-}
-
-// Execute runs all steps in order.
-func (w Workflow) Execute() error {
-	var firstError error
-	for _, step := range w.steps {
-		if err := step(&w.metadata); err != nil {
-			if !w.metadata.contOnErr {
-				return err
-			}
-			if firstError == nil {
-				firstError = err
-			}
-			log.Warn(err)
-		}
-	}
-
-	return firstError
-}
diff --git a/installer/pkg/workflow/workflow_test.go b/installer/pkg/workflow/workflow_test.go
deleted file mode 100644
index ad02879712f..00000000000
--- a/installer/pkg/workflow/workflow_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package workflow
-
-import (
-	"errors"
-	"testing"
-)
-
-func test1Step(m *metadata) error {
-	return nil
-}
-
-func test2Step(m *metadata) error {
-	return nil
-}
-
-func test3Step(m *metadata) error {
-	return errors.New("step failed")
-}
-
-func TestWorkflowTypeExecute(t *testing.T) {
-	m := metadata{}
-
-	testCases := []struct {
-		test string
-		steps []step
-		m metadata
-		expectedError bool
-	}{
-		{
-			test: "All steps succeed",
-			steps: []step{test1Step, test2Step},
-			m: m,
-			expectedError: false,
-		},
-		{
-			test: "At least one step fails",
-			steps: []step{test1Step, test2Step, test3Step},
-			m: m,
-			expectedError: true,
-		},
-	}
-
-	for _, tc := range testCases {
-		wf := Workflow{
-			metadata: tc.m,
-			steps: tc.steps,
-		}
-		err := wf.Execute()
-		if (err != nil) != tc.expectedError {
-			t.Errorf("Test case %s: WorkflowType.Execute() expected error: %v, got: %v", tc.test, tc.expectedError, (err != nil))
-		}
-	}
-}
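Taken together, workflow.go above captures the pattern every removed command shared: a Workflow is an ordered list of step funcs threading state through one metadata value, and Execute either stops at the first failure or, with contOnErr set, runs every step and returns the first error. A minimal in-package sketch of how a command assembled one, assuming a pre-removal checkout (names here are illustrative; compare the real InitWorkflow and DestroyWorkflow constructors above):

package workflow

import "fmt"

// logClusterDirStep is a hypothetical step; real steps looked like
// readClusterConfigStep or installAssetsStep above.
func logClusterDirStep(m *metadata) error {
	fmt.Printf("operating on cluster dir %q\n", m.clusterDir)
	return nil
}

// exampleWorkflow mirrors the constructors above: inject initial state into
// metadata, then list the steps in execution order.
func exampleWorkflow(clusterDir string) Workflow {
	return Workflow{
		metadata: metadata{clusterDir: clusterDir, contOnErr: true},
		steps:    []step{logClusterDirStep},
	}
}

// Usage: err := exampleWorkflow("./mycluster").Execute()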