diff --git a/examples/tectonic.libvirt.yaml b/examples/tectonic.libvirt.yaml
index bc3d489fa7b..33a06c32659 100644
--- a/examples/tectonic.libvirt.yaml
+++ b/examples/tectonic.libvirt.yaml
@@ -11,7 +11,10 @@ admin:
 baseDomain:
 libvirt:
-  uri: qemu:///system
+  # You must specify an IP address here that libvirtd is listening on,
+  # and that the cluster-api controller pod will be able to connect
+  # to. Often 192.168.122.1 is the default for the virbr0 interface.
+  uri: qemu+tcp://192.168.122.1/system
   network:
     name: tectonic
     ifName: tt0
diff --git a/installer/pkg/config-generator/BUILD.bazel b/installer/pkg/config-generator/BUILD.bazel
index 500505f48fa..689171cbb38 100644
--- a/installer/pkg/config-generator/BUILD.bazel
+++ b/installer/pkg/config-generator/BUILD.bazel
@@ -10,6 +10,7 @@ go_library(
     importpath = "github.com/openshift/installer/installer/pkg/config-generator",
     visibility = ["//visibility:public"],
     deps = [
+        "//pkg/rhcos:go_default_library",
         "//installer/pkg/config:go_default_library",
         "//installer/pkg/copy:go_default_library",
         "//pkg/asset/tls:go_default_library",
diff --git a/installer/pkg/config-generator/fixtures/generated/tls/aggregator-ca.crt b/installer/pkg/config-generator/fixtures/generated/tls/aggregator-ca.crt
new file mode 100644
index 00000000000..27630f2cc4b
--- /dev/null
+++ b/installer/pkg/config-generator/fixtures/generated/tls/aggregator-ca.crt
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDMzCCAhugAwIBAgIISI9lAF1J/fMwDQYJKoZIhvcNAQELBQAwJjESMBAGA1UE
+CxMJb3BlbnNoaWZ0MRAwDgYDVQQDEwdyb290LWNhMB4XDTE4MDkwNzIxNTIyMVoX
+DTI4MDkwNDIxNTIyMlowKDERMA8GA1UECxMIYm9vdGt1YmUxEzARBgNVBAMTCmFn
+Z3JlZ2F0b3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNm3Hre4oy
++J31X3i4dxy8hejzBISY8TSeoXMP9AZczQM1oyekSgf5EMSfh9DrDcKmPuTMhl6a
+w6ZSjPDbrFlKrF2oIpq04Zn160i95QLC7zI8WzU/cNDS22J9pk8k9K47c/hZKRrd
+AjT0rNI4qpVsDv43O1H2s5M6HiXB62rwakEALoATeufaPJEcAgP31nH9FhHft3uO
+QkOur6iuXBDtv/FtPEFhmR5rDBYvUxaKXfB5c+TCevTZtjmP2bdRrvyHdWWapPtJ
+auoGcV5s4Skp3tcEy24Qrl8FxR+Rsy7V+eYUPIWG1mDugmwpgq4SrB9idGoc/jqu
+p6oDKRZUpxZdAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBRFMWpOb15dmp4aVFFdPtY/lrgkGTAfBgNVHSMEGDAWgBRF
+MWpOb15dmp4aVFFdPtY/lrgkGTANBgkqhkiG9w0BAQsFAAOCAQEAjzLnsIZ+Jwqr
+8h4TucqabLYkW1b/mk3k2GDg46xTOGpJo8UGesboclDxK/rtmDObgMLh2I9ekkPY
+Yr1QNE5W4vqDOsmBm2MkqDVUg/EuOdSiNmWcJs/du452AVtk80yXTSaBQuW85SAe
+AlG2CXadwTkxtLypLsUeOriKDRCV3hLCwd8lXwAHsjoU5aBLgin7lsXItoiM19LP
+eJ06zH3FaOc4Kowf8JllJXel414DBsP8nX23snETIotxPXFol9xQIkjHCWaxKQSc
+FWlmnA2exJprQHrt28C5W9x6odc27zKxS2D06IzETE4BtinwYhepb7P/qTAo+MX5
+FYZx86N7eg==
+-----END CERTIFICATE-----
diff --git a/installer/pkg/config-generator/fixtures/kube-system.yaml b/installer/pkg/config-generator/fixtures/kube-system.yaml
index 42b45050bff..0652cef9d5a 100644
--- a/installer/pkg/config-generator/fixtures/kube-system.yaml
+++ b/installer/pkg/config-generator/fixtures/kube-system.yaml
@@ -62,6 +62,40 @@ data:
       service_cidr: 10.3.0.0/16
     routingConfig:
       subdomain: test.cluster.com
+  mao-config: |
+    apiServiceCA: |
+      -----BEGIN CERTIFICATE-----
+      MIIDMzCCAhugAwIBAgIISI9lAF1J/fMwDQYJKoZIhvcNAQELBQAwJjESMBAGA1UE
+      CxMJb3BlbnNoaWZ0MRAwDgYDVQQDEwdyb290LWNhMB4XDTE4MDkwNzIxNTIyMVoX
+      DTI4MDkwNDIxNTIyMlowKDERMA8GA1UECxMIYm9vdGt1YmUxEzARBgNVBAMTCmFn
+      Z3JlZ2F0b3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNm3Hre4oy
+      +J31X3i4dxy8hejzBISY8TSeoXMP9AZczQM1oyekSgf5EMSfh9DrDcKmPuTMhl6a
+      w6ZSjPDbrFlKrF2oIpq04Zn160i95QLC7zI8WzU/cNDS22J9pk8k9K47c/hZKRrd
+      AjT0rNI4qpVsDv43O1H2s5M6HiXB62rwakEALoATeufaPJEcAgP31nH9FhHft3uO
+      QkOur6iuXBDtv/FtPEFhmR5rDBYvUxaKXfB5c+TCevTZtjmP2bdRrvyHdWWapPtJ
+      auoGcV5s4Skp3tcEy24Qrl8FxR+Rsy7V+eYUPIWG1mDugmwpgq4SrB9idGoc/jqu
+      p6oDKRZUpxZdAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTAD
+      AQH/MB0GA1UdDgQWBBRFMWpOb15dmp4aVFFdPtY/lrgkGTAfBgNVHSMEGDAWgBRF
+      MWpOb15dmp4aVFFdPtY/lrgkGTANBgkqhkiG9w0BAQsFAAOCAQEAjzLnsIZ+Jwqr
+      8h4TucqabLYkW1b/mk3k2GDg46xTOGpJo8UGesboclDxK/rtmDObgMLh2I9ekkPY
+      Yr1QNE5W4vqDOsmBm2MkqDVUg/EuOdSiNmWcJs/du452AVtk80yXTSaBQuW85SAe
+      AlG2CXadwTkxtLypLsUeOriKDRCV3hLCwd8lXwAHsjoU5aBLgin7lsXItoiM19LP
+      eJ06zH3FaOc4Kowf8JllJXel414DBsP8nX23snETIotxPXFol9xQIkjHCWaxKQSc
+      FWlmnA2exJprQHrt28C5W9x6odc27zKxS2D06IzETE4BtinwYhepb7P/qTAo+MX5
+      FYZx86N7eg==
+      -----END CERTIFICATE-----
+    apiVersion: v1
+    aws:
+      availabilityZone: ""
+      clusterID: ""
+      clusterName: test
+      image: ami-07307c397daf4d02e
+      region: us-east-1
+      replicas: 3
+    kind: machineAPIOperatorConfig
+    libvirt: null
+    provider: aws
+    targetNamespace: openshift-cluster-api
   network-config: |
     apiVersion: v1
     calicoConfig:
diff --git a/installer/pkg/config-generator/generator.go b/installer/pkg/config-generator/generator.go
index 9946d4b7e3d..513bbc472eb 100644
--- a/installer/pkg/config-generator/generator.go
+++ b/installer/pkg/config-generator/generator.go
@@ -6,7 +6,9 @@ import (
 	"encoding/hex"
 	"errors"
 	"fmt"
+	"io/ioutil"
 	"net"
+	"path/filepath"
 	"strings"
 
 	"github.com/apparentlymart/go-cidr/cidr"
@@ -19,6 +21,7 @@ import (
 
 	"github.com/openshift/installer/installer/pkg/config"
 	"github.com/openshift/installer/pkg/ipnet"
+	"github.com/openshift/installer/pkg/rhcos"
 	"github.com/openshift/installer/pkg/types"
 )
 
@@ -32,6 +35,7 @@ const (
 	ingressConfigIngressKind = "haproxy-router"
 	certificatesStrategy     = "userProvidedCA"
 	identityAPIService       = "tectonic-identity-api.tectonic-system.svc.cluster.local"
+	maoTargetNamespace       = "openshift-cluster-api"
 )
 
 // ConfigGenerator defines the cluster config generation for a cluster.
@@ -62,8 +66,92 @@ func New(cluster config.Cluster) ConfigGenerator {
 	}
 }
 
+// maoOperatorConfig contains configuration for mao managed stack
+// TODO(enxebre): move up to "github.com/coreos/tectonic-config
+type maoOperatorConfig struct {
+	metav1.TypeMeta `json:",inline"`
+	TargetNamespace string         `json:"targetNamespace"`
+	APIServiceCA    string         `json:"apiServiceCA"`
+	Provider        string         `json:"provider"`
+	AWS             *awsConfig     `json:"aws"`
+	Libvirt         *libvirtConfig `json:"libvirt"`
+}
+
+type libvirtConfig struct {
+	ClusterName string `json:"clusterName"`
+	URI         string `json:"uri"`
+	NetworkName string `json:"networkName"`
+	IPRange     string `json:"iprange"`
+	Replicas    int    `json:"replicas"`
+}
+
+type awsConfig struct {
+	ClusterName      string `json:"clusterName"`
+	ClusterID        string `json:"clusterID"`
+	Region           string `json:"region"`
+	AvailabilityZone string `json:"availabilityZone"`
+	Image            string `json:"image"`
+	Replicas         int    `json:"replicas"`
+}
+
+func (c *ConfigGenerator) maoConfig(clusterDir string) (*maoOperatorConfig, error) {
+	cfg := maoOperatorConfig{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "machineAPIOperatorConfig",
+		},
+
+		TargetNamespace: maoTargetNamespace,
+	}
+
+	ca, err := ioutil.ReadFile(filepath.Join(clusterDir, aggregatorCACertPath))
+	if err != nil {
+		return nil, fmt.Errorf("could not read aggregator CA: %v", err)
+	}
+
+	cfg.APIServiceCA = string(ca)
+	cfg.Provider = tectonicCloudProvider(c.Platform)
+
+	switch c.Platform {
+	case config.PlatformAWS:
+		var ami string
+
+		if c.AWS.EC2AMIOverride != "" {
+			ami = c.AWS.EC2AMIOverride
+		} else {
+			ami, err = rhcos.AMI(config.DefaultChannel, c.Region)
+			if err != nil {
+				return nil, fmt.Errorf("failed to lookup RHCOS AMI: %v", err)
+			}
+		}
+
+		cfg.AWS = &awsConfig{
+			ClusterName:      c.Name,
+			ClusterID:        c.ClusterID,
+			Region:           c.Region,
+			AvailabilityZone: "",
+			Image:            ami,
+			Replicas:         c.NodeCount(c.Worker.NodePools),
+		}
+
+	case config.PlatformLibvirt:
+		cfg.Libvirt = &libvirtConfig{
+			ClusterName: c.Name,
+			URI:         c.Libvirt.URI,
+			NetworkName: c.Libvirt.Network.Name,
+			IPRange:     c.Libvirt.IPRange,
+			Replicas:    c.NodeCount(c.Worker.NodePools),
+		}
+
+	default:
+		return nil, fmt.Errorf("unknown provider for machine-api-operator: %v", cfg.Provider)
+	}
+
+	return &cfg, nil
+}
+
 // KubeSystem returns, if successful, a yaml string for the kube-system.
-func (c *ConfigGenerator) KubeSystem() (string, error) {
+func (c *ConfigGenerator) KubeSystem(clusterDir string) (string, error) {
 	coreConfig, err := c.coreConfig()
 	if err != nil {
 		return "", err
 	}
@@ -72,11 +160,16 @@ func (c *ConfigGenerator) KubeSystem() (string, error) {
 	if err != nil {
 		return "", err
 	}
+	maoConfig, err := c.maoConfig(clusterDir)
+	if err != nil {
+		return "", err
+	}
 
 	return configMap("kube-system", genericData{
 		"kco-config":     coreConfig,
 		"network-config": c.networkConfig(),
 		"install-config": installConfig,
+		"mao-config":     maoConfig,
 	})
 }
diff --git a/installer/pkg/config-generator/generator_test.go b/installer/pkg/config-generator/generator_test.go
index 88d4d8eab67..f55f7b9fd6c 100644
--- a/installer/pkg/config-generator/generator_test.go
+++ b/installer/pkg/config-generator/generator_test.go
@@ -73,7 +73,7 @@ func TestGetEtcdServersURLs(t *testing.T) {
 func TestKubeSystem(t *testing.T) {
 	config := initConfig(t, "test-aws.yaml")
 
-	got, err := config.KubeSystem()
+	got, err := config.KubeSystem("./fixtures")
 	if err != nil {
 		t.Errorf("Test case TestKubeSystem: failed to get KubeSystem(): %s", err)
 	}
diff --git a/installer/pkg/config-generator/tls.go b/installer/pkg/config-generator/tls.go
index f328995c424..48130fd1b07 100644
--- a/installer/pkg/config-generator/tls.go
+++ b/installer/pkg/config-generator/tls.go
@@ -255,13 +255,21 @@ func (c *ConfigGenerator) GenerateTLSConfig(clusterDir string) error {
 
 	// Cluster API cert
 	cfg = &tls.CertCfg{
-		Subject:   pkix.Name{CommonName: "cluster-apiserver", OrganizationalUnit: []string{"bootkube"}},
-		KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
-		Validity:  tls.ValidityTenYears,
-		IsCA:      true,
+		KeyUsages:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
+		Subject:      pkix.Name{CommonName: "clusterapi", OrganizationalUnit: []string{"bootkube"}},
+		DNSNames: []string{
+			"clusterapi",
+			fmt.Sprintf("clusterapi.%s", maoTargetNamespace),
+			fmt.Sprintf("clusterapi.%s.svc", maoTargetNamespace),
+			fmt.Sprintf("clusterapi.%s.svc.cluster.local", maoTargetNamespace),
+		},
+		Validity: tls.ValidityTenYears,
+		IsCA:     false,
 	}
+
 	if _, _, err := generateCert(clusterDir, aggregatorCAKey, aggregatorCACert, clusterAPIServerKeyPath, clusterAPIServerCertPath, cfg, true); err != nil {
-		return fmt.Errorf("failed to generate cluster-apiserver CA: %v", err)
+		return fmt.Errorf("failed to generate cluster-apiserver certificate: %v", err)
 	}
 
 	// Service Account private and public key.
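Note: the kube-system.yaml fixture above pins down the AWS rendering of mao-config. On libvirt, the same maoOperatorConfig struct would marshal roughly as follows (a sketch only: the URI and network name are taken from examples/tectonic.libvirt.yaml above, while clusterName, iprange, and replicas are purely illustrative):

apiServiceCA: |
  (contents of generated/tls/aggregator-ca.crt, as above)
apiVersion: v1
aws: null
kind: machineAPIOperatorConfig
libvirt:
  clusterName: test
  iprange: 192.168.124.0/24
  networkName: tectonic
  replicas: 3
  uri: qemu+tcp://192.168.122.1/system
provider: libvirt
targetNamespace: openshift-cluster-api
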
diff --git a/installer/pkg/workflow/BUILD.bazel b/installer/pkg/workflow/BUILD.bazel
index 66b002c067b..6fe008d7c0e 100644
--- a/installer/pkg/workflow/BUILD.bazel
+++ b/installer/pkg/workflow/BUILD.bazel
@@ -21,6 +21,8 @@ go_library(
         "//vendor/github.com/Sirupsen/logrus:go_default_library",
         "//vendor/gopkg.in/yaml.v2:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
         "//vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset:go_default_library",
     ],
diff --git a/installer/pkg/workflow/destroy.go b/installer/pkg/workflow/destroy.go
index 188c7a8593b..c4193e542e6 100644
--- a/installer/pkg/workflow/destroy.go
+++ b/installer/pkg/workflow/destroy.go
@@ -1,5 +1,24 @@
 package workflow
 
+import (
+	"encoding/json"
+	"fmt"
+	"path/filepath"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/tools/clientcmd"
+	"sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset"
+)
+
+const (
+	machineSetNamespace = "openshift-cluster-api"
+	workerMachineSet    = "worker"
+)
+
 // DestroyWorkflow creates new instances of the 'destroy' workflow,
 // responsible for running the actions required to remove resources
 // of an existing cluster and clean up any remaining artefacts.
@@ -10,6 +29,7 @@ func DestroyWorkflow(clusterDir string) Workflow {
 		readClusterConfigStep,
 		generateTerraformVariablesStep,
 		destroyBootstrapStep,
+		destroyWorkersStep,
 		destroyInfraStep,
 		destroyAssetsStep,
 	},
@@ -28,6 +48,86 @@ func destroyBootstrapStep(m *metadata) error {
 	return runDestroyStep(m, bootstrapStep)
 }
 
+func destroyWorkersStep(m *metadata) error {
+	kubeconfig := filepath.Join(m.clusterDir, generatedPath, "auth", "kubeconfig")
+
+	client, err := buildClusterClient(kubeconfig)
+	if err != nil {
+		return fmt.Errorf("failed to build cluster-api client: %v", err)
+	}
+
+	if err := scaleDownWorkers(client); err != nil {
+		return fmt.Errorf("failed to scale worker MachineSet: %v", err)
+	}
+
+	if err := waitForWorkers(client); err != nil {
+		return fmt.Errorf("worker MachineSet failed to scale down: %v", err)
+	}
+
+	if err := deleteWorkerMachineSet(client); err != nil {
+		return fmt.Errorf("failed to delete worker MachineSet: %v", err)
+	}
+
+	return nil
+}
+
+func scaleDownWorkers(client *clientset.Clientset) error {
+	// Unfortunately, MachineSets don't yet support the scale
+	// subresource. So we have to patch the object to set the
+	// replicas to zero.
+	patch := []struct {
+		Op    string `json:"op"`
+		Path  string `json:"path"`
+		Value uint32 `json:"value"`
+	}{{
+		Op:    "replace",
+		Path:  "/spec/replicas",
+		Value: 0,
+	}}
+
+	patchBytes, err := json.Marshal(patch)
+	if err != nil {
+		return err
+	}
+
+	_, err = client.ClusterV1alpha1().
+		MachineSets(machineSetNamespace).
+		Patch(workerMachineSet, types.JSONPatchType, patchBytes)
+
+	return err
+}
+
+func waitForWorkers(client *clientset.Clientset) error {
+	interval := 3 * time.Second
+	timeout := 60 * time.Second
+
+	log.Info("Waiting for worker MachineSet to scale down...")
+
+	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
+		machineSet, err := client.ClusterV1alpha1().
+			MachineSets(machineSetNamespace).
+			Get(workerMachineSet, v1.GetOptions{})
+
+		if err != nil {
+			return false, err
+		}
+
+		if machineSet.Status.Replicas > 0 {
+			return false, nil
+		}
+
+		return true, nil
+	})
+
+	return err
+}
+
+func deleteWorkerMachineSet(client *clientset.Clientset) error {
+	return client.ClusterV1alpha1().
+		MachineSets(machineSetNamespace).
+		Delete(workerMachineSet, &v1.DeleteOptions{})
+}
+
 func runDestroyStep(m *metadata, step string, extraArgs ...string) error {
 	if !hasStateFile(m.clusterDir, step) {
 		// there is no statefile, therefore nothing to destroy for this step
@@ -40,3 +140,17 @@ func runDestroyStep(m *metadata, step string, extraArgs ...string) error {
 
 	return tfDestroy(m.clusterDir, step, templateDir, extraArgs...)
 }
+
+func buildClusterClient(kubeconfig string) (*clientset.Clientset, error) {
+	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build config: %v", err)
+	}
+
+	client, err := clientset.NewForConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build client: %v", err)
+	}
+
+	return client, nil
+}
diff --git a/installer/pkg/workflow/init.go b/installer/pkg/workflow/init.go
index 8145ef58736..d79a2aa58ac 100644
--- a/installer/pkg/workflow/init.go
+++ b/installer/pkg/workflow/init.go
@@ -18,6 +18,7 @@ const (
 	generatedPath      = "generated"
 	kcoConfigFileName  = "kco-config.yaml"
 	tncoConfigFileName = "tnco-config.yaml"
+	maoConfigFileName  = "mao-config.yaml"
 	kubeSystemPath     = "generated/manifests"
 	kubeSystemFileName = "cluster-config.yaml"
 	tectonicSystemPath = "generated/tectonic"
diff --git a/installer/pkg/workflow/install.go b/installer/pkg/workflow/install.go
index 40de68ff72e..f186686907b 100644
--- a/installer/pkg/workflow/install.go
+++ b/installer/pkg/workflow/install.go
@@ -18,8 +18,8 @@ func InstallWorkflow(clusterDir string) Workflow {
 		generateTerraformVariablesStep,
 		generateTLSConfigStep,
 		generateClusterConfigMaps,
-		installAssetsStep,
 		generateIgnConfigStep,
+		installAssetsStep,
 		installInfraStep,
 		installBootstrapStep,
 	},
diff --git a/installer/pkg/workflow/utils.go b/installer/pkg/workflow/utils.go
index 80f8896143b..bc51fe26b34 100644
--- a/installer/pkg/workflow/utils.go
+++ b/installer/pkg/workflow/utils.go
@@ -68,7 +68,7 @@ func generateClusterConfigMaps(m *metadata) error {
 		return err
 	}
 
-	kubeSystem, err := configGenerator.KubeSystem()
+	kubeSystem, err := configGenerator.KubeSystem(m.clusterDir)
 	if err != nil {
 		return err
 	}
diff --git a/modules/aws/master/main.tf b/modules/aws/master/main.tf
index b91968be81d..c0bc084e456 100644
--- a/modules/aws/master/main.tf
+++ b/modules/aws/master/main.tf
@@ -48,19 +48,14 @@ resource "aws_iam_role_policy" "master_policy" {
     "Version": "2012-10-17",
     "Statement": [
       {
-        "Action": "ec2:Describe*",
+        "Action": "ec2:*",
         "Resource": "*",
         "Effect": "Allow"
       },
       {
-        "Effect": "Allow",
-        "Action": "ec2:AttachVolume",
-        "Resource": "*"
-      },
-      {
-        "Effect": "Allow",
-        "Action": "ec2:DetachVolume",
-        "Resource": "*"
+        "Action": "iam:PassRole",
+        "Resource": "*",
+        "Effect": "Allow"
       },
       {
         "Action" : [
diff --git a/modules/aws/worker/main.tf b/modules/aws/worker/main.tf
deleted file mode 100644
index 185d433a1ab..00000000000
--- a/modules/aws/worker/main.tf
+++ /dev/null
@@ -1,122 +0,0 @@
-locals {
-  arn = "aws"
-}
-
-resource "aws_iam_instance_profile" "worker" {
-  name = "${var.cluster_name}-worker-profile"
-
-  role = "${var.worker_iam_role == "" ?
-    join("|", aws_iam_role.worker_role.*.name) :
-    join("|", data.aws_iam_role.worker_role.*.name)
-  }"
-}
-
-data "aws_iam_role" "worker_role" {
-  count = "${var.worker_iam_role == "" ? 0 : 1}"
-  name  = "${var.worker_iam_role}"
-}
-
-resource "aws_iam_role" "worker_role" {
-  count = "${var.worker_iam_role == "" ? 1 : 0}"
-  name  = "${var.cluster_name}-worker-role"
-  path  = "/"
-
-  assume_role_policy = <
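
Note: the worker machines that this deleted Terraform module used to create are now expected to come from a cluster-api MachineSet in the openshift-cluster-api namespace. A minimal sketch of that object (assuming the upstream cluster.k8s.io/v1alpha1 schema, with the provider-specific machine template omitted) shows what destroyWorkersStep patches and then deletes:

apiVersion: cluster.k8s.io/v1alpha1
kind: MachineSet
metadata:
  name: worker                      # workerMachineSet in destroy.go
  namespace: openshift-cluster-api  # machineSetNamespace / maoTargetNamespace
spec:
  # destroyWorkersStep patches this field to 0 with a JSON patch on /spec/replicas,
  # waits for status.replicas to drop to 0, and then deletes the MachineSet.
  replicas: 3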