Skip to content

Commit

Permalink
Add feature: Custom IAM Instance Profile
Browse files Browse the repository at this point in the history
This way Cluster IAM roles can be managed externally, either manually,
using cloudformation or any other tool.
  • Loading branch information
sp-borja-juncosa authored and chrislovecnm committed Oct 1, 2017
1 parent bd4ba4d commit e88c6f5
Show file tree
Hide file tree
Showing 20 changed files with 1,081 additions and 163 deletions.
61 changes: 34 additions & 27 deletions cmd/kops/integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,26 +47,24 @@ import (

// TestMinimal runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
func TestMinimal(t *testing.T) {
runTestAWS(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha0", false, 1)
runTestAWS(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha1", false, 1)
runTestAWS(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha2", false, 1)
runTestAWS(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha0", false, 1, true)
runTestAWS(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha1", false, 1, true)
runTestAWS(t, "minimal.example.com", "../../tests/integration/minimal", "v1alpha2", false, 1, true)
}

// TestHA runs the test on a simple HA configuration, similar to kops create cluster minimal.example.com --zones us-west-1a,us-west-1b,us-west-1c --master-count=3
func TestHA(t *testing.T) {
runTestAWS(t, "ha.example.com", "../../tests/integration/ha", "v1alpha1", false, 3)
runTestAWS(t, "ha.example.com", "../../tests/integration/ha", "v1alpha2", false, 3)
runTestAWS(t, "ha.example.com", "../../tests/integration/ha", "v1alpha1", false, 3, true)
runTestAWS(t, "ha.example.com", "../../tests/integration/ha", "v1alpha2", false, 3, true)
}

// TestHighAvailabilityGCE runs the test on a simple HA GCE configuration, similar to kops create cluster ha-gce.example.com
// --zones us-test1-a,us-test1-b,us-test1-c --master-count=3
func TestHighAvailabilityGCE(t *testing.T) {
runTestGCE(t, "ha-gce.example.com", "../../tests/integration/ha_gce", "v1alpha2", false, 3)
}

// TestComplex runs the test on a more complex configuration, intended to hit more of the edge cases
func TestComplex(t *testing.T) {
runTestAWS(t, "complex.example.com", "../../tests/integration/complex", "v1alpha2", false, 1)
runTestAWS(t, "complex.example.com", "../../tests/integration/complex", "v1alpha2", false, 1, true)
}

// TestMinimalCloudformation runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
Expand All @@ -78,56 +76,62 @@ func TestMinimalCloudformation(t *testing.T) {

// TestMinimal_141 runs the test on a configuration from 1.4.1 release
func TestMinimal_141(t *testing.T) {
runTestAWS(t, "minimal-141.example.com", "../../tests/integration/minimal-141", "v1alpha0", false, 1)
runTestAWS(t, "minimal-141.example.com", "../../tests/integration/minimal-141", "v1alpha0", false, 1, true)
}

// TestPrivateWeave runs the test on a configuration with private topology, weave networking
func TestPrivateWeave(t *testing.T) {
runTestAWS(t, "privateweave.example.com", "../../tests/integration/privateweave", "v1alpha1", true, 1)
runTestAWS(t, "privateweave.example.com", "../../tests/integration/privateweave", "v1alpha2", true, 1)
runTestAWS(t, "privateweave.example.com", "../../tests/integration/privateweave", "v1alpha1", true, 1, true)
runTestAWS(t, "privateweave.example.com", "../../tests/integration/privateweave", "v1alpha2", true, 1, true)
}

// TestPrivateFlannel runs the test on a configuration with private topology, flannel networking
func TestPrivateFlannel(t *testing.T) {
runTestAWS(t, "privateflannel.example.com", "../../tests/integration/privateflannel", "v1alpha1", true, 1)
runTestAWS(t, "privateflannel.example.com", "../../tests/integration/privateflannel", "v1alpha2", true, 1)
runTestAWS(t, "privateflannel.example.com", "../../tests/integration/privateflannel", "v1alpha1", true, 1, true)
runTestAWS(t, "privateflannel.example.com", "../../tests/integration/privateflannel", "v1alpha2", true, 1, true)
}

// TestPrivateCalico runs the test on a configuration with private topology, calico networking
func TestPrivateCalico(t *testing.T) {
runTestAWS(t, "privatecalico.example.com", "../../tests/integration/privatecalico", "v1alpha1", true, 1)
runTestAWS(t, "privatecalico.example.com", "../../tests/integration/privatecalico", "v1alpha2", true, 1)
runTestAWS(t, "privatecalico.example.com", "../../tests/integration/privatecalico", "v1alpha1", true, 1, true)
runTestAWS(t, "privatecalico.example.com", "../../tests/integration/privatecalico", "v1alpha2", true, 1, true)
}

// TestPrivateCanal runs the test on a configuration with private topology, canal networking
func TestPrivateCanal(t *testing.T) {
runTestAWS(t, "privatecanal.example.com", "../../tests/integration/privatecanal", "v1alpha1", true, 1)
runTestAWS(t, "privatecanal.example.com", "../../tests/integration/privatecanal", "v1alpha2", true, 1)
runTestAWS(t, "privatecanal.example.com", "../../tests/integration/privatecanal", "v1alpha1", true, 1, true)
runTestAWS(t, "privatecanal.example.com", "../../tests/integration/privatecanal", "v1alpha2", true, 1, true)
}

// TestPrivateKopeio runs the test on a configuration with private topology, kopeio networking
func TestPrivateKopeio(t *testing.T) {
runTestAWS(t, "privatekopeio.example.com", "../../tests/integration/privatekopeio", "v1alpha2", true, 1)
runTestAWS(t, "privatekopeio.example.com", "../../tests/integration/privatekopeio", "v1alpha2", true, 1, true)
}

// TestPrivateDns1 runs the test on a configuration with private topology, private dns
func TestPrivateDns1(t *testing.T) {
runTestAWS(t, "privatedns1.example.com", "../../tests/integration/privatedns1", "v1alpha2", true, 1)
runTestAWS(t, "privatedns1.example.com", "../../tests/integration/privatedns1", "v1alpha2", true, 1, true)
}

// TestPrivateDns2 runs the test on a configuration with private topology, private dns, extant vpc
func TestPrivateDns2(t *testing.T) {
runTestAWS(t, "privatedns2.example.com", "../../tests/integration/privatedns2", "v1alpha2", true, 1)
runTestAWS(t, "privatedns2.example.com", "../../tests/integration/privatedns2", "v1alpha2", true, 1, true)
}

// TestSharedSubnet runs the test on a configuration with a shared subnet (and VPC)
func TestSharedSubnet(t *testing.T) {
runTestAWS(t, "sharedsubnet.example.com", "../../tests/integration/shared_subnet", "v1alpha2", false, 1)
runTestAWS(t, "sharedsubnet.example.com", "../../tests/integration/shared_subnet", "v1alpha2", false, 1, true)
}

// TestSharedVPC runs the test on a configuration with a shared VPC
func TestSharedVPC(t *testing.T) {
runTestAWS(t, "sharedvpc.example.com", "../../tests/integration/shared_vpc", "v1alpha2", false, 1)
runTestAWS(t, "sharedvpc.example.com", "../../tests/integration/shared_vpc", "v1alpha2", false, 1, true)
}

// TestCreateClusterCustomAuthProfile runs the test on a cluster that reuses a pre-existing
// IAM instance profile, similar to kops create cluster custom-iam-role.example.com --zones us-test-1a.
// The feature is gated behind the CustomAuthProfileSupport feature flag, enabled here explicitly.
// expectPolicies is false: with an externally-managed profile, kops emits no IAM role/policy files.
func TestCreateClusterCustomAuthProfile(t *testing.T) {
featureflag.ParseFlags("+CustomAuthProfileSupport")
runTestAWS(t, "custom-iam-role.example.com", "../../tests/integration/custom_iam_role", "v1alpha2", false, 1, false)
}

func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName string, srcDir string, version string, private bool, zones int, expectedFilenames []string) {
Expand Down Expand Up @@ -236,20 +240,23 @@ func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName stri
}
}

func runTestAWS(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int) {
func runTestAWS(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, expectPolicies bool) {
h := testutils.NewIntegrationTestHarness(t)
defer h.Close()

h.SetupMockAWS()

expectedFilenames := []string{
"aws_iam_role_masters." + clusterName + "_policy",
"aws_iam_role_nodes." + clusterName + "_policy",
"aws_iam_role_policy_masters." + clusterName + "_policy",
"aws_iam_role_policy_nodes." + clusterName + "_policy",
"aws_key_pair_kubernetes." + clusterName + "-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key",
"aws_launch_configuration_nodes." + clusterName + "_user_data",
}
if expectPolicies {
expectedFilenames = append(expectedFilenames,
"aws_iam_role_masters."+clusterName+"_policy",
"aws_iam_role_nodes."+clusterName+"_policy",
"aws_iam_role_policy_masters."+clusterName+"_policy",
"aws_iam_role_policy_nodes."+clusterName+"_policy")
}

for i := 0; i < zones; i++ {
zone := "us-test-1" + string([]byte{byte('a') + byte(i)})
Expand Down
17 changes: 17 additions & 0 deletions docs/cluster_spec.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,23 @@ spec:
dns: {}
```
### authProfile ALPHA SUPPORT
This configuration allows a cluster to utilize existing IAM instance profiles. Currently this configuration only supports AWS.
In order to use this feature you must have the instance profile ARN of a pre-existing role, and enable the kops feature flag by setting
`export KOPS_FEATURE_FLAGS=+CustomAuthProfileSupport`. This feature is in ALPHA release only, and can cause very unusual behavior
with Kubernetes if used incorrectly.

AuthProfile example:

```yaml
spec:
  authProfile:
master: arn:aws:iam::123417490108:instance-profile/kops-custom-master-role
node: arn:aws:iam::123417490108:instance-profile/kops-custom-node-role
```

### api

When configuring a LoadBalancer, you can also choose to have a public ELB or an internal (VPC only) ELB. The `type`
field should be `Public` or `Internal`.
Expand Down
31 changes: 29 additions & 2 deletions docs/iam_roles.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,9 @@

Two IAM roles are created for the cluster: one for the masters, and one for the nodes.

> Work is being done on scoping permissions to the minimum required to setup and maintain cluster.
> Work is being done on scoping permissions to the minimum required to setup and maintain cluster.
> Please note that currently all Pods running on your cluster have access to instance IAM role.
> Consider using projects such as [kube2iam](https://github.com/jtblin/kube2iam) to prevent that.
> Consider using projects such as [kube2iam](https://github.com/jtblin/kube2iam) to prevent that.
Master permissions:

Expand Down Expand Up @@ -136,3 +136,30 @@ You can have an additional policy for each kops role (node, master, bastion). Fo
}
]
```

## Reusing Existing Instance Profile

Sometimes you may need to reuse existing IAM Instance Profiles. You can do this
through the `authProfile` cluster spec API field. This setting is highly advanced
and only enabled via the `CustomAuthProfileSupport` feature flag. Setting the wrong role
permissions can impact various components inside of Kubernetes, and cause
unexpected issues. This feature is in place to support the initial documenting and testing of
the creation of custom roles. When in doubt, use the existing kops functionality, or reach out
if you want to help!

At this point, we do not have a full definition of the fine-grained roles. Please refer
to [this issue](https://github.com/kubernetes/kops/issues/1873) for more information.

Please use this feature wisely! Enable the feature flag by:

```console
$ export KOPS_FEATURE_FLAGS="+CustomAuthProfileSupport"
```
Inside the cluster spec, define instance profiles specific to the masters and/or
the nodes.

```yaml
spec:
  authProfile:
master: arn:aws:iam::123417490108:instance-profile/kops-custom-master-role
node: arn:aws:iam::123417490108:instance-profile/kops-custom-node-role
```
27 changes: 27 additions & 0 deletions pkg/apis/kops/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,13 @@ type ClusterSpec struct {
// 'external' do not apply updates automatically - they are applied manually or by an external system
// missing: default policy (currently OS security upgrades that do not require a reboot)
UpdatePolicy *string `json:"updatePolicy,omitempty"`

// AuthProfile references existing IAM instance profiles (by ARN) to use for the instances.
// Only supported for AWS
AuthProfile *AuthProfile `json:"authProfile,omitempty"`

// Additional policies to add for roles
// Map is keyed by: master, node
AdditionalPolicies *map[string]string `json:"additionalPolicies,omitempty"`
// A collection of files assets for deployed cluster wide
FileAssets []FileAssetSpec `json:"fileAssets,omitempty"`
Expand Down Expand Up @@ -279,6 +285,27 @@ type ExternalDNSConfig struct {
WatchNamespace string `json:"watchNamespace,omitempty"`
}

// AuthProfile names pre-existing IAM instance profiles (by ARN) to attach to the
// cluster's instances, instead of having kops create and manage the profiles itself.
// At this point only AWS is supported for this option.
// This is a very advanced option, which can really impact a Kubernetes cluster if not used properly,
// or open security holes as well. We recommend using kops to construct the profile, or re-using a
// duplicate profile that kops uses. If users are not able to create auth profiles, a user
// with the correct auth can run `kops update` using the iam phase.
type AuthProfile struct {

// Master is the name of the instance profile to use for the master
// Format expected is arn:aws:iam::123456789012:instance-profile/ExampleMasterRole
Master *string `json:"master,omitempty"`

// Node is the name of the instance profile to use for the node
// Format expected is arn:aws:iam::123456789012:instance-profile/ExampleNodeRole
Node *string `json:"node,omitempty"`

// Bastion is the name of the instance profile to use for the bastion
// Format expected is arn:aws:iam::123456789012:instance-profile/ExampleBastionRole
Bastion *string `json:"bastion,omitempty"`
}

// EtcdClusterSpec is the etcd cluster specification
type EtcdClusterSpec struct {
// Name is the name of the etcd cluster (main, events etc)
Expand Down
26 changes: 26 additions & 0 deletions pkg/apis/kops/v1alpha1/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -109,6 +109,11 @@ type ClusterSpec struct {
// 'external' do not apply updates automatically - they are applied manually or by an external system
// missing: default policy (currently OS security upgrades that do not require a reboot)
UpdatePolicy *string `json:"updatePolicy,omitempty"`

// AuthProfile references existing IAM instance profiles (by ARN) to use for the instances.
// Only supported for AWS
AuthProfile *AuthProfile `json:"authProfile,omitempty"`

// Additional policies to add for roles
AdditionalPolicies *map[string]string `json:"additionalPolicies,omitempty"`
// A collection of files assets for deployed cluster wide
Expand Down Expand Up @@ -278,6 +283,27 @@ type ExternalDNSConfig struct {
WatchNamespace string `json:"watchNamespace,omitempty"`
}

// AuthProfile names pre-existing IAM instance profiles (by ARN) to attach to the
// cluster's instances, instead of having kops create and manage the profiles itself.
// At this point only AWS is supported for this option.
// This is a very advanced option, which can really impact a Kubernetes cluster if not used properly,
// or open security holes as well. We recommend using kops to construct the profile, or re-using a
// duplicate profile that kops uses. If users are not able to create auth profiles, a user
// with the correct auth can run `kops update` using the iam phase.
type AuthProfile struct {

// Master is the name of the instance profile to use for the master
// Format expected is arn:aws:iam::123456789012:instance-profile/ExampleMasterRole
Master *string `json:"master,omitempty"`

// Node is the name of the instance profile to use for the node
// Format expected is arn:aws:iam::123456789012:instance-profile/ExampleNodeRole
Node *string `json:"node,omitempty"`

// Bastion is the name of the instance profile to use for the bastion
// Format expected is arn:aws:iam::123456789012:instance-profile/ExampleBastionRole
Bastion *string `json:"bastion,omitempty"`
}

// EtcdClusterSpec is the etcd cluster specification
type EtcdClusterSpec struct {
// Name is the name of the etcd cluster (main, events etc)
Expand Down
Loading

0 comments on commit e88c6f5

Please sign in to comment.