Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Enable inter-nodegroup DNS access #418

Merged
merged 3 commits into from
Jan 10, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 5 additions & 3 deletions Gopkg.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion Gopkg.toml
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ required = [

[[constraint]]
name = "github.com/dlespiau/kube-test-harness"
branch = "kubernetes-1.11"
branch = "master"

[[constraint]]
name = "github.com/awslabs/goformation"
Expand Down
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ integration-test-dev: build ## Run the integration tests without cluster teardow
-eksctl.kubeconfig=$(HOME)/.kube/eksctl/clusters/$(TEST_CLUSTER)

create-integration-test-dev-cluster: build ## Create a test cluster for use when developing integration tests
@./eksctl create cluster --name=integration-test-dev --auto-kubeconfig --nodegroup-name=ng-0
@./eksctl create cluster --name=integration-test-dev --auto-kubeconfig --nodes=1 --nodegroup-name=ng-0

delete-integration-test-dev-cluster: build ## Delete the test cluster for use when developing integration tests
@./eksctl delete cluster --name=integration-test-dev --auto-kubeconfig
Expand Down
123 changes: 82 additions & 41 deletions integration/creategetdelete_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,12 @@ import (
"time"

awseks "github.com/aws/aws-sdk-go/service/eks"
harness "github.com/dlespiau/kube-test-harness"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"

api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha3"

harness "github.com/dlespiau/kube-test-harness"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
Expand Down Expand Up @@ -100,45 +99,6 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() {
Expect(config.CurrentContext).To(ContainSubstring(region))
})

// Verifies that a workload can actually be deployed and served on the
// freshly created cluster, using the kube-test-harness.
Context("and we create a deployment using kubectl", func() {
	var (
		err  error
		test *harness.Test
	)

	// Each spec gets its own harness session and a dedicated namespace.
	BeforeEach(func() {
		test, err = newKubeTest()
		Expect(err).ShouldNot(HaveOccurred())
		test.CreateNamespace(test.Namespace)
	})

	// Tear down the harness session (and its namespace) after each spec.
	AfterEach(func() {
		test.Close()
	})

	It("should deploy the service to the cluster", func() {
		d := test.CreateDeploymentFromFile(test.Namespace, "podinfo.yaml")
		test.WaitForDeploymentReady(d, 1*time.Minute)

		// Expects exactly 2 pods — presumably the replica count declared
		// in podinfo.yaml; confirm against that manifest.
		pods := test.ListPodsFromDeployment(d)
		Expect(len(pods.Items)).To(Equal(2))

		// For each pod of the Deployment, check we receive a sensible response to a
		// GET request on /version.
		for _, pod := range pods.Items {
			Expect(pod.Namespace).To(Equal(test.Namespace))

			req := test.PodProxyGet(&pod, "", "/version")
			fmt.Fprintf(GinkgoWriter, "url = %#v", req.URL())

			var js interface{}
			test.PodProxyGetJSON(&pod, "", "/version", &js)

			Expect(js.(map[string]interface{})).To(HaveKeyWithValue("version", "1.0.1"))
		}
	})
})

Context("and listing clusters", func() {
It("should return the previously created cluster", func() {
cmdSession := eksctl("get", "clusters", "--region", region)
Expand Down Expand Up @@ -194,6 +154,61 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() {
Expect(len(nodes.Items)).To(Equal(3))
})

// Verifies that a workload can be deployed and served on the cluster, and
// — per this PR — that cluster DNS works from every node via a DaemonSet
// whose readiness probe performs an in-cluster DNS lookup.
Context("and we create a deployment using kubectl", func() {
	var (
		err  error
		test *harness.Test
	)

	// Each spec gets its own harness session and a dedicated namespace.
	BeforeEach(func() {
		test, err = newKubeTest()
		Expect(err).ShouldNot(HaveOccurred())
		test.CreateNamespace(test.Namespace)
	})

	// Tear down the harness session (and its namespace) after each spec.
	AfterEach(func() {
		test.Close()
	})

	It("should deploy the service to the cluster", func() {
		d := test.CreateDeploymentFromFile(test.Namespace, "podinfo.yaml")
		test.WaitForDeploymentReady(d, 1*time.Minute)

		// Expects exactly 2 pods — presumably the replica count declared
		// in podinfo.yaml; confirm against that manifest.
		pods := test.ListPodsFromDeployment(d)
		Expect(len(pods.Items)).To(Equal(2))

		// For each pod of the Deployment, check we receive a sensible response to a
		// GET request on /version.
		for _, pod := range pods.Items {
			Expect(pod.Namespace).To(Equal(test.Namespace))

			req := test.PodProxyGet(&pod, "", "/version")
			fmt.Fprintf(GinkgoWriter, "url = %#v", req.URL())

			var js interface{}
			test.PodProxyGetJSON(&pod, "", "/version", &js)

			Expect(js.(map[string]interface{})).To(HaveKeyWithValue("version", "1.0.1"))
		}
	})

	It("should have functional DNS", func() {
		// NOTE(review): this `test` shadows the Context-level harness set up
		// in BeforeEach, so the BeforeEach session/namespace goes unused here
		// and a second harness is created — confirm this is intentional.
		test, err := newKubeTest()
		Expect(err).ShouldNot(HaveOccurred())
		defer test.Close()

		d := test.CreateDaemonSetFromFile(test.Namespace, "dns-test.yaml")

		// The DaemonSet pods only become Ready once their nslookup-based
		// readiness probe succeeds, so readiness here implies working DNS
		// on every node (see dns-test.yaml).
		test.WaitForDaemonSetReady(d, 3*time.Minute)

		{
			ds, err := test.GetDaemonSet(test.Namespace, d.Name)
			Expect(err).ShouldNot(HaveOccurred())
			fmt.Fprintf(GinkgoWriter, "ds.Status = %#v", ds.Status)
		}
	})
})

Context("and delete the second nodegroup", func() {
It("should not return an error", func() {
eksctl("delete", "nodegroup",
Expand All @@ -220,6 +235,32 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() {
})
})

// Scales the initial nodegroup back down to a single node and verifies
// the cluster converges on exactly one node carrying the nodegroup label.
Context("and scale the initial nodegroup back to 1 node", func() {
	It("should not return an error", func() {
		eksctl("scale", "nodegroup",
			"--verbose", "4",
			"--cluster", clusterName,
			"--region", region,
			"--nodes", "1",
			"--name", initNG,
		)
	})

	// Fixed spec description grammar ("1 nodes" -> "1 node").
	It("should make it 1 node total", func() {
		test, err := newKubeTest()
		Expect(err).ShouldNot(HaveOccurred())
		defer test.Close()

		// Wait for the scale-down to settle before counting nodes.
		test.WaitForNodesReady(1, commonTimeout)

		// Count only nodes belonging to the initial nodegroup.
		// (Removed the redundant double parentheses around the
		// metav1.ListOptions argument.)
		nodes := test.ListNodes(metav1.ListOptions{
			LabelSelector: api.NodeGroupNameLabel + "=" + initNG,
		})

		Expect(len(nodes.Items)).To(Equal(1))
	})
})

Context("and deleting the cluster", func() {
It("should not return an error", func() {
if !doDelete {
Expand Down
31 changes: 31 additions & 0 deletions integration/dns-test.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# DaemonSet used by the integration tests to verify that cluster DNS is
# reachable from every node (including across nodegroups): one pod runs per
# node, and each pod only becomes Ready once its readiness probe — an
# nslookup of the in-cluster API service name — succeeds.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    run: dns-test
  name: dns-test
spec:
  selector:
    matchLabels:
      run: dns-test
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: dns-test
    spec:
      containers:
      - image: tutum/dnsutils  # utility image providing nslookup/dig
        name: dns-test
        stdin: true
        tty: true
        # Readiness gates on DNS resolution working from this node: probe
        # starts immediately, retries every 3s, fails after 3 misses.
        readinessProbe:
          initialDelaySeconds: 0
          periodSeconds: 3
          failureThreshold: 3
          timeoutSeconds: 1
          exec:
            command:
            - nslookup
            - kubernetes.default.svc.cluster.local.

2 changes: 1 addition & 1 deletion pkg/cfn/builder/api_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ import (
)

const (
totalNodeResources = 11
totalNodeResources = 13
clusterName = "ferocious-mushroom-1532594698"
endpoint = "https://DE37D8AFB23F7275D2361AD6B2599143.yl4.us-west-2.eks.amazonaws.com"
caCert = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNE1EWXdOekExTlRBMU5Wb1hEVEk0TURZd05EQTFOVEExTlZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTWJoCnpvZElYR0drckNSZE1jUmVEN0YvMnB1NFZweTdvd3FEVDgrdk9zeGs2bXFMNWxQd3ZicFhmYkE3R0xzMDVHa0wKaDdqL0ZjcU91cnMwUFZSK3N5REtuQXltdDFORWxGNllGQktSV1dUQ1hNd2lwN1pweW9XMXdoYTlJYUlPUGxCTQpPTEVlckRabFVrVDFVV0dWeVdsMmxPeFgxa2JhV2gvakptWWdkeW5jMXhZZ3kxa2JybmVMSkkwLzVUVTRCajJxClB1emtrYW5Xd3lKbGdXQzhBSXlpWW82WFh2UVZmRzYrM3RISE5XM1F1b3ZoRng2MTFOYnl6RUI3QTdtZGNiNmgKR0ZpWjdOeThHZnFzdjJJSmI2Nk9FVzBSdW9oY1k3UDZPdnZmYnlKREhaU2hqTStRWFkxQXN5b3g4Ri9UelhHSgpQUWpoWUZWWEVhZU1wQmJqNmNFQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFCa2hKRVd4MHk1LzlMSklWdXJ1c1hZbjN6Z2EKRkZ6V0JsQU44WTlqUHB3S2t0Vy9JNFYyUGg3bWY2Z3ZwZ3Jhc2t1Slk1aHZPcDdBQmcxSTFhaHUxNUFpMUI0ZApuMllRaDlOaHdXM2pKMmhuRXk0VElpb0gza2JFdHRnUVB2bWhUQzNEYUJreEpkbmZJSEJCV1RFTTU1czRwRmxUClpzQVJ3aDc1Q3hYbjdScVU0akpKcWNPaTRjeU5qeFVpRDBqR1FaTmNiZWEyMkRCeTJXaEEzUWZnbGNScGtDVGUKRDVPS3NOWlF4MW9MZFAwci9TSmtPT1NPeUdnbVJURTIrODQxN21PRW02Z3RPMCszdWJkbXQ0aENsWEtFTTZYdwpuQWNlK0JxVUNYblVIN2ZNS3p2TDE5UExvMm5KbFU1TnlCbU1nL1pNVHVlUy80eFZmKy94WnpsQ0Q1WT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="
Expand Down
21 changes: 20 additions & 1 deletion pkg/cfn/builder/vpc.go
Original file line number Diff line number Diff line change
Expand Up @@ -113,10 +113,13 @@ func (n *NodeGroupResourceSet) addResourcesForSecurityGroups() {
desc := "worker nodes in group " + n.nodeGroupName

tcp := gfn.NewString("tcp")
udp := gfn.NewString("udp")
allInternalIPv4 := gfn.NewString(n.clusterSpec.VPC.CIDR.String())
anywhereIPv4 := gfn.NewString("0.0.0.0/0")
anywhereIPv6 := gfn.NewString("::/0")
var (
apiPort = gfn.NewInteger(443)
dnsPort = gfn.NewInteger(53)
sshPort = gfn.NewInteger(22)

portZero = gfn.NewInteger(0)
Expand Down Expand Up @@ -193,7 +196,7 @@ func (n *NodeGroupResourceSet) addResourcesForSecurityGroups() {
if n.spec.PrivateNetworking {
n.newResource("SSHIPv4", &gfn.AWSEC2SecurityGroupIngress{
GroupId: refSG,
CidrIp: gfn.NewString(n.clusterSpec.VPC.CIDR.String()),
CidrIp: allInternalIPv4,
Description: gfn.NewString("Allow SSH access to " + desc + " (private, only inside VPC)"),
IpProtocol: tcp,
FromPort: sshPort,
Expand All @@ -218,4 +221,20 @@ func (n *NodeGroupResourceSet) addResourcesForSecurityGroups() {
})
}
}
n.newResource("DNSUDPIPv4", &gfn.AWSEC2SecurityGroupIngress{
GroupId: refSG,
CidrIp: allInternalIPv4,
Description: gfn.NewString("Allow DNS access to " + desc + " inside VPC"),
IpProtocol: udp,
FromPort: dnsPort,
ToPort: dnsPort,
})
n.newResource("DNSTCPIPv4", &gfn.AWSEC2SecurityGroupIngress{
GroupId: refSG,
CidrIp: allInternalIPv4,
Description: gfn.NewString("Allow DNS access to " + desc + " inside VPC"),
IpProtocol: tcp,
FromPort: dnsPort,
ToPort: dnsPort,
})
}
Loading