Merge pull request #548 from pravisankar/remove-kube-dns

Remove kube-dns from installer

openshift-merge-robot authored Nov 5, 2018
2 parents 336c87e + 70991dd, commit 503d150
Showing 6 changed files with 2 additions and 265 deletions.
18 changes: 0 additions & 18 deletions pkg/asset/ignition/bootstrap/bootstrap.go
@@ -35,7 +35,6 @@ const (
 // template files.
 type bootstrapTemplateData struct {
     BootkubeImage       string
-    ClusterDNSIP        string
     EtcdCertSignerImage string
     EtcdCluster         string
     EtcdctlImage        string
@@ -137,10 +136,6 @@ func (a *Bootstrap) Files() []*asset.File {

 // getTemplateData returns the data to use to execute bootstrap templates.
 func (a *Bootstrap) getTemplateData(installConfig *types.InstallConfig, adminKubeConfig []byte) (*bootstrapTemplateData, error) {
-    clusterDNSIP, err := installconfig.ClusterDNSIP(installConfig)
-    if err != nil {
-        return nil, errors.Wrap(err, "failed to get ClusterDNSIP from InstallConfig")
-    }
     etcdEndpoints := make([]string, installConfig.MasterCount())
     for i := range etcdEndpoints {
         etcdEndpoints[i] = fmt.Sprintf("https://%s-etcd-%d.%s:2379", installConfig.ObjectMeta.Name, i, installConfig.BaseDomain)
@@ -153,7 +148,6 @@ func (a *Bootstrap) getTemplateData(installConfig *types.InstallConfig, adminKubeConfig []byte) (*bootstrapTemplateData, error) {
     }

     return &bootstrapTemplateData{
-        ClusterDNSIP:        clusterDNSIP,
         EtcdCertSignerImage: "quay.io/coreos/kube-etcd-signer-server:678cc8e6841e2121ebfdb6e2db568fce290b67d6",
         EtcdctlImage:        "quay.io/coreos/etcd:v3.2.14",
         BootkubeImage:       "quay.io/coreos/bootkube:v0.14.0",
@@ -215,18 +209,6 @@ func (a *Bootstrap) addTemporaryBootkubeFiles(templateData *bootstrapTemplateData) {
         a.Config.Storage.Files,
         ignition.FileFromString(filepath.Join(kubeProxyBootstrapDir, "kube-proxy-kubeconfig.yaml"), 0644, applyTemplateData(content.BootkubeKubeProxyKubeConfig, templateData)),
     )
-
-    kubeDNSBootstrapDir := filepath.Join(rootDir, "kube-dns-operator-bootstrap")
-    for name, data := range content.KubeDNSBootkubeManifests {
-        a.Config.Storage.Files = append(
-            a.Config.Storage.Files,
-            ignition.FileFromString(filepath.Join(kubeDNSBootstrapDir, name), 0644, data),
-        )
-    }
-    a.Config.Storage.Files = append(
-        a.Config.Storage.Files,
-        ignition.FileFromString(filepath.Join(kubeDNSBootstrapDir, "kube-dns-svc.yaml"), 0644, applyTemplateData(content.BootkubeKubeDNSService, templateData)),
-    )
 }

 func (a *Bootstrap) addTectonicFiles(dependencies asset.Parents) {
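Aside: applyTemplateData, called in the hunks above but not defined in this diff, is the helper that executes a parsed text/template against bootstrapTemplateData — it is how {{.ClusterDNSIP}} reached the now-deleted kube-dns-svc.yaml. A minimal sketch consistent with those call sites (assumed implementation, not taken from this commit):

package bootstrap

import (
    "bytes"
    "text/template"
)

// applyTemplateData renders a compiled template with the given data.
// Panicking on error matches bootstrap-time usage: every template is
// compiled into the binary, so a render failure is unrecoverable.
func applyTemplateData(tmpl *template.Template, data interface{}) string {
    buf := &bytes.Buffer{}
    if err := tmpl.Execute(buf, data); err != nil {
        panic(err)
    }
    return buf.String()
}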
3 changes: 1 addition & 2 deletions pkg/asset/ignition/bootstrap/content/bootkube.go
@@ -123,10 +123,9 @@ then
     cp kube-scheduler-bootstrap/manifests/* manifests/
 fi

-# TODO: Remove this when kube-proxy and kube-dns are properly rendered by corresponding operators.
+# TODO: Remove this when kube-proxy is properly rendered by corresponding operator.
 echo "Installing temporary bootstrap manifests..."
 cp kube-proxy-operator-bootstrap/* manifests/
-cp kube-dns-operator-bootstrap/* manifests/

 if [ ! -d mco-bootstrap ]
 then
190 changes: 1 addition & 189 deletions pkg/asset/ignition/bootstrap/content/bootkube_temporary.go
@@ -11,39 +11,8 @@ var KubeProxyBootkubeManifests = map[string]string{
     "kube-proxy-daemonset.yaml": bootkubeKubeProxyDaemonset,
 }

-// KubeDNSBootkubeManifests is a map of manifests needed by kube-dns to install.
-// TODO: This must move to networking operator renderer.
-var KubeDNSBootkubeManifests = map[string]string{
-    "kube-dns-deployment.yaml": bootkubeKubeDNSDeployment,
-}
-
-// BootkubeKubeDNSService is a template for kube-dns service.
+// BootkubeKubeProxyKubeConfig is a template for kube-proxy-kubeconfig secret.
 var (
-    BootkubeKubeDNSService = template.Must(template.New("bootkube.sh").Parse(`
-apiVersion: v1
-kind: Service
-metadata:
-  name: kube-dns
-  namespace: kube-system
-  labels:
-    k8s-app: kube-dns
-    kubernetes.io/cluster-service: "true"
-    kubernetes.io/name: KubeDNS
-spec:
-  selector:
-    k8s-app: kube-dns
-  clusterIP: {{.ClusterDNSIP}}
-  ports:
-  - name: dns
-    port: 53
-    protocol: UDP
-    targetPort: 53
-  - name: dns-tcp
-    port: 53
-    protocol: TCP
-    targetPort: 53
-`))
-
     BootkubeKubeProxyKubeConfig = template.Must(template.New("kube-proxy-kubeconfig").Parse(`
 apiVersion: v1
 kind: Secret
@@ -154,162 +123,5 @@ subjects:
 - kind: ServiceAccount
   name: kube-proxy
   namespace: kube-system
 `
-    bootkubeKubeDNSDeployment = `
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: kube-dns
-  namespace: kube-system
-  labels:
-    k8s-app: kube-dns
-    kubernetes.io/cluster-service: "true"
-spec:
-  # replicas: not specified here:
-  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
-  # 2. Default is 1.
-  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
-  strategy:
-    rollingUpdate:
-      maxSurge: 10%
-      maxUnavailable: 0
-  selector:
-    matchLabels:
-      k8s-app: kube-dns
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-dns
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
-    spec:
-      containers:
-      - name: kubedns
-        image: "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.8"
-        resources:
-          # TODO: Set memory limits when we've profiled the container for large
-          # clusters, then set request = limit to keep this container in
-          # guaranteed class. Currently, this container falls into the
-          # "burstable" category so the kubelet doesn't backoff from restarting it.
-          limits:
-            memory: 170Mi
-          requests:
-            cpu: 100m
-            memory: 70Mi
-        livenessProbe:
-          httpGet:
-            path: /healthcheck/kubedns
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          timeoutSeconds: 5
-          successThreshold: 1
-          failureThreshold: 5
-        readinessProbe:
-          httpGet:
-            path: /readiness
-            port: 8081
-            scheme: HTTP
-          # we poll on pod startup for the Kubernetes master service and
-          # only setup the /readiness HTTP server once that's available.
-          initialDelaySeconds: 3
-          timeoutSeconds: 5
-        args:
-        - --domain=cluster.local.
-        - --dns-port=10053
-        - --config-dir=/kube-dns-config
-        - --v=2
-        env:
-        - name: PROMETHEUS_PORT
-          value: "10055"
-        ports:
-        - containerPort: 10053
-          name: dns-local
-          protocol: UDP
-        - containerPort: 10053
-          name: dns-tcp-local
-          protocol: TCP
-        - containerPort: 10055
-          name: metrics
-          protocol: TCP
-        volumeMounts:
-        - name: kube-dns-config
-          mountPath: /kube-dns-config
-      - name: dnsmasq
-        image: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.8"
-        livenessProbe:
-          httpGet:
-            path: /healthcheck/dnsmasq
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          timeoutSeconds: 5
-          successThreshold: 1
-          failureThreshold: 5
-        args:
-        - -v=2
-        - -logtostderr
-        - -configDir=/etc/k8s/dns/dnsmasq-nanny
-        - -restartDnsmasq=true
-        - --
-        - -k
-        - --cache-size=1000
-        - --log-facility=-
-        - --server=/cluster.local/127.0.0.1#10053
-        - --server=/in-addr.arpa/127.0.0.1#10053
-        - --server=/ip6.arpa/127.0.0.1#10053
-        ports:
-        - containerPort: 53
-          name: dns
-          protocol: UDP
-        - containerPort: 53
-          name: dns-tcp
-          protocol: TCP
-        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
-        resources:
-          requests:
-            cpu: 150m
-            memory: 20Mi
-        volumeMounts:
-        - name: kube-dns-config
-          mountPath: /etc/k8s/dns/dnsmasq-nanny
-      - name: sidecar
-        image: "gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.8"
-        livenessProbe:
-          httpGet:
-            path: /metrics
-            port: 10054
-            scheme: HTTP
-          initialDelaySeconds: 60
-          timeoutSeconds: 5
-          successThreshold: 1
-          failureThreshold: 5
-        args:
-        - --v=2
-        - --logtostderr
-        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV
-        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV
-        ports:
-        - containerPort: 10054
-          name: metrics
-          protocol: TCP
-        resources:
-          requests:
-            memory: 20Mi
-            cpu: 10m
-      dnsPolicy: Default  # Don't use cluster DNS.
-      tolerations:
-      - key: "CriticalAddonsOnly"
-        operator: "Exists"
-      - key: "node-role.kubernetes.io/master"
-        operator: "Exists"
-        effect: "NoSchedule"
-      nodeSelector:
-        node-role.kubernetes.io/master: ""
-      volumes:
-      - name: kube-dns-config
-        configMap:
-          name: kube-dns
-          optional: true
-`
 )
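The kube-proxy manifest map above survives this commit; its consumer in addTemporaryBootkubeFiles presumably mirrors the kube-dns loop deleted from bootstrap.go earlier in this diff. A sketch of that surviving path (the directory name comes from bootkube.sh above; the loop itself is assumed, not shown in this commit):

kubeProxyBootstrapDir := filepath.Join(rootDir, "kube-proxy-operator-bootstrap")
// Write every kube-proxy bootstrap manifest into the ignition config,
// one file per map entry, just as the deleted kube-dns loop did.
for name, data := range content.KubeProxyBootkubeManifests {
    a.Config.Storage.Files = append(
        a.Config.Storage.Files,
        ignition.FileFromString(filepath.Join(kubeProxyBootstrapDir, name), 0644, data),
    )
}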
12 changes: 0 additions & 12 deletions pkg/asset/installconfig/installconfig.go
@@ -4,7 +4,6 @@ import (
     "net"
     "os"

-    "github.com/apparentlymart/go-cidr/cidr"
     "github.com/ghodss/yaml"
     "github.com/pkg/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -145,17 +144,6 @@ func (a *InstallConfig) Files() []*asset.File {
     return []*asset.File{}
 }

-// ClusterDNSIP returns the string representation of the DNS server's IP
-// address.
-func ClusterDNSIP(installConfig *types.InstallConfig) (string, error) {
-    ip, err := cidr.Host(&installConfig.ServiceCIDR.IPNet, 10)
-    if err != nil {
-        return "", err
-    }
-
-    return ip.String(), nil
-}
-
 func parseCIDR(s string) net.IPNet {
     _, cidr, _ := net.ParseCIDR(s)
     return *cidr
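For context on the deleted helper: ClusterDNSIP reserved the tenth host address of the service CIDR for the DNS service, which is why removing kube-dns also drops the go-cidr import. A standalone sketch of the same calculation (the 10.3.0.0/16 input is an illustrative value, not taken from this commit):

package main

import (
    "fmt"
    "net"

    "github.com/apparentlymart/go-cidr/cidr"
)

// clusterDNSIP mirrors the removed helper: parse the service CIDR and
// return its tenth host address as a string.
func clusterDNSIP(serviceCIDR string) (string, error) {
    _, ipnet, err := net.ParseCIDR(serviceCIDR)
    if err != nil {
        return "", err
    }
    ip, err := cidr.Host(ipnet, 10)
    if err != nil {
        return "", err
    }
    return ip.String(), nil
}

func main() {
    ip, err := clusterDNSIP("10.3.0.0/16") // example service CIDR
    if err != nil {
        panic(err)
    }
    fmt.Println(ip) // prints 10.3.0.10
}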
28 changes: 0 additions & 28 deletions pkg/asset/installconfig/installconfig_test.go

This file was deleted.

16 changes: 0 additions & 16 deletions pkg/asset/manifests/content/bootkube/cvo-overrides.go
@@ -25,14 +25,6 @@ spec:
     namespace: openshift-cluster-network-operator
     name: cluster-network-operator
     unmanaged: true
-  - kind: ServiceAccount # missing run level 0 on the namespace and has 0000_08
-    namespace: openshift-cluster-dns-operator
-    name: cluster-dns-operator
-    unmanaged: true
-  - kind: Deployment # this conflicts with kube-core-operator
-    namespace: openshift-cluster-dns-operator
-    name: cluster-dns-operator
-    unmanaged: true
   - kind: APIService # packages.apps.redhat.com fails to start properly
     name: v1alpha1.packages.apps.redhat.com
     unmanaged: true
@@ -56,14 +48,6 @@ overrides:
     namespace: openshift-cluster-network-operator
     name: cluster-network-operator
     unmanaged: true
-  - kind: ServiceAccount # missing run level 0 on the namespace and has 0000_08
-    namespace: openshift-cluster-dns-operator
-    name: cluster-dns-operator
-    unmanaged: true
-  - kind: Deployment # this conflicts with kube-core-operator
-    namespace: openshift-cluster-dns-operator
-    name: cluster-dns-operator
-    unmanaged: true
   - kind: APIService # packages.apps.redhat.com fails to start properly
     name: v1alpha1.packages.apps.redhat.com
     unmanaged: true
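For reference, each entry under overrides tells the cluster-version operator to leave the named resource unmanaged. A Go struct matching the YAML fields above would look roughly like this (assumed shape, inferred from the manifest rather than taken from this commit or the operator's actual API):

// ComponentOverride marks a single resource as unmanaged so the
// cluster-version operator will not reconcile it.
type ComponentOverride struct {
    Kind      string `json:"kind"`
    Namespace string `json:"namespace"`
    Name      string `json:"name"`
    Unmanaged bool   `json:"unmanaged"`
}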
