Skip to content
This repository has been archived by the owner on May 16, 2023. It is now read-only.

[Metricbeat] DNS lookup failure for node host #394

Closed
4nte opened this issue Dec 6, 2019 · 5 comments · Fixed by #585
Closed

[Metricbeat] DNS lookup failure for node host #394

4nte opened this issue Dec 6, 2019 · 5 comments · Fixed by #585
Labels
bug Something isn't working triage/stale

Comments

@4nte
Copy link

4nte commented Dec 6, 2019

Chart version: 7.4.1

Kubernetes version: 1.14.2

Kubernetes provider: Digital Ocean

Helm Version: 2.13

helm get release output

REVISION: 8
RELEASED: Fri Dec  6 12:57:01 2019
CHART: metricbeat-7.4.1
USER-SUPPLIED VALUES:
metricbeatConfig:
  kube-state-metrics-metricbeat.yml: |-
    metricbeat.modules:
    - module: kubernetes
      enabled: true
      metricsets:
        - state_node
        - state_deployment
        - state_replicaset
        - state_pod
        - state_container
      period: 10s
      hosts: ["${KUBE_STATE_METRICS_HOSTS}"]
    output.elasticsearch:
      hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
  metricbeat.yml: |
    system:
      hostfs: /hostfs
    metricbeat.modules:
    - module: kubernetes
      metricsets:
        - container
        - node
        - pod
        - system
        - volume
      period: 10s
      host: "${NODE_NAME}"
      hosts: ["${NODE_NAME}:10255"]
      processors:
      - add_kubernetes_metadata:
          in_cluster: true
    - module: kubernetes
      enabled: true
      metricsets:
        - event
    - module: system
      period: 10s
      metricsets:
        - cpu
        - load
        - memory
        - network
        - process
        - process_summary
      processes: ['.*']
      process.include_top_n:
        by_cpu: 5
        by_memory: 5
    - module: system
      period: 1m
      metricsets:
        - filesystem
        - fsstat
      processors:
      - drop_event.when.regexp:
          system.filesystem.mount_point: '^/(sys|cgroup|proc|dev|etc|host|lib)($|/)'
    - module: golang
      metricsets:
        - expvar
        - heap
      period: 10s
      hosts: ["${NODE_NAME:6060}"]
      heap.path: "/debug/vars"
      expvar:
        namespace: "example"
        path: "/debug/vars"
    output.elasticsearch:
      hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'

COMPUTED VALUES:
affinity: {}
clusterRoleRules:
- apiGroups:
  - extensions
  - apps
  - ""
  resources:
  - namespaces
  - pods
  - events
  - deployments
  - nodes
  - replicasets
  verbs:
  - get
  - list
  - watch
extraEnvs: []
extraVolumeMounts: ""
extraVolumes: ""
fullnameOverride: ""
hostPathRoot: /var/lib
image: docker.elastic.co/beats/metricbeat
imagePullPolicy: IfNotPresent
imagePullSecrets: []
imageTag: 7.4.1
kube-state-metrics:
  affinity: {}
  collectors:
    certificatesigningrequests: true
    configmaps: true
    cronjobs: true
    daemonsets: true
    deployments: true
    endpoints: true
    horizontalpodautoscalers: true
    ingresses: true
    jobs: true
    limitranges: true
    namespaces: true
    nodes: true
    persistentvolumeclaims: true
    persistentvolumes: true
    poddisruptionbudgets: true
    pods: true
    replicasets: true
    replicationcontrollers: true
    resourcequotas: true
    secrets: true
    services: true
    statefulsets: true
  global: {}
  hostNetwork: false
  image:
    pullPolicy: IfNotPresent
    repository: quay.io/coreos/kube-state-metrics
    tag: v1.6.0
  nodeSelector: {}
  podAnnotations: {}
  podSecurityPolicy:
    annotations: {}
    enabled: false
  prometheus:
    monitor:
      additionalLabels: {}
      enabled: false
      namespace: ""
  prometheusScrape: true
  rbac:
    create: true
  replicas: 1
  securityContext:
    enabled: true
    fsGroup: 65534
    runAsUser: 65534
  service:
    loadBalancerIP: ""
    nodePort: 0
    port: 8080
    type: ClusterIP
  serviceAccount:
    create: true
    imagePullSecrets: []
    name: null
  tolerations: []
labels: {}
livenessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 5
managedServiceAccount: true
metricbeatConfig:
  kube-state-metrics-metricbeat.yml: |-
    metricbeat.modules:
    - module: kubernetes
      enabled: true
      metricsets:
        - state_node
        - state_deployment
        - state_replicaset
        - state_pod
        - state_container
      period: 10s
      hosts: ["${KUBE_STATE_METRICS_HOSTS}"]
    output.elasticsearch:
      hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
  metricbeat.yml: |
    system:
      hostfs: /hostfs
    metricbeat.modules:
    - module: kubernetes
      metricsets:
        - container
        - node
        - pod
        - system
        - volume
      period: 10s
      host: "${NODE_NAME}"
      hosts: ["${NODE_NAME}:10255"]
      processors:
      - add_kubernetes_metadata:
          in_cluster: true
    - module: kubernetes
      enabled: true
      metricsets:
        - event
    - module: system
      period: 10s
      metricsets:
        - cpu
        - load
        - memory
        - network
        - process
        - process_summary
      processes: ['.*']
      process.include_top_n:
        by_cpu: 5
        by_memory: 5
    - module: system
      period: 1m
      metricsets:
        - filesystem
        - fsstat
      processors:
      - drop_event.when.regexp:
          system.filesystem.mount_point: '^/(sys|cgroup|proc|dev|etc|host|lib)($|/)'
    - module: golang
      metricsets:
        - expvar
        - heap
      period: 10s
      hosts: ["${NODE_NAME:6060}"]
      heap.path: "/debug/vars"
      expvar:
        namespace: "example"
        path: "/debug/vars"
    output.elasticsearch:
      hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
nameOverride: ""
nodeSelector: {}
podAnnotations: {}
podSecurityContext:
  privileged: false
  runAsUser: 0
readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 5
replicas: 1
resources:
  limits:
    cpu: 1000m
    memory: 200Mi
  requests:
    cpu: 100m
    memory: 100Mi
secretMounts: []
serviceAccount: ""
terminationGracePeriod: 30
tolerations: []
updateStrategy: RollingUpdate

HOOKS:
MANIFEST:

---
# Source: metricbeat/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: metricbeat-shared-metricbeat-config
  labels:
    app: "metricbeat-shared-metricbeat"
    chart: "metricbeat-7.4.1"
    heritage: "Tiller"
    release: "metricbeat-shared"
data:
  kube-state-metrics-metricbeat.yml: |
    metricbeat.modules:
    - module: kubernetes
      enabled: true
      metricsets:
        - state_node
        - state_deployment
        - state_replicaset
        - state_pod
        - state_container
      period: 10s
      hosts: ["${KUBE_STATE_METRICS_HOSTS}"]
    output.elasticsearch:
      hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
  metricbeat.yml: |
    system:
      hostfs: /hostfs
    metricbeat.modules:
    - module: kubernetes
      metricsets:
        - container
        - node
        - pod
        - system
        - volume
      period: 10s
      host: "${NODE_NAME}"
      hosts: ["${NODE_NAME}:10255"]
      processors:
      - add_kubernetes_metadata:
          in_cluster: true
    - module: kubernetes
      enabled: true
      metricsets:
        - event
    - module: system
      period: 10s
      metricsets:
        - cpu
        - load
        - memory
        - network
        - process
        - process_summary
      processes: ['.*']
      process.include_top_n:
        by_cpu: 5
        by_memory: 5
    - module: system
      period: 1m
      metricsets:
        - filesystem
        - fsstat
      processors:
      - drop_event.when.regexp:
          system.filesystem.mount_point: '^/(sys|cgroup|proc|dev|etc|host|lib)($|/)'
    - module: golang
      metricsets:
        - expvar
        - heap
      period: 10s
      hosts: ["${NODE_NAME:6060}"]
      heap.path: "/debug/vars"
      expvar:
        namespace: "example"
        path: "/debug/vars"
    output.elasticsearch:
      hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
---
# Source: metricbeat/charts/kube-state-metrics/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: kube-state-metrics
    chart: kube-state-metrics-1.6.0
    heritage: Tiller
    release: metricbeat-shared
  name: metricbeat-shared-kube-state-metrics
imagePullSecrets:
  []
---
# Source: metricbeat/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metricbeat-shared-metricbeat
  labels:
    app: "metricbeat-shared-metricbeat"
    chart: "metricbeat-7.4.1"
    heritage: "Tiller"
    release: "metricbeat-shared"
---
# Source: metricbeat/charts/kube-state-metrics/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  labels:
    app: kube-state-metrics
    chart: kube-state-metrics-1.6.0
    heritage: Tiller
    release: metricbeat-shared
  name: metricbeat-shared-kube-state-metrics
rules:

- apiGroups: ["certificates.k8s.io"]
  resources:
  - certificatesigningrequests
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - configmaps
  verbs: ["list", "watch"]

- apiGroups: ["batch"]
  resources:
  - cronjobs
  verbs: ["list", "watch"]

- apiGroups: ["extensions", "apps"]
  resources:
  - daemonsets
  verbs: ["list", "watch"]

- apiGroups: ["extensions", "apps"]
  resources:
  - deployments
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - endpoints
  verbs: ["list", "watch"]

- apiGroups: ["autoscaling"]
  resources:
  - horizontalpodautoscalers
  verbs: ["list", "watch"]

- apiGroups: ["extensions"]
  resources:
  - ingresses
  verbs: ["list", "watch"]

- apiGroups: ["batch"]
  resources:
  - jobs
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - limitranges
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - namespaces
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - nodes
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - persistentvolumeclaims
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - persistentvolumes
  verbs: ["list", "watch"]

- apiGroups: ["policy"]
  resources:
    - poddisruptionbudgets
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - pods
  verbs: ["list", "watch"]

- apiGroups: ["extensions", "apps"]
  resources:
  - replicasets
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - replicationcontrollers
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - resourcequotas
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - secrets
  verbs: ["list", "watch"]

- apiGroups: [""]
  resources:
  - services
  verbs: ["list", "watch"]

- apiGroups: ["apps"]
  resources:
  - statefulsets
  verbs: ["list", "watch"]
---
# Source: metricbeat/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: metricbeat-shared-metricbeat-cluster-role
  labels:
    app: "metricbeat-shared-metricbeat"
    chart: "metricbeat-7.4.1"
    heritage: "Tiller"
    release: "metricbeat-shared"
rules:
  - apiGroups:
    - extensions
    - apps
    - ""
    resources:
    - namespaces
    - pods
    - events
    - deployments
    - nodes
    - replicasets
    verbs:
    - get
    - list
    - watch
---
# Source: metricbeat/charts/kube-state-metrics/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  labels:
    app: kube-state-metrics
    chart: kube-state-metrics-1.6.0
    heritage: Tiller
    release: metricbeat-shared
  name: metricbeat-shared-kube-state-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metricbeat-shared-kube-state-metrics
subjects:
- kind: ServiceAccount
  name: metricbeat-shared-kube-state-metrics
  namespace: shared
---
# Source: metricbeat/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metricbeat-shared-metricbeat-cluster-role-binding
  labels:
    app: "metricbeat-shared-metricbeat"
    chart: "metricbeat-7.4.1"
    heritage: "Tiller"
    release: "metricbeat-shared"
roleRef:
  kind: ClusterRole
  name: metricbeat-shared-metricbeat-cluster-role
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: metricbeat-shared-metricbeat
  namespace: shared
---
# Source: metricbeat/charts/kube-state-metrics/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: metricbeat-shared-kube-state-metrics
  labels:
    app: kube-state-metrics
    chart: "kube-state-metrics-1.6.0"
    release: "metricbeat-shared"
    heritage: "Tiller"
  annotations:
    prometheus.io/scrape: 'true'
spec:
  type: "ClusterIP"
  ports:
  - name: "http"
    protocol: TCP
    port: 8080
    targetPort: 8080
  selector:
    app: kube-state-metrics
    release: metricbeat-shared
---
# Source: metricbeat/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: metricbeat-shared-metricbeat
  labels:
    app: "metricbeat-shared-metricbeat"
    chart: "metricbeat-7.4.1"
    heritage: "Tiller"
    release: "metricbeat-shared"
spec:
  selector:
    matchLabels:
      app: "metricbeat-shared-metricbeat"
      release: "metricbeat-shared"
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      annotations:

        configChecksum: ada1dc59510ecb8b299d7c360de573f03bbd728ab5dc8585d8462b9076d2cbb
      name: "metricbeat-shared-metricbeat"
      labels:
        app: "metricbeat-shared-metricbeat"
        chart: "metricbeat-7.4.1"
        heritage: "Tiller"
        release: "metricbeat-shared"
    spec:
      serviceAccountName: metricbeat-shared-metricbeat
      terminationGracePeriodSeconds: 30
      volumes:
      - name: metricbeat-config
        configMap:
          defaultMode: 0600
          name: metricbeat-shared-metricbeat-config
      - name: data
        hostPath:
          path: /var/lib/metricbeat-shared-metricbeat-shared-data
          type: DirectoryOrCreate
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: varrundockersock
        hostPath:
          path: /var/run/docker.sock
      containers:
      - name: "metricbeat"
        image: "docker.elastic.co/beats/metricbeat:7.4.1"
        imagePullPolicy: "IfNotPresent"
        args:
        - "-e"
        - "-E"
        - "http.enabled=true"
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - |
              #!/usr/bin/env bash -e
              curl --fail 127.0.0.1:5066
          failureThreshold: 3
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 5

        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - |
              #!/usr/bin/env bash -e
              metricbeat test output
          failureThreshold: 3
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 5

        resources:
          limits:
            cpu: 1000m
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi

        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          privileged: false
          runAsUser: 0

        volumeMounts:
        - name: metricbeat-config
          mountPath: /usr/share/metricbeat/kube-state-metrics-metricbeat.yml
          readOnly: true
          subPath: kube-state-metrics-metricbeat.yml
        - name: metricbeat-config
          mountPath: /usr/share/metricbeat/metricbeat.yml
          readOnly: true
          subPath: metricbeat.yml
        - name: data
          mountPath: /usr/share/metricbeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
        # Necessary when using autodiscovery; avoid mounting it otherwise
        # See: https://www.elastic.co/guide/en/beats/metricbeat/master/configuration-autodiscover.html
        - name: varrundockersock
          mountPath: /var/run/docker.sock
          readOnly: true
---
# Source: metricbeat/charts/kube-state-metrics/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metricbeat-shared-kube-state-metrics
  labels:
    app: kube-state-metrics
    chart: "kube-state-metrics-1.6.0"
    release: "metricbeat-shared"
    heritage: "Tiller"
spec:
  selector:
    matchLabels:
      app: kube-state-metrics
  replicas: 1
  template:
    metadata:
      labels:
        app: kube-state-metrics
        release: "metricbeat-shared"
    spec:
      hostNetwork: false
      serviceAccountName: metricbeat-shared-kube-state-metrics
      securityContext:
        fsGroup: 65534
        runAsUser: 65534
      containers:
      - name: kube-state-metrics
        args:

        - --collectors=certificatesigningrequests


        - --collectors=configmaps


        - --collectors=cronjobs


        - --collectors=daemonsets


        - --collectors=deployments


        - --collectors=endpoints


        - --collectors=horizontalpodautoscalers


        - --collectors=ingresses


        - --collectors=jobs


        - --collectors=limitranges


        - --collectors=namespaces


        - --collectors=nodes


        - --collectors=persistentvolumeclaims


        - --collectors=persistentvolumes


        - --collectors=poddisruptionbudgets


        - --collectors=pods


        - --collectors=replicasets


        - --collectors=replicationcontrollers


        - --collectors=resourcequotas


        - --collectors=secrets


        - --collectors=services


        - --collectors=statefulsets


        imagePullPolicy: IfNotPresent
        image: "quay.io/coreos/kube-state-metrics:v1.6.0"
        ports:
        - containerPort: 8080
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 5
          timeoutSeconds: 5
        resources:
            null
---
# Source: metricbeat/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: 'metricbeat-shared-metricbeat-metrics'
  labels:
    app: 'metricbeat-shared-metricbeat-metrics'
    chart: 'metricbeat-7.4.1'
    heritage: 'Tiller'
    release: 'metricbeat-shared'
spec:
  replicas: 1
  selector:
    matchLabels:
      app: 'metricbeat-shared-metricbeat-metrics'
      chart: 'metricbeat-7.4.1'
      heritage: 'Tiller'
      release: 'metricbeat-shared'
  template:
    metadata:
      annotations:

        configChecksum: ada1dc59510ecb8b299d7c360de573f03bbd728ab5dc8585d8462b9076d2cbb
      labels:
        app: 'metricbeat-shared-metricbeat-metrics'
        chart: 'metricbeat-7.4.1'
        heritage: 'Tiller'
        release: 'metricbeat-shared'
    spec:
      serviceAccountName: metricbeat-shared-metricbeat
      terminationGracePeriodSeconds: 30
      volumes:
      - name: metricbeat-config
        configMap:
          defaultMode: 0600
          name: metricbeat-shared-metricbeat-config
      containers:
      - name: "metricbeat"
        image: "docker.elastic.co/beats/metricbeat:7.4.1"
        imagePullPolicy: "IfNotPresent"
        args:
          - "-c"
          - "/usr/share/metricbeat/kube-state-metrics-metricbeat.yml"
          - "-e"
          - "-E"
          - "http.enabled=true"
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - |
              #!/usr/bin/env bash -e
              curl --fail 127.0.0.1:5066
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - |
              #!/usr/bin/env bash -e
              metricbeat test output
          failureThreshold: 3
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 5

        resources:
          limits:
            cpu: 1000m
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi

        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: KUBE_STATE_METRICS_HOSTS
          value: "$(METRICBEAT_SHARED_KUBE_STATE_METRICS_SERVICE_HOST):$(METRICBEAT_SHARED_KUBE_STATE_METRICS_SERVICE_PORT_HTTP)"
        securityContext:
          privileged: false
          runAsUser: 0

        volumeMounts:
        - name: metricbeat-config
          mountPath: /usr/share/metricbeat/kube-state-metrics-metricbeat.yml
          readOnly: true
          subPath: kube-state-metrics-metricbeat.yml
        - name: metricbeat-config
          mountPath: /usr/share/metricbeat/metricbeat.yml
          readOnly: true
          subPath: metricbeat.yml

Describe the bug:
Metricbeat with default chart values fails to resolve node hosts.

WARN	transport/tcp.go:53	DNS lookup failure "dev-pool-yq2t": lookup dev-pool-yq2t on 10.245.0.10:53: no such host

Steps to reproduce:

  1. Install metricbeat helm chart with default configuration
  2. Open infrastructure dashboard in kibana & see that pods are listed but metrics are zero values

Expected behavior:
I expected to see actual metrics of the listed pods.

metricbeat logs:

2019-12-06T12:19:36.967Z	INFO	[monitoring]	log/log.go:145	Non-zero metrics in the last 30s	{"monitoring": {"metrics": {"beat":{"cpu":{"system":{"ticks":7020,"time":{"ms":124}},"total":{"ticks":11310,"time":{"ms":195},"value":11310},"user":{"ticks":4290,"time":{"ms":71}}},"handles":{"limit":{"hard":1048576,"soft":1048576},"open":10},"info":{"ephemeral_id":"d1d7d3cb-4b04-4f85-8e33-1aba86438261","uptime":{"ms":1320714}},"memstats":{"gc_next":16958624,"memory_alloc":15336920,"memory_total":295361056,"rss":-4476928},"runtime":{"goroutines":111}},"libbeat":{"config":{"module":{"running":0}},"output":{"events":{"acked":42,"batches":15,"total":42},"read":{"bytes":5392},"write":{"bytes":39474}},"pipeline":{"clients":5,"events":{"active":0,"published":42,"total":42},"queue":{"acked":42}}},"metricbeat":{"golang":{"expvar":{"events":3,"failures":3},"heap":{"events":3,"failures":3}},"kubernetes":{"container":{"events":3,"failures":3},"node":{"events":3,"failures":3},"pod":{"events":3,"failures":3},"system":{"events":3,"failures":3},"volume":{"events":3,"failures":3}},"system":{"cpu":{"events":3,"success":3},"load":{"events":3,"success":3},"memory":{"events":3,"success":3},"network":{"events":6,"success":6},"process":{"events":3,"success":3},"process_summary":{"events":3,"success":3}}},"system":{"load":{"1":7.55,"15":8.49,"5":8.87,"norm":{"1":3.775,"15":4.245,"5":4.435}}}}}}
2019-12-06T12:19:37.029Z	WARN	transport/tcp.go:53	DNS lookup failure "dev-pool-yq2t": lookup dev-pool-yq2t on 10.245.0.10:53: no such host
2019-12-06T12:19:37.031Z	INFO	module/wrapper.go:252	Error fetching data for metricset golang.heap: error in http fetch: error making http request: Get http://dev-pool-yq2t/debug/vars: lookup dev-pool-yq2t on 10.245.0.10:53: no such host
2019-12-06T12:19:37.089Z	WARN	transport/tcp.go:53	DNS lookup failure "dev-pool-yq2t": lookup dev-pool-yq2t on 10.245.0.10:53: no such host
2019-12-06T12:19:37.606Z	WARN	transport/tcp.go:53	DNS lookup failure "dev-pool-yq2t": lookup dev-pool-yq2t on 10.245.0.10:53: no such host
2019-12-06T12:19:38.244Z	WARN	transport/tcp.go:53	DNS lookup failure "dev-pool-yq2t": lookup dev-pool-yq2t on 10.245.0.10:53: no such host
2019-12-06T12:19:39.114Z	WARN	transport/tcp.go:53	DNS lookup failure "dev-pool-yq2t": lookup dev-pool-yq2t on 10.245.0.10:53: no such host

Any additional context:

@jmlrt jmlrt added the bug Something isn't working label Dec 18, 2019
@eddiewang
Copy link

Hi there, I was wondering if anyone knew of a manual fix for this (while the bug is being tackled).

@FedeBev
Copy link

FedeBev commented Jan 14, 2020

Hi there, any news here?
Thanks

@FedeBev
Copy link

FedeBev commented Jan 15, 2020

@eddiewang

For me the workaround/fix was to add the following options to the daemonset template:

spec:
  template:
    spec:
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:

@botelastic
Copy link

botelastic bot commented Apr 14, 2020

This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.

@cesarqdt
Copy link

@FedeBev That didn't help for me.

My spec for the agents is this one:

apiVersion: agent.k8s.elastic.co/v1alpha1
kind: Agent
metadata:
  name: elastic-agent
spec:
  version: 8.4.2
  kibanaRef:
    name: kibana
  fleetServerRef:
    name: fleet-server
  policyID: eck-agent
  mode: fleet
  daemonSet:
    podTemplate:
      spec:
        serviceAccountName: elastic-agent
        hostNetwork: true
        dnsPolicy: ClusterFirstWithHostNet
        automountServiceAccountToken: true
        securityContext:
          runAsUser: 0
        containers:
        - name: agent
          volumeMounts:
          - mountPath: /var/lib/docker/containers
            name: varlibdockercontainers
          - mountPath: /var/log/containers
            name: varlogcontainers
          - mountPath: /var/log/pods
            name: varlogpods
        volumes:
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
        - name: varlogcontainers
          hostPath:
            path: /var/log/containers
        - name: varlogpods
          hostPath:
            path: /var/log/pods

kube-state-metrics was installed with `helm install kube-state-metrics prometheus-community/kube-state-metrics --version 4.20.2 -n kube-system`

Sign up for free to subscribe to this conversation on GitHub. Already have an account? Sign in.
Labels
bug Something isn't working triage/stale
Projects
None yet
Development

Successfully merging a pull request may close this issue.

5 participants