forked from vmware-archive/eventrouter
-
Notifications
You must be signed in to change notification settings - Fork 0
/
eventrouter.go
162 lines (139 loc) · 4.69 KB
/
eventrouter.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"github.com/golang/glog"
"github.com/heptiolabs/eventrouter/sinks"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/api/core/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
)
var (
	// kubernetesWarningEventCounterVec counts every Warning-type event seen by
	// the router, labeled by the involved object's identity, the event reason,
	// and the reporting source host (see prometheusEvent below).
	kubernetesWarningEventCounterVec = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "heptio_eventrouter_warnings_total",
		Help: "Total number of warning events in the kubernetes cluster",
	}, []string{
		"involved_object_kind",
		"involved_object_name",
		"involved_object_namespace",
		"reason",
		"source",
	})
	// kubernetesNormalEventCounterVec counts every Normal-type event, with the
	// same label set as the warning counter so the two series can be compared.
	kubernetesNormalEventCounterVec = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "heptio_eventrouter_normal_total",
		Help: "Total number of normal events in the kubernetes cluster",
	}, []string{
		"involved_object_kind",
		"involved_object_name",
		"involved_object_namespace",
		"reason",
		"source",
	})
)
// init registers both event counter vectors with the default Prometheus
// registry so they are exported on the process's metrics endpoint.
// MustRegister panics on duplicate or invalid registration, which is the
// desired behavior at startup.
func init() {
	prometheus.MustRegister(
		kubernetesWarningEventCounterVec,
		kubernetesNormalEventCounterVec,
	)
}
// EventRouter is responsible for maintaining a stream of kubernetes
// system Events and pushing them to another channel for storage
type EventRouter struct {
	// kubeClient is the main kubernetes interface
	kubeClient kubernetes.Interface

	// eLister is the store of events populated by the shared informer
	eLister corelisters.EventLister

	// eListerSynched returns true if the event store has been synced.
	// NOTE(review): "Synched" is a typo for "Synced", but the name is kept
	// as-is since it is referenced elsewhere in this file.
	eListerSynched cache.InformerSynced

	// eSink is the destination for observed events.
	// TODO: Determine if we want to support multiple sinks.
	eSink sinks.EventSinkInterface
}
// NewEventRouter will create a new event router using the input params.
// It wires the router's add/update/delete callbacks into the supplied
// shared event informer and selects a sink via sinks.ManufactureSink().
func NewEventRouter(kubeClient kubernetes.Interface, eventsInformer coreinformers.EventInformer) *EventRouter {
	router := &EventRouter{
		kubeClient:     kubeClient,
		eSink:          sinks.ManufactureSink(),
		eLister:        eventsInformer.Lister(),
		eListerSynched: eventsInformer.Informer().HasSynced,
	}

	// Subscribe to the informer's event stream; each callback receives the
	// raw object from the cache.
	eventsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    router.addEvent,
		UpdateFunc: router.updateEvent,
		DeleteFunc: router.deleteEvent,
	})

	return router
}
// Run starts the EventRouter/Controller and blocks until stopCh is closed.
// It first waits for the shared informer cache to finish its initial sync;
// if that wait is aborted (stopCh closed early) it reports the error and
// returns without serving.
func (er *EventRouter) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer glog.Infof("Shutting down EventRouter")

	glog.Infof("Starting EventRouter")

	// Kick the caches into gear: block until the event informer has
	// completed its initial list, or stopCh closes.
	synced := cache.WaitForCacheSync(stopCh, er.eListerSynched)
	if !synced {
		utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
		return
	}

	// All further work happens in the informer callbacks; park here
	// until asked to stop.
	<-stopCh
}
// addEvent is called when an event is created, or during the initial list.
// It records the event in Prometheus and forwards it to the sink with a nil
// "old" event (there is no prior state on add).
func (er *EventRouter) addEvent(obj interface{}) {
	// Defensive comma-ok assertion: a plain obj.(*v1.Event) would panic the
	// whole process if the informer ever hands us an unexpected type.
	e, ok := obj.(*v1.Event)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("expected *v1.Event in addEvent but got %T", obj))
		return
	}
	prometheusEvent(e)
	er.eSink.UpdateEvents(e, nil)
}
// updateEvent is called any time there is an update to an existing event.
// It records the new state in Prometheus and forwards both new and old
// states to the sink.
func (er *EventRouter) updateEvent(objOld interface{}, objNew interface{}) {
	// Defensive comma-ok assertions: plain type assertions would panic the
	// process on an unexpected object type from the informer.
	eOld, ok := objOld.(*v1.Event)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("expected *v1.Event in updateEvent (old) but got %T", objOld))
		return
	}
	eNew, ok := objNew.(*v1.Event)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("expected *v1.Event in updateEvent (new) but got %T", objNew))
		return
	}
	prometheusEvent(eNew)
	er.eSink.UpdateEvents(eNew, eOld)
}
// prometheusEvent is called when an event is added or updated. It increments
// the counter matching the event's type ("Normal" or "Warning"), labeled by
// the involved object, reason, and source host. Events of any other type are
// ignored.
func prometheusEvent(event *v1.Event) {
	var counter prometheus.Counter
	var err error

	switch event.Type {
	case "Normal":
		counter, err = kubernetesNormalEventCounterVec.GetMetricWithLabelValues(
			event.InvolvedObject.Kind,
			event.InvolvedObject.Name,
			event.InvolvedObject.Namespace,
			event.Reason,
			event.Source.Host,
		)
	case "Warning":
		counter, err = kubernetesWarningEventCounterVec.GetMetricWithLabelValues(
			event.InvolvedObject.Kind,
			event.InvolvedObject.Name,
			event.InvolvedObject.Namespace,
			event.Reason,
			event.Source.Host,
		)
	default:
		// BUGFIX: previously an event with any other type left counter nil
		// (and err nil), and the unconditional counter.Add(1) below panicked
		// with a nil pointer dereference. Skip unknown types entirely.
		return
	}

	if err != nil {
		// Not sure this is the right place to log this error?
		glog.Warning(err)
		return
	}
	counter.Add(1)
}
// deleteEvent should only occur when the system garbage collects events via
// TTL expiration, so there is no reason to push the deletion to a sink; it is
// logged at high verbosity only.
func (er *EventRouter) deleteEvent(obj interface{}) {
	e, ok := obj.(*v1.Event)
	if !ok {
		// BUGFIX: when the informer misses a watch delete, client-go delivers
		// a cache.DeletedFinalStateUnknown tombstone instead of the object;
		// the previous unchecked assertion would panic in that case.
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("expected *v1.Event or tombstone in deleteEvent but got %T", obj))
			return
		}
		e, ok = tombstone.Obj.(*v1.Event)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a *v1.Event: %T", tombstone.Obj))
			return
		}
	}
	// NOTE: This should *only* happen on TTL expiration; there
	// is no reason to push this to a sink.
	glog.V(5).Infof("Event Deleted from the system:\n%v", e)
}