Skip to content

Commit

Permalink
Zap logger
Browse files Browse the repository at this point in the history
  • Loading branch information
nithu0115 committed Feb 3, 2020
1 parent 66a5955 commit fad5934
Show file tree
Hide file tree
Showing 24 changed files with 505 additions and 218 deletions.
14 changes: 13 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -299,7 +299,19 @@ Default: Unset

Valid Values: `stdout` or a file path

Specifies where to write the logging output. Either to stdout or to override the default file.
Specifies where to write the logging output of L-IPAMD. Either to stdout or to override the default file.

---

`AWS_VPC_K8S_PLUGIN_LOG_FILE`

Type: String

Default: Unset

Valid Values: `stdout` or a file path

Specifies where to write the logging output for the CNI plugin. Either to stdout or to override the default file.

---

Expand Down
5 changes: 1 addition & 4 deletions cmd/aws-k8s-agent/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,6 @@ package main
import (
"os"

log "github.com/cihub/seelog"

"github.com/aws/amazon-vpc-cni-k8s/pkg/eniconfig"
"github.com/aws/amazon-vpc-cni-k8s/pkg/ipamd"
"github.com/aws/amazon-vpc-cni-k8s/pkg/k8sapi"
Expand All @@ -30,15 +28,14 @@ const (

var (
version string
log = logger.InitZapLogger("L-IPamD")
)

func main() {
os.Exit(_main())
}

func _main() int {
defer log.Flush()
logger.SetupLogger(logger.GetLogFileLocation(defaultLogFilePath))

log.Infof("Starting L-IPAMD %s ...", version)

Expand Down
23 changes: 10 additions & 13 deletions cmd/cni-metrics-helper/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,14 +21,16 @@ import (
"strings"
"time"

"github.com/golang/glog"
"github.com/aws/amazon-vpc-cni-k8s/pkg/utils/logger"
"github.com/spf13/pflag"

"github.com/aws/amazon-vpc-cni-k8s/cmd/cni-metrics-helper/metrics"
"github.com/aws/amazon-vpc-cni-k8s/pkg/k8sapi"
"github.com/aws/amazon-vpc-cni-k8s/pkg/publisher"
)

var log = logger.InitZapLogger("cni-metrics-helper")

type options struct {
kubeconfig string
pullInterval int
Expand All @@ -40,11 +42,8 @@ type options struct {
func main() {
options := &options{}
flags := pflag.NewFlagSet("", pflag.ExitOnError)
// Add glog flags
flags.AddGoFlagSet(flag.CommandLine)
_ = flags.Lookup("logtostderr").Value.Set("true")
flags.Lookup("logtostderr").DefValue = "true"
flags.Lookup("logtostderr").NoOptDefVal = "true"

flags.BoolVar(&options.submitCW, "cloudwatch", true, "a bool")

flags.Usage = func() {
Expand All @@ -54,12 +53,12 @@ func main() {

err := flags.Parse(os.Args)
if err != nil {
glog.Fatalf("Error on parsing parameters: %s", err)
log.Fatalf("Error on parsing parameters: %s", err)
}

err = flag.CommandLine.Parse([]string{})
if err != nil {
glog.Fatalf("Error on parsing commandline: %s", err)
log.Fatalf("Error on parsing commandline: %s", err)
}

if options.help {
Expand All @@ -77,12 +76,11 @@ func main() {
}
}

glog.Infof("Starting CNIMetricsHelper. Sending metrics to CloudWatch: %v", options.submitCW)
log.Infof("Starting CNIMetricsHelper. Sending metrics to CloudWatch: %v", options.submitCW)

kubeClient, err := k8sapi.CreateKubeClient()
if err != nil {
glog.Errorf("Failed to create client: %v", err)
os.Exit(1)
log.Fatalf("Failed to create client: %v", err)
}

discoverController := k8sapi.NewController(kubeClient)
Expand All @@ -96,8 +94,7 @@ func main() {

cw, err = publisher.New(ctx)
if err != nil {
glog.Errorf("Failed to create publisher: %v", err)
os.Exit(1)
log.Fatalf("Failed to create publisher: %v", err)
}
go cw.Start()
defer cw.Stop()
Expand All @@ -109,7 +106,7 @@ func main() {
// metric loop
var pullInterval = 30 // seconds
for range time.Tick(time.Duration(pullInterval) * time.Second) {
glog.Info("Collecting metrics ...")
log.Info("Collecting metrics ...")
metrics.Handler(cniMetric)
}
}
64 changes: 34 additions & 30 deletions cmd/cni-metrics-helper/metrics/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,16 +19,20 @@ import (
"fmt"

"github.com/aws/aws-sdk-go/aws"
"go.uber.org/zap"

"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/golang/glog"

dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
clientset "k8s.io/client-go/kubernetes"

"github.com/aws/amazon-vpc-cni-k8s/pkg/publisher"
"github.com/aws/amazon-vpc-cni-k8s/pkg/utils/logger"
)

var log = logger.GetZapLogger()

type metricMatcher func(metric *dto.Metric) bool
type actionFuncType func(aggregatedValue *float64, sampleValue float64)

Expand Down Expand Up @@ -98,26 +102,26 @@ func getMetricsFromPod(client clientset.Interface, podName string, namespace str

func processGauge(metric *dto.Metric, act *metricsAction) {
if act.logToFile {
glog.Infof("Label: %v, Value: %v ", metric.GetLabel(), metric.GetGauge().GetValue())
log.Infof("Label: %v, Value: %v ", metric.GetLabel(), metric.GetGauge().GetValue())
} else {
glog.V(10).Info("processing GAUGE: ", metric.GetGauge().GetValue())
log.Infof("processing: ", zap.Float64("GAUGE", metric.GetGauge().GetValue()))
}
act.actionFunc(&act.data.curSingleDataPoint, metric.GetGauge().GetValue())
}

func processCounter(metric *dto.Metric, act *metricsAction) {
if act.logToFile {
glog.Infof("Label: %v, Value: %v ", metric.GetLabel(), metric.GetCounter().GetValue())
log.Infof("Label: %v, Value: %v ", metric.GetLabel(), metric.GetCounter().GetValue())
} else {
glog.V(10).Info("processing COUNTER: ", metric.GetCounter().GetValue())
log.Infof("processing : ", zap.Float64("COUNTER", metric.GetCounter().GetValue()))
}
act.actionFunc(&act.data.curSingleDataPoint, metric.GetCounter().GetValue())
}

func processPercentile(metric *dto.Metric, act *metricsAction) {
var p99 float64

glog.V(10).Info("processing PERCENTILE: ", p99)
log.Infof("processing: ", zap.Float64("PERCENTILE", p99))
summary := metric.GetSummary()
quantiles := summary.GetQuantile()

Expand All @@ -130,27 +134,27 @@ func processPercentile(metric *dto.Metric, act *metricsAction) {
}

func processHistogram(metric *dto.Metric, act *metricsAction) {
glog.V(5).Info("processing HISTOGRAM:", metric.GetLabel())
glog.V(5).Info(metric.GetHistogram())
log.Infof("processing:", zap.Any("HISTOGRAM", metric.GetLabel()))
log.Infof("processing:", zap.Any("GETHISTOGRAM", metric.GetHistogram()))
histogram := metric.GetHistogram()

for _, bucket := range histogram.GetBucket() {
glog.V(10).Info("processing bucket:", bucket)
log.Infof("processing:", zap.Any("bucket", bucket))
existingBucket := false
for _, bucketInAct := range act.bucket.curBucket {
if bucket.GetUpperBound() == *bucketInAct.UpperBound {
// found the matching bucket
glog.V(10).Infof("Found the matching bucket with UpperBound: %f", *bucketInAct.UpperBound)
log.Infof("Found the matching bucket with UpperBound: %f", *bucketInAct.UpperBound)
act.actionFunc(bucketInAct.CumulativeCount, float64(bucket.GetCumulativeCount()))
glog.V(10).Infof("Found: *bucketInAct.CumulativeCount:%f, bucket.GetCumulativeCount():%f",
log.Infof("Found: *bucketInAct.CumulativeCount:%f, bucket.GetCumulativeCount():%f",
*bucketInAct.CumulativeCount, float64(bucket.GetCumulativeCount()))
existingBucket = true
break
}
}

if !existingBucket {
glog.V(10).Infof("Create a new bucket with upperBound:%f", bucket.GetUpperBound())
log.Infof("Create a new bucket with upperBound:%f", bucket.GetUpperBound())
upperBound := new(float64)
*upperBound = float64(bucket.GetUpperBound())
cumulativeCount := new(float64)
Expand Down Expand Up @@ -186,7 +190,7 @@ func postProcessingCounter(convert metricsConvert) bool {
}

if resetDetected || (noPreviousDataPoint && !noCurrentDataPoint) {
glog.Infof("Reset detected resetDetected: %v, noPreviousDataPoint: %v, noCurrentDataPoint: %v",
log.Infof("Reset detected resetDetected: %v, noPreviousDataPoint: %v, noCurrentDataPoint: %v",
resetDetected, noPreviousDataPoint, noCurrentDataPoint)
}
return resetDetected || (noPreviousDataPoint && !noCurrentDataPoint)
Expand All @@ -199,28 +203,28 @@ func postProcessingHistogram(convert metricsConvert) bool {
for _, action := range convert.actions {
numOfBuckets := len(action.bucket.curBucket)
if numOfBuckets == 0 {
glog.Info("Post Histogram Processing: no bucket found")
log.Info("Post Histogram Processing: no bucket found")
continue
}
for i := 1; i < numOfBuckets; i++ {
glog.V(10).Infof("Found numOfBuckets-i:=%d, *action.bucket.curBucket[numOfBuckets-i].CumulativeCount=%f",
log.Infof("Found numOfBuckets-i:=%d, *action.bucket.curBucket[numOfBuckets-i].CumulativeCount=%f",
numOfBuckets-i, *action.bucket.curBucket[numOfBuckets-i].CumulativeCount)

// Delta against the previous bucket value
// e.g. diff between bucket LE250000 and previous bucket LE125000
*action.bucket.curBucket[numOfBuckets-i].CumulativeCount -= *action.bucket.curBucket[numOfBuckets-i-1].CumulativeCount
glog.V(10).Infof("Found numOfBuckets-i:=%d, *action.bucket.curBucket[numOfBuckets-i].CumulativeCount=%f, *action.bucket.curBucket[numOfBuckets-i-1].CumulativeCount=%f",
log.Infof("Found numOfBuckets-i:=%d, *action.bucket.curBucket[numOfBuckets-i].CumulativeCount=%f, *action.bucket.curBucket[numOfBuckets-i-1].CumulativeCount=%f",
numOfBuckets-i, *action.bucket.curBucket[numOfBuckets-i].CumulativeCount, *action.bucket.curBucket[numOfBuckets-i-1].CumulativeCount)

// Delta against the previous value
if action.bucket.lastBucket != nil {
glog.V(10).Infof("Found *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount=%f",
log.Infof("Found *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount=%f",
*action.bucket.lastBucket[numOfBuckets-i].CumulativeCount)
currentTotal := *action.bucket.curBucket[numOfBuckets-i].CumulativeCount
// Only do delta if there is no restart for metric target
if *action.bucket.curBucket[numOfBuckets-i].CumulativeCount >= *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount {
*action.bucket.curBucket[numOfBuckets-i].CumulativeCount -= *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount
glog.V(10).Infof("Found *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount=%f, *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount=%f",
log.Infof("Found *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount=%f, *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount=%f",
*action.bucket.curBucket[numOfBuckets-i].CumulativeCount, *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount)
} else {
resetDetected = true
Expand Down Expand Up @@ -250,7 +254,7 @@ func postProcessingHistogram(convert metricsConvert) bool {
}

func processMetric(family *dto.MetricFamily, convert metricsConvert) (bool, error) {
glog.Info("Processing metric: ", family.GetName())
log.Infof("Processing: ", zap.String("metric", family.GetName()))
resetDetected := false

mType := family.GetType()
Expand Down Expand Up @@ -301,7 +305,7 @@ func produceHistogram(act metricsAction, cw publisher.Publisher) {

prevUpperBound = *bucket.UpperBound
if *bucket.CumulativeCount != 0 {
glog.Infof("Produce HISTOGRAM metrics: %s, max:%f, min:%f, count: %f, sum: %f",
log.Infof("Produce HISTOGRAM metrics: %s, max:%f, min:%f, count: %f, sum: %f",
act.cwMetricName, mid, mid, *bucket.CumulativeCount, mid*float64(*bucket.CumulativeCount))
dataPoint := &cloudwatch.MetricDatum{
MetricName: aws.String(act.cwMetricName),
Expand Down Expand Up @@ -337,7 +341,7 @@ func produceCloudWatchMetrics(t metricsTarget, families map[string]*dto.MetricFa
for _, action := range convertMetrics.actions {
switch mType {
case dto.MetricType_COUNTER:
glog.Infof("Produce COUNTER metrics: %s, value: %f", action.cwMetricName, action.data.curSingleDataPoint)
log.Infof("Produce COUNTER metrics: %s, value: %f", action.cwMetricName, action.data.curSingleDataPoint)
if t.submitCloudWatch() {
dataPoint := &cloudwatch.MetricDatum{
MetricName: aws.String(action.cwMetricName),
Expand All @@ -347,7 +351,7 @@ func produceCloudWatchMetrics(t metricsTarget, families map[string]*dto.MetricFa
cw.Publish(dataPoint)
}
case dto.MetricType_GAUGE:
glog.Infof("Produce GAUGE metrics: %s, value: %f", action.cwMetricName, action.data.curSingleDataPoint)
log.Infof("Produce GAUGE metrics: %s, value: %f", action.cwMetricName, action.data.curSingleDataPoint)
if t.submitCloudWatch() {
dataPoint := &cloudwatch.MetricDatum{
MetricName: aws.String(action.cwMetricName),
Expand All @@ -357,7 +361,7 @@ func produceCloudWatchMetrics(t metricsTarget, families map[string]*dto.MetricFa
cw.Publish(dataPoint)
}
case dto.MetricType_SUMMARY:
glog.Infof("Produce PERCENTILE metrics: %s, value: %f", action.cwMetricName, action.data.curSingleDataPoint)
log.Infof("Produce PERCENTILE metrics: %s, value: %f", action.cwMetricName, action.data.curSingleDataPoint)
if t.submitCloudWatch() {
dataPoint := &cloudwatch.MetricDatum{
MetricName: aws.String(action.cwMetricName),
Expand Down Expand Up @@ -397,13 +401,13 @@ func metricsListGrabAggregateConvert(t metricsTarget) (map[string]*dto.MetricFam
resetMetrics(interestingMetrics)

targetList := t.getTargetList()
glog.Info("targetList: ", targetList)
glog.Info("len(targetList)", len(targetList))
log.Infof("targetList: %v ", targetList)
log.Infof("len(targetList) %d", len(targetList))
for _, target := range targetList {
glog.Infof("Grab/Aggregate metrics from %v", target)
log.Infof("Grab/Aggregate metrics from %v", target)
rawOutput, err := t.grabMetricsFromTarget(target)
if err != nil {
glog.Errorf("Failed to getMetricsFromTarget: %v", err)
log.Errorf("Failed to getMetricsFromTarget: %v", err)
// it may take times to remove some metric targets
continue
}
Expand All @@ -412,13 +416,13 @@ func metricsListGrabAggregateConvert(t metricsTarget) (map[string]*dto.MetricFam
origFamilies, err := parser.TextToMetricFamilies(bytes.NewReader(rawOutput))

if err != nil {
glog.Warning("Failed to parse metrics:", err)
log.Warnf("Failed to parse metrics:", zap.Error(err))
return nil, nil, true, err
}

families, err = filterMetrics(origFamilies, interestingMetrics)
if err != nil {
glog.Warning("Failed to filter metrics:", err)
log.Warnf("Failed to filter metrics:", zap.Error(err))
return nil, nil, true, err
}

Expand Down Expand Up @@ -447,7 +451,7 @@ func Handler(t metricsTarget) {
families, interestingMetrics, resetDetected, err := metricsListGrabAggregateConvert(t)

if err != nil || resetDetected {
glog.Info("Skipping 1st poll after reset, error:", err)
log.Infof("Skipping 1st poll after reset, error: %v", err)
}

cw := t.getCWMetricsPublisher()
Expand Down
Loading

0 comments on commit fad5934

Please sign in to comment.