Skip to content

Commit

Permalink
zap logger
Browse files Browse the repository at this point in the history
  • Loading branch information
nithu0115 committed Mar 19, 2020
1 parent 71538ac commit a00483a
Show file tree
Hide file tree
Showing 37 changed files with 710 additions and 429 deletions.
26 changes: 24 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -287,7 +287,7 @@ Default: `DEBUG`

Valid Values: `trace`, `debug`, `info`, `warn`, `error`, `critical` or `off`. (Not case sensitive)

Specifies the loglevel for ipamd.
Specifies the loglevel for `ipamd`.

---

Expand All @@ -299,7 +299,29 @@ Default: Unset

Valid Values: `stdout` or a file path

Specifies where to write the logging output. Either to stdout or to override the default file.
Specifies where to write the logging output of `ipamd`. Either to stdout or to override the default file (i.e., `/var/log/aws-routed-eni/ipamd.log`).

---

`AWS_VPC_K8S_PLUGIN_LOG_FILE`

Type: String

Default: Unset

Valid Values: `stdout` or a file path

Specifies where to write the logging output for the `aws-cni` plugin. Either to stdout or to override the default file (i.e., `/var/log/aws-routed-eni/plugin.log`).

---

`AWS_VPC_K8S_PLUGIN_LOG_LEVEL`

Type: String

Valid Values: `trace`, `debug`, `info`, `warn`, `error`, `critical` or `off`. (Not case sensitive)

Specifies the log level for the `aws-cni` plugin.

---

Expand Down
18 changes: 8 additions & 10 deletions cmd/aws-k8s-agent/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,29 +16,27 @@ package main
import (
"os"

log "github.com/cihub/seelog"

"github.com/aws/amazon-vpc-cni-k8s/pkg/eniconfig"
"github.com/aws/amazon-vpc-cni-k8s/pkg/ipamd"
"github.com/aws/amazon-vpc-cni-k8s/pkg/k8sapi"
"github.com/aws/amazon-vpc-cni-k8s/pkg/utils/logger"
)

const (
defaultLogFilePath = "/host/var/log/aws-routed-eni/ipamd.log"
)
const binaryName = "ipamd"

var version string

var (
version string
)

func main() {
os.Exit(_main())
}

func _main() int {
defer log.Flush()
logger.SetupLogger(logger.GetLogFileLocation(defaultLogFilePath))
//Do not add anything before initializing logger
logConfig := logger.Configuration{
BinaryName: binaryName,
}
log := logger.New(&logConfig)

log.Infof("Starting L-IPAMD %s ...", version)

Expand Down
23 changes: 10 additions & 13 deletions cmd/cni-metrics-helper/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,14 +21,16 @@ import (
"strings"
"time"

"github.com/golang/glog"
"github.com/aws/amazon-vpc-cni-k8s/pkg/utils/logger"
"github.com/spf13/pflag"

"github.com/aws/amazon-vpc-cni-k8s/cmd/cni-metrics-helper/metrics"
"github.com/aws/amazon-vpc-cni-k8s/pkg/k8sapi"
"github.com/aws/amazon-vpc-cni-k8s/pkg/publisher"
)

var log = logger.DefaultLogger()

type options struct {
kubeconfig string
pullInterval int
Expand All @@ -40,11 +42,8 @@ type options struct {
func main() {
options := &options{}
flags := pflag.NewFlagSet("", pflag.ExitOnError)
// Add glog flags
flags.AddGoFlagSet(flag.CommandLine)
_ = flags.Lookup("logtostderr").Value.Set("true")
flags.Lookup("logtostderr").DefValue = "true"
flags.Lookup("logtostderr").NoOptDefVal = "true"

flags.BoolVar(&options.submitCW, "cloudwatch", true, "a bool")

flags.Usage = func() {
Expand All @@ -54,12 +53,12 @@ func main() {

err := flags.Parse(os.Args)
if err != nil {
glog.Fatalf("Error on parsing parameters: %s", err)
log.Fatalf("Error on parsing parameters: %s", err)
}

err = flag.CommandLine.Parse([]string{})
if err != nil {
glog.Fatalf("Error on parsing commandline: %s", err)
log.Fatalf("Error on parsing commandline: %s", err)
}

if options.help {
Expand All @@ -77,12 +76,11 @@ func main() {
}
}

glog.Infof("Starting CNIMetricsHelper. Sending metrics to CloudWatch: %v", options.submitCW)
log.Infof("Starting CNIMetricsHelper. Sending metrics to CloudWatch: %v", options.submitCW)

kubeClient, err := k8sapi.CreateKubeClient()
if err != nil {
glog.Errorf("Failed to create client: %v", err)
os.Exit(1)
log.Fatalf("Failed to create client: %v", err)
}

discoverController := k8sapi.NewController(kubeClient)
Expand All @@ -96,8 +94,7 @@ func main() {

cw, err = publisher.New(ctx)
if err != nil {
glog.Errorf("Failed to create publisher: %v", err)
os.Exit(1)
log.Fatalf("Failed to create publisher: %v", err)
}
go cw.Start()
defer cw.Stop()
Expand All @@ -109,7 +106,7 @@ func main() {
// metric loop
var pullInterval = 30 // seconds
for range time.Tick(time.Duration(pullInterval) * time.Second) {
glog.Info("Collecting metrics ...")
log.Info("Collecting metrics ...")
metrics.Handler(cniMetric)
}
}
6 changes: 2 additions & 4 deletions cmd/cni-metrics-helper/metrics/cni_metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@
package metrics

import (
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"

Expand Down Expand Up @@ -152,14 +151,13 @@ func CNIMetricsNew(c clientset.Interface, cw publisher.Publisher, d *k8sapi.Cont
}

func (t *CNIMetricsTarget) grabMetricsFromTarget(cniPod string) ([]byte, error) {
glog.Infof("Grabbing metrics from CNI: %s", cniPod)
output, err := getMetricsFromPod(t.kubeClient, cniPod, metav1.NamespaceSystem, metricsPort)
if err != nil {
glog.Errorf("grabMetricsFromTarget: Failed to grab CNI endpoint: %v", err)
log.Errorf("grabMetricsFromTarget: Failed to grab CNI endpoint: %v", err)
return nil, err
}

glog.V(5).Infof("cni-metrics text output: %s", string(output))
log.Infof("cni-metrics text output: %s", string(output))
return output, nil
}

Expand Down
50 changes: 12 additions & 38 deletions cmd/cni-metrics-helper/metrics/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,14 +21,17 @@ import (
"github.com/aws/aws-sdk-go/aws"

"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/golang/glog"

dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
clientset "k8s.io/client-go/kubernetes"

"github.com/aws/amazon-vpc-cni-k8s/pkg/publisher"
"github.com/aws/amazon-vpc-cni-k8s/pkg/utils/logger"
)

var log = logger.DefaultLogger()

type metricMatcher func(metric *dto.Metric) bool
type actionFuncType func(aggregatedValue *float64, sampleValue float64)

Expand Down Expand Up @@ -97,27 +100,16 @@ func getMetricsFromPod(client clientset.Interface, podName string, namespace str
}

func processGauge(metric *dto.Metric, act *metricsAction) {
if act.logToFile {
glog.Infof("Label: %v, Value: %v ", metric.GetLabel(), metric.GetGauge().GetValue())
} else {
glog.V(10).Info("processing GAUGE: ", metric.GetGauge().GetValue())
}
act.actionFunc(&act.data.curSingleDataPoint, metric.GetGauge().GetValue())
}

func processCounter(metric *dto.Metric, act *metricsAction) {
if act.logToFile {
glog.Infof("Label: %v, Value: %v ", metric.GetLabel(), metric.GetCounter().GetValue())
} else {
glog.V(10).Info("processing COUNTER: ", metric.GetCounter().GetValue())
}
act.actionFunc(&act.data.curSingleDataPoint, metric.GetCounter().GetValue())
}

func processPercentile(metric *dto.Metric, act *metricsAction) {
var p99 float64

glog.V(10).Info("processing PERCENTILE: ", p99)
summary := metric.GetSummary()
quantiles := summary.GetQuantile()

Expand All @@ -130,33 +122,27 @@ func processPercentile(metric *dto.Metric, act *metricsAction) {
}

func processHistogram(metric *dto.Metric, act *metricsAction) {
glog.V(5).Info("processing HISTOGRAM:", metric.GetLabel())
glog.V(5).Info(metric.GetHistogram())
histogram := metric.GetHistogram()

for _, bucket := range histogram.GetBucket() {
glog.V(10).Info("processing bucket:", bucket)
existingBucket := false
for _, bucketInAct := range act.bucket.curBucket {
if bucket.GetUpperBound() == *bucketInAct.UpperBound {
// found the matching bucket
glog.V(10).Infof("Found the matching bucket with UpperBound: %f", *bucketInAct.UpperBound)
act.actionFunc(bucketInAct.CumulativeCount, float64(bucket.GetCumulativeCount()))
glog.V(10).Infof("Found: *bucketInAct.CumulativeCount:%f, bucket.GetCumulativeCount():%f",
*bucketInAct.CumulativeCount, float64(bucket.GetCumulativeCount()))
existingBucket = true
break
}
}

if !existingBucket {
glog.V(10).Infof("Create a new bucket with upperBound:%f", bucket.GetUpperBound())
upperBound := new(float64)
*upperBound = float64(bucket.GetUpperBound())
cumulativeCount := new(float64)
*cumulativeCount = float64(bucket.GetCumulativeCount())
newBucket := &bucketPoint{UpperBound: upperBound, CumulativeCount: cumulativeCount}
act.bucket.curBucket = append(act.bucket.curBucket, newBucket)
log.Infof("Created a new bucket with upperBound:%f", bucket.GetUpperBound())
}
}
}
Expand Down Expand Up @@ -186,7 +172,7 @@ func postProcessingCounter(convert metricsConvert) bool {
}

if resetDetected || (noPreviousDataPoint && !noCurrentDataPoint) {
glog.Infof("Reset detected resetDetected: %v, noPreviousDataPoint: %v, noCurrentDataPoint: %v",
log.Infof("Reset detected resetDetected: %v, noPreviousDataPoint: %v, noCurrentDataPoint: %v",
resetDetected, noPreviousDataPoint, noCurrentDataPoint)
}
return resetDetected || (noPreviousDataPoint && !noCurrentDataPoint)
Expand All @@ -199,28 +185,28 @@ func postProcessingHistogram(convert metricsConvert) bool {
for _, action := range convert.actions {
numOfBuckets := len(action.bucket.curBucket)
if numOfBuckets == 0 {
glog.Info("Post Histogram Processing: no bucket found")
log.Info("Post Histogram Processing: no bucket found")
continue
}
for i := 1; i < numOfBuckets; i++ {
glog.V(10).Infof("Found numOfBuckets-i:=%d, *action.bucket.curBucket[numOfBuckets-i].CumulativeCount=%f",
log.Infof("Found numOfBuckets-i:=%d, *action.bucket.curBucket[numOfBuckets-i].CumulativeCount=%f",
numOfBuckets-i, *action.bucket.curBucket[numOfBuckets-i].CumulativeCount)

// Delta against the previous bucket value
// e.g. diff between bucket LE250000 and previous bucket LE125000
*action.bucket.curBucket[numOfBuckets-i].CumulativeCount -= *action.bucket.curBucket[numOfBuckets-i-1].CumulativeCount
glog.V(10).Infof("Found numOfBuckets-i:=%d, *action.bucket.curBucket[numOfBuckets-i].CumulativeCount=%f, *action.bucket.curBucket[numOfBuckets-i-1].CumulativeCount=%f",
log.Infof("Found numOfBuckets-i:=%d, *action.bucket.curBucket[numOfBuckets-i].CumulativeCount=%f, *action.bucket.curBucket[numOfBuckets-i-1].CumulativeCount=%f",
numOfBuckets-i, *action.bucket.curBucket[numOfBuckets-i].CumulativeCount, *action.bucket.curBucket[numOfBuckets-i-1].CumulativeCount)

// Delta against the previous value
if action.bucket.lastBucket != nil {
glog.V(10).Infof("Found *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount=%f",
log.Infof("Found *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount=%f",
*action.bucket.lastBucket[numOfBuckets-i].CumulativeCount)
currentTotal := *action.bucket.curBucket[numOfBuckets-i].CumulativeCount
// Only do delta if there is no restart for metric target
if *action.bucket.curBucket[numOfBuckets-i].CumulativeCount >= *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount {
*action.bucket.curBucket[numOfBuckets-i].CumulativeCount -= *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount
glog.V(10).Infof("Found *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount=%f, *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount=%f",
log.Infof("Found *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount=%f, *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount=%f",
*action.bucket.curBucket[numOfBuckets-i].CumulativeCount, *action.bucket.lastBucket[numOfBuckets-i].CumulativeCount)
} else {
resetDetected = true
Expand Down Expand Up @@ -250,7 +236,6 @@ func postProcessingHistogram(convert metricsConvert) bool {
}

func processMetric(family *dto.MetricFamily, convert metricsConvert) (bool, error) {
glog.Info("Processing metric: ", family.GetName())
resetDetected := false

mType := family.GetType()
Expand Down Expand Up @@ -301,8 +286,6 @@ func produceHistogram(act metricsAction, cw publisher.Publisher) {

prevUpperBound = *bucket.UpperBound
if *bucket.CumulativeCount != 0 {
glog.Infof("Produce HISTOGRAM metrics: %s, max:%f, min:%f, count: %f, sum: %f",
act.cwMetricName, mid, mid, *bucket.CumulativeCount, mid*float64(*bucket.CumulativeCount))
dataPoint := &cloudwatch.MetricDatum{
MetricName: aws.String(act.cwMetricName),
StatisticValues: &cloudwatch.StatisticSet{
Expand Down Expand Up @@ -337,7 +320,6 @@ func produceCloudWatchMetrics(t metricsTarget, families map[string]*dto.MetricFa
for _, action := range convertMetrics.actions {
switch mType {
case dto.MetricType_COUNTER:
glog.Infof("Produce COUNTER metrics: %s, value: %f", action.cwMetricName, action.data.curSingleDataPoint)
if t.submitCloudWatch() {
dataPoint := &cloudwatch.MetricDatum{
MetricName: aws.String(action.cwMetricName),
Expand All @@ -347,7 +329,6 @@ func produceCloudWatchMetrics(t metricsTarget, families map[string]*dto.MetricFa
cw.Publish(dataPoint)
}
case dto.MetricType_GAUGE:
glog.Infof("Produce GAUGE metrics: %s, value: %f", action.cwMetricName, action.data.curSingleDataPoint)
if t.submitCloudWatch() {
dataPoint := &cloudwatch.MetricDatum{
MetricName: aws.String(action.cwMetricName),
Expand All @@ -357,7 +338,6 @@ func produceCloudWatchMetrics(t metricsTarget, families map[string]*dto.MetricFa
cw.Publish(dataPoint)
}
case dto.MetricType_SUMMARY:
glog.Infof("Produce PERCENTILE metrics: %s, value: %f", action.cwMetricName, action.data.curSingleDataPoint)
if t.submitCloudWatch() {
dataPoint := &cloudwatch.MetricDatum{
MetricName: aws.String(action.cwMetricName),
Expand Down Expand Up @@ -397,13 +377,9 @@ func metricsListGrabAggregateConvert(t metricsTarget) (map[string]*dto.MetricFam
resetMetrics(interestingMetrics)

targetList := t.getTargetList()
glog.Info("targetList: ", targetList)
glog.Info("len(targetList)", len(targetList))
for _, target := range targetList {
glog.Infof("Grab/Aggregate metrics from %v", target)
rawOutput, err := t.grabMetricsFromTarget(target)
if err != nil {
glog.Errorf("Failed to getMetricsFromTarget: %v", err)
// it may take times to remove some metric targets
continue
}
Expand All @@ -412,13 +388,11 @@ func metricsListGrabAggregateConvert(t metricsTarget) (map[string]*dto.MetricFam
origFamilies, err := parser.TextToMetricFamilies(bytes.NewReader(rawOutput))

if err != nil {
glog.Warning("Failed to parse metrics:", err)
return nil, nil, true, err
}

families, err = filterMetrics(origFamilies, interestingMetrics)
if err != nil {
glog.Warning("Failed to filter metrics:", err)
return nil, nil, true, err
}

Expand Down Expand Up @@ -447,7 +421,7 @@ func Handler(t metricsTarget) {
families, interestingMetrics, resetDetected, err := metricsListGrabAggregateConvert(t)

if err != nil || resetDetected {
glog.Info("Skipping 1st poll after reset, error:", err)
log.Infof("Skipping 1st poll after reset, error: %v", err)
}

cw := t.getCWMetricsPublisher()
Expand Down
Loading

0 comments on commit a00483a

Please sign in to comment.