Skip to content

Commit

Permalink
Optimize metric collection in benchmarks with multiple jobs (kube-burner#128)
Browse files Browse the repository at this point in the history

* Optimize metric collection in benchmarks with multiple jobs

Signed-off-by: Raul Sevilla <[email protected]>

* Fix tests

Signed-off-by: Raul Sevilla <[email protected]>

* Index metadata at the end of benchmark

Signed-off-by: Raul Sevilla <[email protected]>
  • Loading branch information
rsevilla87 authored Sep 30, 2021
1 parent cc055fb commit 607f561
Show file tree
Hide file tree
Showing 3 changed files with 36 additions and 20 deletions.
7 changes: 5 additions & 2 deletions cmd/kube-burner/kube-burner.go
Original file line number Diff line number Diff line change
Expand Up @@ -350,8 +350,11 @@ func steps(uuid string, p *prometheus.Prometheus, alertM *alerting.AlertManager)
jobList[jobPosition].End = time.Now().UTC()
elapsedTime := jobList[jobPosition].End.Sub(jobList[jobPosition].Start).Seconds()
log.Infof("Job %s took %.2f seconds", job.Config.Name, elapsedTime)
if config.ConfigSpec.GlobalConfig.IndexerConfig.Enabled {
err := burner.IndexMetadataInfo(indexer, uuid, elapsedTime, job.Config, jobList[jobPosition].Start)
}
if config.ConfigSpec.GlobalConfig.IndexerConfig.Enabled {
for _, job := range jobList {
elapsedTime := job.End.Sub(job.Start).Seconds()
err := burner.IndexMetadataInfo(indexer, uuid, elapsedTime, job.Config, job.Start)
if err != nil {
log.Errorf(err.Error())
}
Expand Down
47 changes: 30 additions & 17 deletions pkg/prometheus/prometheus.go
Original file line number Diff line number Diff line change
Expand Up @@ -98,26 +98,26 @@ func (p *Prometheus) ReadProfile(metricsProfile string) error {

// ScrapeMetrics scrapes the metrics defined in the metrics profile over the
// [start, end] window by wrapping the interval in a single synthetic
// "kube-burner-indexing" job and delegating to ScrapeJobsMetrics.
func (p *Prometheus) ScrapeMetrics(start, end time.Time, indexer *indexers.Indexer) error {
	// Synthetic single-job list: the whole window is attributed to one
	// placeholder job so the job-aware scraping path can be reused.
	jobList := []burner.Executor{
		{
			Start: start,
			End:   end,
			Config: config.Job{
				Name: "kube-burner-indexing"},
		},
	}
	err := p.ScrapeJobsMetrics(jobList, indexer)
	return err
}

// ScrapeJobsMetrics gets all prometheus metrics required and handles them.
// It scrapes each job's [Start, End] window in order and stops at the first
// scrape that fails, returning that error; nil when every job succeeds.
func (p *Prometheus) ScrapeJobsMetrics(jobList []burner.Executor, indexer *indexers.Indexer) error {
	for i := range jobList {
		j := &jobList[i]
		if err := p.scrapeMetrics(j.Config.Name, j.Start, j.End, indexer); err != nil {
			return err
		}
	}
	return nil
}

func (p *Prometheus) scrapeMetrics(jobName string, start, end time.Time, indexer *indexers.Indexer) error {
start := jobList[0].Start
end := jobList[len(jobList)-1].End
var filename string
var err error
var v model.Value
log.Infof("🔍 Scraping prometheus metrics for job %s from %s to %s", jobName, start, end)
log.Infof("🔍 Scraping prometheus metrics for benchmark from %s to %s", start, end)
for _, md := range p.MetricProfile {
var metrics []interface{}
if md.Instant {
Expand All @@ -126,7 +126,7 @@ func (p *Prometheus) scrapeMetrics(jobName string, start, end time.Time, indexer
log.Warnf("Error found with query %s: %s", md.Query, err)
continue
}
if err := p.parseVector(md.MetricName, md.Query, jobName, v, &metrics); err != nil {
if err := p.parseVector(md.MetricName, md.Query, jobList, v, &metrics); err != nil {
log.Warnf("Error found parsing result from query %s: %s", md.Query, err)
}
} else {
Expand All @@ -137,13 +137,13 @@ func (p *Prometheus) scrapeMetrics(jobName string, start, end time.Time, indexer
log.Warnf("Error found with query %s: %s", md.Query, err)
continue
}
if err := p.parseMatrix(md.MetricName, md.Query, jobName, v, &metrics); err != nil {
if err := p.parseMatrix(md.MetricName, md.Query, jobList, v, &metrics); err != nil {
log.Warnf("Error found parsing result from query %s: %s", md.Query, err)
continue
}
}
if config.ConfigSpec.GlobalConfig.WriteToFile {
filename = fmt.Sprintf("%s-%s.json", jobName, md.MetricName)
filename = fmt.Sprintf("%s-%s.json", md.MetricName, p.uuid)
if config.ConfigSpec.GlobalConfig.MetricsDirectory != "" {
err = os.MkdirAll(config.ConfigSpec.GlobalConfig.MetricsDirectory, 0744)
if err != nil {
Expand Down Expand Up @@ -174,14 +174,21 @@ func (p *Prometheus) scrapeMetrics(jobName string, start, end time.Time, indexer
}
}
return nil

}

func (p *Prometheus) parseVector(metricName, query, jobName string, value model.Value, metrics *[]interface{}) error {
func (p *Prometheus) parseVector(metricName, query string, jobList []burner.Executor, value model.Value, metrics *[]interface{}) error {
var jobName string
data, ok := value.(model.Vector)
if !ok {
return fmt.Errorf("Unsupported result format: %s", value.Type().String())
}
for _, v := range data {
for _, job := range jobList {
if v.Timestamp.Time().Before(job.End) {
jobName = job.Config.Name
}
}
m := metric{
Labels: make(map[string]string),
UUID: p.uuid,
Expand All @@ -206,13 +213,19 @@ func (p *Prometheus) parseVector(metricName, query, jobName string, value model.
return nil
}

func (p *Prometheus) parseMatrix(metricName, query string, jobName string, value model.Value, metrics *[]interface{}) error {
func (p *Prometheus) parseMatrix(metricName, query string, jobList []burner.Executor, value model.Value, metrics *[]interface{}) error {
var jobName string
data, ok := value.(model.Matrix)
if !ok {
return fmt.Errorf("Unsupported result format: %s", value.Type().String())
}
for _, v := range data {
for _, val := range v.Values {
for _, job := range jobList {
if val.Timestamp.Time().Before(job.End) {
jobName = job.Config.Name
}
}
m := metric{
Labels: make(map[string]string),
UUID: p.uuid,
Expand Down
2 changes: 1 addition & 1 deletion test/run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ check_running_pods() {
}

check_files () {
for f in collected-metrics/namespaced-prometheusRSS.json collected-metrics/not-namespaced-prometheusRSS.json collected-metrics/namespaced-podLatency.json collected-metrics/namespaced-podLatency-summary.json; do
for f in collected-metrics/prometheusRSS-${uuid}.json collected-metrics/prometheusRSS-${uuid}.json collected-metrics/namespaced-podLatency.json collected-metrics/namespaced-podLatency-summary.json; do
log "Checking file ${f}"
if [[ ! -f $f ]]; then
log "File ${f} not present"
Expand Down

0 comments on commit 607f561

Please sign in to comment.