Skip to content

Commit

Permalink
Fixing measurement for pod latencies (kube-burner#421)
Browse files Browse the repository at this point in the history
  • Loading branch information
vishnuchalla authored Aug 11, 2023
1 parent 8d033ae commit 5683067
Show file tree
Hide file tree
Showing 8 changed files with 50 additions and 4 deletions.
3 changes: 3 additions & 0 deletions docs/measurements.md
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,9 @@ Where `quantileName` matches with the pod conditions and can be:
- `ContainersReady`: Indicates whether all containers in the pod are ready.
- `Ready`: The pod is able to service requests and should be added to the load balancing pools of all matching services.

!!! note
    There is a V2 version of these latencies as well. Both are being kept with the intention of monitoring them over time (i.e. the precision vs. accuracy problem). Therefore, if you notice a significant discrepancy and want to tell us about it, please feel free to do so. Otherwise, stay with the non-V2 metrics and everything will continue to behave as it always has.

!!! info
More information about the pod conditions can be found at the [kubernetes documentation site](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions).

Expand Down
2 changes: 1 addition & 1 deletion docs/reference/configuration.md
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ In this section is described global job configuration, it holds the following pa
| `waitWhenFinished` | Wait for all pods to be running when all jobs are completed | Boolean | false |

!!! note
The precedence order to wait on resources is Global.waitWhenFinished > Jod.waitWhenFinished > Job.podWait
The precedence order to wait on resources is Global.waitWhenFinished > Job.waitWhenFinished > Job.podWait

kube-burner connects k8s clusters using the following methods in this order:

Expand Down
7 changes: 5 additions & 2 deletions pkg/burner/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -93,8 +93,9 @@ func setupCreateJob(jobConfig config.Job) Executor {
// RunCreateJob executes a creation job
func (ex *Executor) RunCreateJob(iterationStart, iterationEnd int, waitListNamespaces *[]string) {
nsLabels := map[string]string{
"kube-burner-job": ex.Name,
"kube-burner-uuid": ex.uuid,
"kube-burner-job": ex.Name,
"kube-burner-uuid": ex.uuid,
"kube-burner-runid": ex.runid,
}
var wg sync.WaitGroup
var ns string
Expand Down Expand Up @@ -198,6 +199,7 @@ func (ex *Executor) replicaHandler(objectIndex int, obj object, ns string, itera
"kube-burner-uuid": ex.uuid,
"kube-burner-job": ex.Name,
"kube-burner-index": strconv.Itoa(objectIndex),
"kube-burner-runid": ex.runid,
}
templateData := map[string]interface{}{
jobName: ex.Name,
Expand All @@ -219,6 +221,7 @@ func (ex *Executor) replicaHandler(objectIndex int, obj object, ns string, itera
labels[k] = v
}
newObject.SetLabels(labels)
setMetadataLabels(newObject, labels)
json.Marshal(newObject.Object)
// replicaWg is necessary because we want to wait for all replicas
// to be created before running any other action such as verify objects,
Expand Down
2 changes: 2 additions & 0 deletions pkg/burner/job.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ type Executor struct {
End time.Time
config.Job
uuid string
runid string
limiter *rate.Limiter
}

Expand Down Expand Up @@ -267,6 +268,7 @@ func newExecutorList(configSpec config.Spec, uuid string, timeout time.Duration)
ex.limiter = rate.NewLimiter(rate.Limit(job.QPS), job.Burst)
ex.Job = job
ex.uuid = uuid
ex.runid = configSpec.GlobalConfig.RUNID
executorList = append(executorList, ex)
}
return executorList
Expand Down
17 changes: 17 additions & 0 deletions pkg/burner/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,23 @@ func prepareTemplate(original []byte) ([]byte, error) {
return original, nil
}

// setMetadataLabels merges the given labels into the object's pod template
// labels (spec.template.metadata.labels). This is needed for resources such
// as Deployments and ReplicaSets: object.SetLabels() only labels the
// top-level object, not the pods it spawns, while the template labels are
// what end up on the child pods (and are what the pod-latency measurement
// matches on via the kube-burner-runid label).
//
// Objects without a pod template (e.g. plain Pods) are left untouched.
// Unlike the original version, a pod template that exists but has no labels
// map yet now gets one created instead of being silently skipped.
func setMetadataLabels(obj *unstructured.Unstructured, labels map[string]string) {
	// Only act on objects that actually carry a pod template; creating
	// spec.template on arbitrary objects would corrupt them.
	if _, found, err := unstructured.NestedMap(obj.Object, "spec", "template"); err != nil || !found {
		return
	}
	templatePath := []string{"spec", "template", "metadata", "labels"}
	templateLabels, _, err := unstructured.NestedMap(obj.Object, templatePath...)
	if err != nil {
		// spec.template.metadata.labels holds a non-map value; leave it alone.
		return
	}
	if templateLabels == nil {
		templateLabels = make(map[string]interface{}, len(labels))
	}
	for k, v := range labels {
		templateLabels[k] = v
	}
	// Best effort: SetNestedMap only fails when an intermediate field is not
	// a map, which the checks above already rule out.
	//nolint:errcheck
	unstructured.SetNestedMap(obj.Object, templateLabels, templatePath...)
}

func yamlToUnstructured(y []byte, uns *unstructured.Unstructured) (runtime.Object, *schema.GroupVersionKind) {
o, gvk, err := scheme.Codecs.UniversalDeserializer().Decode(y, nil, uns)
if err != nil {
Expand Down
2 changes: 2 additions & 0 deletions pkg/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ import (
"github.com/cloud-bulldozer/kube-burner/pkg/util"
log "github.com/sirupsen/logrus"

uid "github.com/satori/go.uuid"
"gopkg.in/yaml.v3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation"
Expand All @@ -40,6 +41,7 @@ import (

var configSpec = Spec{
GlobalConfig: GlobalConfig{
RUNID: uid.NewV4().String(),
GC: false,
GCTimeout: 1 * time.Hour,
RequestTimeout: 15 * time.Second,
Expand Down
2 changes: 2 additions & 0 deletions pkg/config/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@ type Spec struct {
type GlobalConfig struct {
// Benchmark UUID
UUID string
// Benchmark RUNID
RUNID string
// IndexerConfig contains a IndexerConfig definition
IndexerConfig indexers.IndexerConfig `yaml:"indexerConfig"`
// Measurements describes a list of measurements kube-burner
Expand Down
19 changes: 18 additions & 1 deletion pkg/measurements/pod_latency.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,8 @@ func (p *podLatency) handleCreatePod(obj interface{}) {
pod := obj.(*v1.Pod)
jobConfig := *factory.jobConfig
if _, exists := p.metrics[string(pod.UID)]; !exists {
if strings.Contains(pod.Namespace, factory.jobConfig.Namespace) {
runid, exists := pod.Labels["kube-burner-runid"]
if exists && runid == globalCfg.RUNID {
p.metrics[string(pod.UID)] = podMetric{
Timestamp: now,
CreationTimestampV2: pod.CreationTimestamp.Time.UTC(),
Expand Down Expand Up @@ -226,6 +227,10 @@ func (p *podLatency) normalizeMetrics() {
log.Tracef("ContainersReadyLatency for pod %+v falling under negative case. So explicitly setting it to 0", m.Name)
m.ContainersReadyLatency = 0
}
if m.ContainersReadyLatencyV2 < 0 {
log.Tracef("ContainersReadyLatencyV2 for pod %+v falling under negative case. So explicitly setting it to 0", m.Name)
m.ContainersReadyLatencyV2 = 0
}
log.Tracef("ContainersReadyLatency: %+v for pod %+v", m.ContainersReadyLatency, m.Name)
log.Tracef("ContainersReadyLatencyV2: %+v for pod %+v", m.ContainersReadyLatencyV2, m.Name)

Expand All @@ -235,6 +240,10 @@ func (p *podLatency) normalizeMetrics() {
log.Tracef("SchedulingLatency for pod %+v falling under negative case. So explicitly setting it to 0", m.Name)
m.SchedulingLatency = 0
}
if m.SchedulingLatencyV2 < 0 {
log.Tracef("SchedulingLatencyV2 for pod %+v falling under negative case. So explicitly setting it to 0", m.Name)
m.SchedulingLatencyV2 = 0
}
log.Tracef("SchedulingLatency: %+v for pod %+v", m.SchedulingLatency, m.Name)
log.Tracef("SchedulingLatencyV2: %+v for pod %+v", m.SchedulingLatencyV2, m.Name)

Expand All @@ -244,6 +253,10 @@ func (p *podLatency) normalizeMetrics() {
log.Tracef("InitializedLatency for pod %+v falling under negative case. So explicitly setting it to 0", m.Name)
m.InitializedLatency = 0
}
if m.InitializedLatencyV2 < 0 {
log.Tracef("InitializedLatencyV2 for pod %+v falling under negative case. So explicitly setting it to 0", m.Name)
m.InitializedLatencyV2 = 0
}
log.Tracef("InitializedLatency: %+v for pod %+v", m.InitializedLatency, m.Name)
log.Tracef("InitializedLatencyV2: %+v for pod %+v", m.InitializedLatencyV2, m.Name)

Expand All @@ -253,6 +266,10 @@ func (p *podLatency) normalizeMetrics() {
log.Tracef("PodReadyLatency for pod %+v falling under negative case. So explicitly setting it to 0", m.Name)
m.PodReadyLatency = 0
}
if m.PodReadyLatencyV2 < 0 {
log.Tracef("PodReadyLatencyV2 for pod %+v falling under negative case. So explicitly setting it to 0", m.Name)
m.PodReadyLatencyV2 = 0
}
log.Tracef("PodReadyLatency: %+v for pod %+v", m.PodReadyLatency, m.Name)
log.Tracef("PodReadyLatencyV2: %+v for pod %+v", m.PodReadyLatencyV2, m.Name)
p.normLatencies = append(p.normLatencies, m)
Expand Down

0 comments on commit 5683067

Please sign in to comment.