Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

kubeletstats v102 rebase fixes #81

Merged
merged 1 commit into from
Jun 27, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ func (a *metricDataAccumulator) podStats(s stats.PodStats) {

currentTime := pcommon.NewTimestampFromTime(a.time)
addUptimeMetric(a.mbs.PodMetricsBuilder, metadata.PodUptimeMetrics.Uptime, s.StartTime, currentTime)
addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.PodCPUMetrics, s.CPU, currentTime, a.metadata.podResources[s.PodRef.UID], 0)
addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.PodCPUMetrics, s.CPU, currentTime, a.metadata.podResources[s.PodRef.UID], a.metadata.cpuNodeLimit)
addMemoryMetrics(a.mbs.PodMetricsBuilder, metadata.PodMemoryMetrics, s.Memory, currentTime, a.metadata.podResources[s.PodRef.UID])
addFilesystemMetrics(a.mbs.PodMetricsBuilder, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime)
addNetworkMetrics(a.mbs.PodMetricsBuilder, metadata.PodNetworkMetrics, s.Network, currentTime)
Expand Down Expand Up @@ -172,7 +172,6 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont

resourceKey := sPod.PodRef.UID + s.Name
currentTime := pcommon.NewTimestampFromTime(a.time)
resourceKey := sPod.PodRef.UID + s.Name
addUptimeMetric(a.mbs.ContainerMetricsBuilder, metadata.ContainerUptimeMetrics.Uptime, s.StartTime, currentTime)
addCPUMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerCPUMetrics, s.CPU, currentTime, a.metadata.containerResources[resourceKey], a.metadata.cpuNodeLimit)
addMemoryMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerMemoryMetrics, s.Memory, currentTime, a.metadata.containerResources[resourceKey])
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ func TestMetadataErrorCases(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
testScenario: func(acc metricDataAccumulator) {
now := metav1.Now()
podStats := stats.PodStats{
Expand All @@ -84,7 +84,7 @@ func TestMetadataErrorCases(t *testing.T) {
metricGroupsToCollect: map[MetricGroup]bool{
VolumeMetricGroup: true,
},
metadata: NewMetadata([]MetadataLabel{MetadataLabelVolumeType}, nil, NodeLimits{}, nil),
metadata: NewMetadata([]MetadataLabel{MetadataLabelVolumeType}, nil, nil, NodeLimits{}, nil),
testScenario: func(acc metricDataAccumulator) {
podStats := stats.PodStats{
PodRef: stats.PodReference{
Expand Down Expand Up @@ -126,7 +126,7 @@ func TestMetadataErrorCases(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
testScenario: func(acc metricDataAccumulator) {
podStats := stats.PodStats{
PodRef: stats.PodReference{
Expand Down Expand Up @@ -170,8 +170,8 @@ func TestMetadataErrorCases(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
detailedPVCLabelsSetterOverride: func(*metadata.ResourceBuilder, string, string, string) error {
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
detailedPVCLabelsSetterOverride: func(*metadata.ResourceBuilder, string, string, string) ([]metadata.ResourceMetricsOption, error) {
// Mock failure cases.
return nil, errors.New("")
},
Expand Down Expand Up @@ -275,7 +275,7 @@ func TestGetServiceName(t *testing.T) {
acc := metricDataAccumulator{
metadata: NewMetadata([]MetadataLabel{MetadataLabelContainerID}, &v1.PodList{
Items: pods,
}, nil, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
}

// Create a Service with the same labels as the Pod
Expand Down Expand Up @@ -324,7 +324,7 @@ func TestGetServiceAccountName(t *testing.T) {
acc := metricDataAccumulator{
metadata: NewMetadata([]MetadataLabel{MetadataLabelContainerID}, &v1.PodList{
Items: pods,
}, nil, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
}

// Call the getServiceName method
Expand Down Expand Up @@ -358,7 +358,7 @@ func TestGetJobInfo(t *testing.T) {
acc := metricDataAccumulator{
metadata: NewMetadata([]MetadataLabel{MetadataLabelContainerID}, &v1.PodList{
Items: pods,
}, nil, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
}

// Create a Job with the same labels as the Pod
Expand Down
11 changes: 8 additions & 3 deletions receiver/kubeletstatsreceiver/internal/kubelet/cpu.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,15 +24,20 @@ func addCPUMetrics(
usageCores := float64(*s.UsageNanoCores) / 1_000_000_000
cpuMetrics.Usage(mb, currentTime, usageCores)
addCPUUtilizationMetrics(mb, cpuMetrics, usageCores, currentTime, r, nodeCPULimit)
addCPUUsageMetric(mb, cpuMetrics, s, currentTime, r)
addCPUUsageMetric(mb, cpuMetrics, s, currentTime, r, nodeCPULimit)
}
addCPUTimeMetric(mb, cpuMetrics.Time, s, currentTime)
}

func addCPUUsageMetric(mb *metadata.MetricsBuilder, cpuMetrics metadata.CPUMetrics, s *stats.CPUStats, currentTime pcommon.Timestamp, r resources) {
func addCPUUsageMetric(mb *metadata.MetricsBuilder, cpuMetrics metadata.CPUMetrics, s *stats.CPUStats, currentTime pcommon.Timestamp, r resources, nodeCPULimit float64) {
if s == nil {
return
}

value := float64(*s.UsageNanoCores) / 1_000_000_000
cpuMetrics.Utilization(mb, currentTime, value)
if nodeCPULimit > 0 {
if nodeCPULimit > 0 && s.UsageNanoCores != nil {
usageCores := float64(*s.UsageNanoCores) / 1_000_000_000
cpuMetrics.NodeUtilization(mb, currentTime, usageCores/nodeCPULimit)
}
if r.cpuLimit > 0 {
Expand Down
6 changes: 3 additions & 3 deletions receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ type Metadata struct {
Labels map[MetadataLabel]bool
PodsMetadata *v1.PodList
NodesMetadata *v1.NodeList
DetailedPVCResourceSetter func(rb *metadata.ResourceBuilder, volCacheID, volumeClaim, namespace string) error
DetailedPVCResourceSetter func(rb *metadata.ResourceBuilder, volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error)
podResources map[string]resources
containerResources map[string]resources
cpuNodeLimit float64
Expand Down Expand Up @@ -87,8 +87,8 @@ func getContainerResources(r *v1.ResourceRequirements) resources {
}
}

func NewMetadata(labels []MetadataLabel, podsMetadata *v1.PodList, nodeResourceLimits NodeLimits,
detailedPVCResourceSetter func(rb *metadata.ResourceBuilder, volCacheID, volumeClaim, namespace string) error) Metadata {
func NewMetadata(labels []MetadataLabel, podsMetadata *v1.PodList, nodesMetadata *v1.NodeList, nodeResourceLimits NodeLimits,
detailedPVCResourceSetter func(rb *metadata.ResourceBuilder, volCacheID, volumeClaim, namespace string) ([]metadata.ResourceMetricsOption, error)) Metadata {
m := Metadata{
Labels: getLabelsMap(labels),
PodsMetadata: podsMetadata,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,3 +32,16 @@ func (p *MetadataProvider) Pods() (*v1.PodList, error) {
}
return &out, nil
}

// Nodes fetches the raw node payload via the REST client and decodes it
// into a v1.NodeList.
func (p *MetadataProvider) Nodes() (*v1.NodeList, error) {
	raw, err := p.rc.Nodes()
	if err != nil {
		return nil, err
	}
	nodeList := &v1.NodeList{}
	if err := json.Unmarshal(raw, nodeList); err != nil {
		return nil, err
	}
	return nodeList, nil
}
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,10 @@ func (f testRestClient) Pods() ([]byte, error) {
return os.ReadFile("../../testdata/pods.json")
}

// Nodes satisfies the RestClient interface for these tests; node metadata
// is not exercised here, so an empty (non-nil) payload is returned.
func (f testRestClient) Nodes() ([]byte, error) {
	empty := make([]byte, 0)
	return empty, nil
}

func TestPods(t *testing.T) {
tests := []struct {
name string
Expand Down
34 changes: 17 additions & 17 deletions receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ func TestSetExtraLabels(t *testing.T) {
}{
{
name: "no_labels",
metadata: NewMetadata([]MetadataLabel{}, nil, NodeLimits{}, nil),
metadata: NewMetadata([]MetadataLabel{}, nil, nil, NodeLimits{}, nil),
args: []string{"uid", "container.id", "container"},
want: map[string]any{},
},
Expand Down Expand Up @@ -98,7 +98,7 @@ func TestSetExtraLabels(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
args: []string{"uid-1234", "container.id", "container1"},
want: map[string]any{
string(MetadataLabelContainerID): "test-container",
Expand Down Expand Up @@ -128,15 +128,15 @@ func TestSetExtraLabels(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
args: []string{"uid-1234", "container.id", "init-container1"},
want: map[string]any{
string(MetadataLabelContainerID): "test-init-container",
},
},
{
name: "set_container_id_no_metadata",
metadata: NewMetadata([]MetadataLabel{MetadataLabelContainerID}, nil, NodeLimits{}, nil),
metadata: NewMetadata([]MetadataLabel{MetadataLabelContainerID}, nil, nil, NodeLimits{}, nil),
args: []string{"uid-1234", "container.id", "container1"},
wantError: "pods metadata were not fetched",
},
Expand All @@ -158,7 +158,7 @@ func TestSetExtraLabels(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
args: []string{"uid-1234", "container.id", "container1"},
wantError: "pod \"uid-1234\" with container \"container1\" not found in the fetched metadata",
},
Expand All @@ -180,13 +180,13 @@ func TestSetExtraLabels(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
args: []string{"uid-1234", "container.id", "container1"},
wantError: "pod \"uid-1234\" with container \"container1\" has an empty containerID",
},
{
name: "set_volume_type_no_metadata",
metadata: NewMetadata([]MetadataLabel{MetadataLabelVolumeType}, nil, NodeLimits{}, nil),
metadata: NewMetadata([]MetadataLabel{MetadataLabelVolumeType}, nil, nil, NodeLimits{}, nil),
args: []string{"uid-1234", "k8s.volume.type", "volume0"},
wantError: "pods metadata were not fetched",
},
Expand All @@ -208,7 +208,7 @@ func TestSetExtraLabels(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
args: []string{"uid-1234", "k8s.volume.type", "volume1"},
wantError: "pod \"uid-1234\" with volume \"volume1\" not found in the fetched metadata",
},
Expand Down Expand Up @@ -376,8 +376,8 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) {
},
},
},
}, NodeLimits{}, func(*metadata.ResourceBuilder, string, string, string) error {
return nil
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, func(*metadata.ResourceBuilder, string, string, string) ([]metadata.ResourceMetricsOption, error) {
return []metadata.ResourceMetricsOption{}, nil
})
rb := metadata.NewResourceBuilder(metadata.DefaultResourceAttributesConfig())
err := md.setExtraResources(rb, stats.PodReference{UID: tt.args[0]}, MetadataLabel(tt.args[1]), volName)
Expand Down Expand Up @@ -407,7 +407,7 @@ func TestCpuAndMemoryGetters(t *testing.T) {
}{
{
name: "no metadata",
metadata: NewMetadata([]MetadataLabel{}, nil, NodeLimits{}, nil),
metadata: NewMetadata([]MetadataLabel{}, nil, nil, NodeLimits{}, nil),
},
{
name: "pod happy path",
Expand Down Expand Up @@ -449,7 +449,7 @@ func TestCpuAndMemoryGetters(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
podUID: "uid-1234",
containerName: "container-2",
wantPodCPULimit: 2.1,
Expand Down Expand Up @@ -501,7 +501,7 @@ func TestCpuAndMemoryGetters(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
podUID: "uid-12345",
},
{
Expand Down Expand Up @@ -544,7 +544,7 @@ func TestCpuAndMemoryGetters(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
podUID: "uid-1234",
containerName: "container-3",
wantPodCPULimit: 0.7,
Expand Down Expand Up @@ -584,7 +584,7 @@ func TestCpuAndMemoryGetters(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
podUID: "uid-1234",
containerName: "container-2",
wantPodCPURequest: 2,
Expand Down Expand Up @@ -624,7 +624,7 @@ func TestCpuAndMemoryGetters(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
podUID: "uid-1234",
containerName: "container-2",
wantPodCPULimit: 2,
Expand Down Expand Up @@ -662,7 +662,7 @@ func TestCpuAndMemoryGetters(t *testing.T) {
},
},
},
}, NodeLimits{}, nil),
}, &v1.NodeList{Items: []v1.Node{}}, NodeLimits{}, nil),
podUID: "uid-1234",
containerName: "container-1",
wantContainerCPULimit: 1,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,13 +27,19 @@ func (f fakeRestClient) Pods() ([]byte, error) {
return os.ReadFile("../../testdata/pods.json")
}

// Nodes returns canned node metadata read from the shared testdata directory.
func (f fakeRestClient) Nodes() ([]byte, error) {
	payload, err := os.ReadFile("../../testdata/nodes.json")
	return payload, err
}

func TestMetricAccumulator(t *testing.T) {
rc := &fakeRestClient{}
statsProvider := NewStatsProvider(rc)
summary, _ := statsProvider.StatsSummary()
metadataProvider := NewMetadataProvider(rc)
podsMetadata, _ := metadataProvider.Pods()
k8sMetadata := NewMetadata([]MetadataLabel{MetadataLabelContainerID}, podsMetadata, NodeLimits{}, nil)
nodesMetadata, _ := metadataProvider.Nodes()

k8sMetadata := NewMetadata([]MetadataLabel{MetadataLabelContainerID}, podsMetadata, nodesMetadata, NodeLimits{}, nil)
mbs := &metadata.MetricsBuilders{
NodeMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsBuilderConfig(), receivertest.NewNopSettings()),
PodMetricsBuilder: metadata.NewMetricsBuilder(metadata.DefaultMetricsBuilderConfig(), receivertest.NewNopSettings()),
Expand Down
5 changes: 5 additions & 0 deletions receiver/kubeletstatsreceiver/internal/kubelet/rest_client.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
// RestClient is the interface used to pull raw payloads from a kubelet.
// Implementations return the response body bytes for each endpoint;
// decoding is left to the caller.
type RestClient interface {
	// StatsSummary returns the raw stats summary payload.
	StatsSummary() ([]byte, error)
	// Pods returns the raw pod metadata payload.
	Pods() ([]byte, error)
	// Nodes returns the raw node metadata payload.
	Nodes() ([]byte, error)
}

// HTTPRestClient is a thin wrapper around a kubelet client, encapsulating endpoints
Expand All @@ -32,3 +33,7 @@ func (c *HTTPRestClient) StatsSummary() ([]byte, error) {
// Pods retrieves the raw pod metadata payload from the kubelet's /pods endpoint.
func (c *HTTPRestClient) Pods() ([]byte, error) {
	const podsPath = "/pods"
	return c.client.Get(podsPath)
}

// Nodes retrieves the raw node metadata payload from the kubelet's /nodes endpoint.
func (c *HTTPRestClient) Nodes() ([]byte, error) {
	const nodesPath = "/nodes"
	return c.client.Get(nodesPath)
}
Loading
Loading