diff --git a/pkg/apis/shipper/v1alpha1/types.go b/pkg/apis/shipper/v1alpha1/types.go index 801f1e360..bed8c3d33 100644 --- a/pkg/apis/shipper/v1alpha1/types.go +++ b/pkg/apis/shipper/v1alpha1/types.go @@ -336,12 +336,47 @@ type CapacityTargetStatus struct { Clusters []ClusterCapacityStatus `json:"clusters,omitempty"` } +type ClusterCapacityReportContainerBreakdownExample struct { + Pod string `json:"pod"` + Message *string `json:"message,omitempty"` +} + +type ClusterCapacityReportContainerStateBreakdown struct { + Count uint32 `json:"count"` + Example ClusterCapacityReportContainerBreakdownExample `json:"example"` + Reason string `json:"reason,omitempty"` + Type string `json:"type"` +} + +type ClusterCapacityReportContainerBreakdown struct { + Name string `json:"name"` + States []ClusterCapacityReportContainerStateBreakdown `json:"states"` +} + +type ClusterCapacityReportBreakdown struct { + Containers []ClusterCapacityReportContainerBreakdown `json:"containers,omitempty"` + Count uint32 `json:"count"` + Reason string `json:"reason,omitempty"` + Status string `json:"status"` + Type string `json:"type"` +} + +type ClusterCapacityReportOwner struct { + Name string `json:"name"` +} + +type ClusterCapacityReport struct { + Owner ClusterCapacityReportOwner `json:"owner"` + Breakdown []ClusterCapacityReportBreakdown `json:"breakdown,omitempty"` +} + type ClusterCapacityStatus struct { Name string `json:"name"` AvailableReplicas int32 `json:"availableReplicas"` AchievedPercent int32 `json:"achievedPercent"` SadPods []PodStatus `json:"sadPods,omitempty"` Conditions []ClusterCapacityCondition `json:"conditions,omitempty"` + Reports []ClusterCapacityReport `json:"reports,omitempty"` } type ClusterConditionType string diff --git a/pkg/apis/shipper/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/shipper/v1alpha1/zz_generated.deepcopy.go index eb22dcd52..6d916d350 100644 --- a/pkg/apis/shipper/v1alpha1/zz_generated.deepcopy.go +++ 
b/pkg/apis/shipper/v1alpha1/zz_generated.deepcopy.go @@ -344,6 +344,134 @@ func (in *ClusterCapacityCondition) DeepCopy() *ClusterCapacityCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCapacityReport) DeepCopyInto(out *ClusterCapacityReport) { + *out = *in + out.Owner = in.Owner + if in.Breakdown != nil { + in, out := &in.Breakdown, &out.Breakdown + *out = make([]ClusterCapacityReportBreakdown, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCapacityReport. +func (in *ClusterCapacityReport) DeepCopy() *ClusterCapacityReport { + if in == nil { + return nil + } + out := new(ClusterCapacityReport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCapacityReportBreakdown) DeepCopyInto(out *ClusterCapacityReportBreakdown) { + *out = *in + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ClusterCapacityReportContainerBreakdown, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCapacityReportBreakdown. +func (in *ClusterCapacityReportBreakdown) DeepCopy() *ClusterCapacityReportBreakdown { + if in == nil { + return nil + } + out := new(ClusterCapacityReportBreakdown) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterCapacityReportContainerBreakdown) DeepCopyInto(out *ClusterCapacityReportContainerBreakdown) { + *out = *in + if in.States != nil { + in, out := &in.States, &out.States + *out = make([]ClusterCapacityReportContainerStateBreakdown, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCapacityReportContainerBreakdown. +func (in *ClusterCapacityReportContainerBreakdown) DeepCopy() *ClusterCapacityReportContainerBreakdown { + if in == nil { + return nil + } + out := new(ClusterCapacityReportContainerBreakdown) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCapacityReportContainerBreakdownExample) DeepCopyInto(out *ClusterCapacityReportContainerBreakdownExample) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCapacityReportContainerBreakdownExample. +func (in *ClusterCapacityReportContainerBreakdownExample) DeepCopy() *ClusterCapacityReportContainerBreakdownExample { + if in == nil { + return nil + } + out := new(ClusterCapacityReportContainerBreakdownExample) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCapacityReportContainerStateBreakdown) DeepCopyInto(out *ClusterCapacityReportContainerStateBreakdown) { + *out = *in + in.Example.DeepCopyInto(&out.Example) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCapacityReportContainerStateBreakdown. 
+func (in *ClusterCapacityReportContainerStateBreakdown) DeepCopy() *ClusterCapacityReportContainerStateBreakdown { + if in == nil { + return nil + } + out := new(ClusterCapacityReportContainerStateBreakdown) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCapacityReportOwner) DeepCopyInto(out *ClusterCapacityReportOwner) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCapacityReportOwner. +func (in *ClusterCapacityReportOwner) DeepCopy() *ClusterCapacityReportOwner { + if in == nil { + return nil + } + out := new(ClusterCapacityReportOwner) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterCapacityStatus) DeepCopyInto(out *ClusterCapacityStatus) { *out = *in @@ -361,6 +489,13 @@ func (in *ClusterCapacityStatus) DeepCopyInto(out *ClusterCapacityStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Reports != nil { + in, out := &in.Reports, &out.Reports + *out = make([]ClusterCapacityReport, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/pkg/controller/capacity/builder/container_breakdown.go b/pkg/controller/capacity/builder/container_breakdown.go new file mode 100644 index 000000000..1c0a52853 --- /dev/null +++ b/pkg/controller/capacity/builder/container_breakdown.go @@ -0,0 +1,65 @@ +package builder + +import ( + "sort" + + shipper "github.com/bookingcom/shipper/pkg/apis/shipper/v1alpha1" +) + +type ContainerStateBreakdown struct { + containerName string + states []*shipper.ClusterCapacityReportContainerStateBreakdown +} + +func NewContainerBreakdown(containerName string) *ContainerStateBreakdown { + return &ContainerStateBreakdown{containerName: containerName} +} + +func (c 
*ContainerStateBreakdown) AddOrIncrementState( + podExampleName string, + containerConditionType string, + containerConditionReason string, + containerExampleMessage string, +) *ContainerStateBreakdown { + + var m *string + if len(containerExampleMessage) > 0 { + m = &containerExampleMessage + } + + for _, s := range c.states { + if s.Type == containerConditionType && s.Reason == containerConditionReason { + s.Count += 1 + return c + } + } + + breakdown := shipper.ClusterCapacityReportContainerStateBreakdown{ + Count: 1, + Type: containerConditionType, + Reason: containerConditionReason, + Example: shipper.ClusterCapacityReportContainerBreakdownExample{ + Pod: podExampleName, + Message: m, + }, + } + c.states = append(c.states, &breakdown) + return c +} + +func (c *ContainerStateBreakdown) Build() shipper.ClusterCapacityReportContainerBreakdown { + stateCount := len(c.states) + orderedStates := make([]shipper.ClusterCapacityReportContainerStateBreakdown, stateCount) + for i, v := range c.states { + orderedStates[i] = *v + } + + sort.Slice(orderedStates, func(i, j int) bool { + return orderedStates[i].Type < orderedStates[j].Type + }) + + return shipper.ClusterCapacityReportContainerBreakdown{ + Name: c.containerName, + States: orderedStates, + } +} diff --git a/pkg/controller/capacity/builder/pod_condition_breakdown.go b/pkg/controller/capacity/builder/pod_condition_breakdown.go new file mode 100644 index 000000000..117645561 --- /dev/null +++ b/pkg/controller/capacity/builder/pod_condition_breakdown.go @@ -0,0 +1,90 @@ +package builder + +import ( + "sort" + + shipper "github.com/bookingcom/shipper/pkg/apis/shipper/v1alpha1" +) + +type containerStateBreakdownBuilders map[string]*ContainerStateBreakdown + +func (c containerStateBreakdownBuilders) Get(containerName string) *ContainerStateBreakdown { + var b *ContainerStateBreakdown + var ok bool + if b, ok = c[containerName]; !ok { + b = NewContainerBreakdown(containerName) + c[containerName] = b + } + return b +} + 
+type PodConditionBreakdown struct { + podCount uint32 + podConditionType string + podConditionStatus string + podConditionReason string + + containerStateBreakdownBuilders containerStateBreakdownBuilders +} + +func NewPodConditionBreakdown( + initialPodCount uint32, + podConditionType string, + podConditionStatus string, + podConditionReason string, +) *PodConditionBreakdown { + return &PodConditionBreakdown{ + podCount: initialPodCount, + podConditionType: podConditionType, + podConditionStatus: podConditionStatus, + podConditionReason: podConditionReason, + containerStateBreakdownBuilders: make(containerStateBreakdownBuilders), + } +} + +func PodConditionBreakdownKey(typ, status, reason string) string { + return typ + status + reason +} + +func (p *PodConditionBreakdown) Key() string { + return PodConditionBreakdownKey(p.podConditionType, p.podConditionStatus, p.podConditionReason) +} + +func (p *PodConditionBreakdown) AddOrIncrementContainerState( + containerName string, + podExampleName string, + containerConditionType string, + containerConditionReason string, + containerExampleMessage string, +) *PodConditionBreakdown { + p.containerStateBreakdownBuilders. + Get(containerName). 
+ AddOrIncrementState(podExampleName, containerConditionType, containerConditionReason, containerExampleMessage) + return p +} + +func (p *PodConditionBreakdown) IncrementCount() *PodConditionBreakdown { + p.podCount += 1 + return p +} + +func (p *PodConditionBreakdown) Build() shipper.ClusterCapacityReportBreakdown { + + orderedContainers := make([]shipper.ClusterCapacityReportContainerBreakdown, 0) + + for _, v := range p.containerStateBreakdownBuilders { + orderedContainers = append(orderedContainers, v.Build()) + } + + sort.Slice(orderedContainers, func(i, j int) bool { + return orderedContainers[i].Name < orderedContainers[j].Name + }) + + return shipper.ClusterCapacityReportBreakdown{ + Type: p.podConditionType, + Status: p.podConditionStatus, + Count: p.podCount, + Reason: p.podConditionReason, + Containers: orderedContainers, + } +} diff --git a/pkg/controller/capacity/builder/report.go b/pkg/controller/capacity/builder/report.go new file mode 100644 index 000000000..2752b1d71 --- /dev/null +++ b/pkg/controller/capacity/builder/report.go @@ -0,0 +1,164 @@ +package builder + +import ( + "fmt" + "sort" + + core_v1 "k8s.io/api/core/v1" + + shipper "github.com/bookingcom/shipper/pkg/apis/shipper/v1alpha1" +) + +type podConditionBreakdownBuilders map[string]*PodConditionBreakdown + +func (c podConditionBreakdownBuilders) Get(typ, status, reason string) *PodConditionBreakdown { + var b *PodConditionBreakdown + var ok bool + key := PodConditionBreakdownKey(typ, status, reason) + if b, ok = c[key]; !ok { + b = NewPodConditionBreakdown(0, typ, status, reason) + c[key] = b + } + return b +} + +type Report struct { + ownerName string + podConditionBreakdownBuilders podConditionBreakdownBuilders +} + +func NewReport(ownerName string) *Report { + return &Report{ + ownerName: ownerName, + podConditionBreakdownBuilders: make(podConditionBreakdownBuilders), + } +} + +func (r *Report) AddPod(pod *core_v1.Pod) { + for _, cond := range pod.Status.Conditions { + b := 
r.podConditionBreakdownBuilders. + Get(string(cond.Type), string(cond.Status), string(cond.Reason)). + IncrementCount() + + for _, containerStatus := range pod.Status.ContainerStatuses { + b.AddOrIncrementContainerState( + containerStatus.Name, + pod.Name, + GetContainerStateField(containerStatus.State, ContainerStateFieldType), + GetContainerStateField(containerStatus.State, ContainerStateFieldReason), + GetContainerStateMessage(containerStatus), + ) + } + } +} + +func (r *Report) Build() *shipper.ClusterCapacityReport { + return &shipper.ClusterCapacityReport{ + Owner: shipper.ClusterCapacityReportOwner{ + Name: r.ownerName, + }, + Breakdown: r.buildSortedBreakdowns(), + } +} + +func (r *Report) AddPodConditionBreakdownBuilder(b *PodConditionBreakdown) *Report { + r.podConditionBreakdownBuilders[b.Key()] = b + return r +} + +func (r *Report) buildSortedBreakdowns() []shipper.ClusterCapacityReportBreakdown { + orderedBreakdowns := make([]shipper.ClusterCapacityReportBreakdown, len(r.podConditionBreakdownBuilders)) + + i := 0 + for _, v := range r.podConditionBreakdownBuilders { + orderedBreakdowns[i] = v.Build() + i++ + } + + sort.Slice(orderedBreakdowns, func(i, j int) bool { + if orderedBreakdowns[i].Type == orderedBreakdowns[j].Type { + return orderedBreakdowns[i].Status < orderedBreakdowns[j].Status + } + return orderedBreakdowns[i].Type < orderedBreakdowns[j].Type + }) + + return orderedBreakdowns +} + +func GetRunningContainerStateField(field ContainerStateField) string { + switch field { + case ContainerStateFieldType: + return "Running" + case ContainerStateFieldReason, ContainerStateFieldMessage: + return "" + default: + panic(fmt.Sprintf("Unknown field %s", field)) + } +} + +func GetWaitingContainerStateField(stateWaiting *core_v1.ContainerStateWaiting, field ContainerStateField) string { + switch field { + case ContainerStateFieldType: + return "Waiting" + case ContainerStateFieldReason: + return stateWaiting.Reason + case ContainerStateFieldMessage: + 
return stateWaiting.Message + default: + panic(fmt.Sprintf("Unknown field %s", field)) + } +} + +func GetTerminatedContainerStateField(stateTerminated *core_v1.ContainerStateTerminated, f ContainerStateField) string { + switch f { + case ContainerStateFieldType: + return "Terminated" + case ContainerStateFieldReason: + return stateTerminated.Reason + case ContainerStateFieldMessage: + return stateTerminated.Message + default: + panic(fmt.Sprintf("Unknown field %s", f)) + } +} + +func GetContainerStateField(c core_v1.ContainerState, f ContainerStateField) string { + if c.Running != nil { + return GetRunningContainerStateField(f) + } else if c.Waiting != nil { + return GetWaitingContainerStateField(c.Waiting, f) + } else if c.Terminated != nil { + return GetTerminatedContainerStateField(c.Terminated, f) + } + + panic("Programmer error: a container state must be either Running, Waiting or Terminated.") +} + +func getTerminatedMessage(c core_v1.ContainerState) string { + if c.Terminated == nil { + return "" + } + + if len(c.Terminated.Message) > 0 { + return c.Terminated.Message + } else if c.Terminated.Signal > 0 { + return fmt.Sprintf("Terminated with signal %d", c.Terminated.Signal) + } else { + return fmt.Sprintf("Terminated with exit code %d", c.Terminated.ExitCode) + } +} + +func GetContainerStateMessage(c core_v1.ContainerStatus) string { + if c.RestartCount > 0 { + return getTerminatedMessage(c.LastTerminationState) + } + return getTerminatedMessage(c.State) +} + +type ContainerStateField string + +const ( + ContainerStateFieldType ContainerStateField = "type" + ContainerStateFieldReason ContainerStateField = "reason" + ContainerStateFieldMessage ContainerStateField = "message" +) diff --git a/pkg/controller/capacity/builder/report_test.go b/pkg/controller/capacity/builder/report_test.go new file mode 100644 index 000000000..a3785f90b --- /dev/null +++ b/pkg/controller/capacity/builder/report_test.go @@ -0,0 +1,327 @@ +package builder + +import ( + "testing" + 
+ "github.com/pmezard/go-difflib/difflib" + "gopkg.in/yaml.v2" + + shipper "github.com/bookingcom/shipper/pkg/apis/shipper/v1alpha1" +) + +func yamlDiff(a interface{}, b interface{}) (string, error) { + yamlActual, _ := yaml.Marshal(a) + yamlExpected, _ := yaml.Marshal(b) + + diff := difflib.UnifiedDiff{ + A: difflib.SplitLines(string(yamlExpected)), + B: difflib.SplitLines(string(yamlActual)), + FromFile: "Expected", + ToFile: "Actual", + Context: 4, + } + + return difflib.GetUnifiedDiffString(diff) +} + +func TestEmptyReport(t *testing.T) { + + ownerName := "owner" + + actual := NewReport(ownerName).Build() + expected := &shipper.ClusterCapacityReport{ + Owner: shipper.ClusterCapacityReportOwner{Name: ownerName}, + } + + text, err := yamlDiff(expected, actual) + if err != nil { + t.Errorf("an error occurred: %s", err) + } + if len(text) > 0 { + t.Errorf("expected is different from actual:\n%s", text) + } +} + +func TestReportOneContainerOnePodOneCondition(t *testing.T) { + ownerName := "owner" + actual := NewReport(ownerName). + AddPodConditionBreakdownBuilder( + NewPodConditionBreakdown(1, "Ready", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Ready", "", "")). 
+ Build() + expected := &shipper.ClusterCapacityReport{ + Owner: shipper.ClusterCapacityReportOwner{Name: ownerName}, + Breakdown: []shipper.ClusterCapacityReportBreakdown{ + { + Type: "Ready", + Status: "True", + Count: 1, + Containers: []shipper.ClusterCapacityReportContainerBreakdown{ + { + Name: "app", States: []shipper.ClusterCapacityReportContainerStateBreakdown{ + { + Count: 1, + Type: "Ready", + Reason: "", + Example: shipper.ClusterCapacityReportContainerBreakdownExample{ + Pod: "pod-a", + }, + }, + }, + }, + }, + }, + }, + } + + text, err := yamlDiff(expected, actual) + if err != nil { + t.Errorf("an error occurred: %s", err) + } + if len(text) > 0 { + t.Errorf("expected is different from actual:\n%s", text) + } +} + +func TestReportOneContainerOnePodOneConditionTerminatedWithExitCodeContainer(t *testing.T) { + ownerName := "owner" + actual := NewReport(ownerName). + AddPodConditionBreakdownBuilder( + NewPodConditionBreakdown(1, "Ready", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Ready", "", "Terminated with exit code 1")). + Build() + + m := "Terminated with exit code 1" + mPtr := &m + + expected := &shipper.ClusterCapacityReport{ + Owner: shipper.ClusterCapacityReportOwner{Name: ownerName}, + Breakdown: []shipper.ClusterCapacityReportBreakdown{ + { + Type: "Ready", + Status: "True", + Count: 1, + Containers: []shipper.ClusterCapacityReportContainerBreakdown{ + { + Name: "app", States: []shipper.ClusterCapacityReportContainerStateBreakdown{ + { + Count: 1, + Type: "Ready", + Reason: "", + Example: shipper.ClusterCapacityReportContainerBreakdownExample{ + Pod: "pod-a", + Message: mPtr, + }, + }, + }, + }, + }, + }, + }, + } + + text, err := yamlDiff(expected, actual) + if err != nil { + t.Errorf("an error occurred: %s", err) + } + if len(text) > 0 { + t.Errorf("expected is different from actual:\n%s", text) + } +} + +func TestReportOneContainerTwoPodsOneCondition(t *testing.T) { + ownerName := "owner" + actual := NewReport(ownerName). 
+ AddPodConditionBreakdownBuilder( + NewPodConditionBreakdown(2, "Ready", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Ready", "", ""). + AddOrIncrementContainerState("app", "pod-b", "Ready", "", "")). + Build() + + expected := &shipper.ClusterCapacityReport{ + Owner: shipper.ClusterCapacityReportOwner{Name: ownerName}, + Breakdown: []shipper.ClusterCapacityReportBreakdown{ + { + Type: "Ready", + Status: "True", + Count: 2, + Containers: []shipper.ClusterCapacityReportContainerBreakdown{ + { + Name: "app", States: []shipper.ClusterCapacityReportContainerStateBreakdown{ + { + Count: 2, + Type: "Ready", + Reason: "", + Example: shipper.ClusterCapacityReportContainerBreakdownExample{ + Pod: "pod-a", + }, + }, + }, + }, + }, + }, + }, + } + + text, err := yamlDiff(expected, actual) + if err != nil { + t.Errorf("an error occurred: %s", err) + } + if len(text) > 0 { + t.Errorf("expected is different from actual:\n%s", text) + } +} + +func TestReportTwoContainersTwoPodsOneCondition(t *testing.T) { + ownerName := "owner" + actual := NewReport(ownerName). + AddPodConditionBreakdownBuilder( + NewPodConditionBreakdown(2, "Ready", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Ready", "", ""). + AddOrIncrementContainerState("app", "pod-b", "Ready", "", ""). + AddOrIncrementContainerState("nginx", "pod-a", "Ready", "", ""). + AddOrIncrementContainerState("nginx", "pod-b", "Ready", "", "")). 
+ Build() + + expected := &shipper.ClusterCapacityReport{ + Owner: shipper.ClusterCapacityReportOwner{Name: ownerName}, + Breakdown: []shipper.ClusterCapacityReportBreakdown{ + { + Type: "Ready", + Status: "True", + Count: 2, + Containers: []shipper.ClusterCapacityReportContainerBreakdown{ + { + Name: "app", States: []shipper.ClusterCapacityReportContainerStateBreakdown{ + { + Count: 2, + Type: "Ready", + Reason: "", + Example: shipper.ClusterCapacityReportContainerBreakdownExample{ + Pod: "pod-a", + }, + }, + }, + }, + + { + Name: "nginx", States: []shipper.ClusterCapacityReportContainerStateBreakdown{ + { + Count: 2, + Type: "Ready", + Reason: "", + Example: shipper.ClusterCapacityReportContainerBreakdownExample{ + Pod: "pod-a", + }, + }, + }, + }, + }, + }, + }, + } + + text, err := yamlDiff(expected, actual) + if err != nil { + t.Errorf("an error occurred: %s", err) + } + if len(text) > 0 { + t.Errorf("expected is different from actual:\n%s", text) + } +} + +func TestReportTwoContainersTwoPodsTwoConditions(t *testing.T) { + ownerName := "owner" + actual := NewReport(ownerName). + AddPodConditionBreakdownBuilder( + NewPodConditionBreakdown(2, "Ready", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Ready", "", ""). + AddOrIncrementContainerState("app", "pod-b", "Ready", "", ""). + AddOrIncrementContainerState("nginx", "pod-a", "Ready", "", ""). + AddOrIncrementContainerState("nginx", "pod-b", "Ready", "", "")).AddPodConditionBreakdownBuilder( + NewPodConditionBreakdown(2, "PodInitialized", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Ready", "", ""). + AddOrIncrementContainerState("app", "pod-b", "Ready", "", ""). + AddOrIncrementContainerState("nginx", "pod-a", "Ready", "", ""). + AddOrIncrementContainerState("nginx", "pod-b", "Ready", "", "")). 
+ Build() + + expected := &shipper.ClusterCapacityReport{ + Owner: shipper.ClusterCapacityReportOwner{Name: ownerName}, + Breakdown: []shipper.ClusterCapacityReportBreakdown{ + { + Type: "PodInitialized", + Status: "True", + Count: 2, + Containers: []shipper.ClusterCapacityReportContainerBreakdown{ + { + Name: "app", States: []shipper.ClusterCapacityReportContainerStateBreakdown{ + { + Count: 2, + Type: "Ready", + Reason: "", + Example: shipper.ClusterCapacityReportContainerBreakdownExample{ + Pod: "pod-a", + }, + }, + }, + }, + + { + Name: "nginx", States: []shipper.ClusterCapacityReportContainerStateBreakdown{ + { + Count: 2, + Type: "Ready", + Reason: "", + Example: shipper.ClusterCapacityReportContainerBreakdownExample{ + Pod: "pod-a", + }, + }, + }, + }, + }, + }, + + { + Type: "Ready", + Status: "True", + Count: 2, + Containers: []shipper.ClusterCapacityReportContainerBreakdown{ + { + Name: "app", States: []shipper.ClusterCapacityReportContainerStateBreakdown{ + { + Count: 2, + Type: "Ready", + Reason: "", + Example: shipper.ClusterCapacityReportContainerBreakdownExample{ + Pod: "pod-a", + }, + }, + }, + }, + + { + Name: "nginx", States: []shipper.ClusterCapacityReportContainerStateBreakdown{ + { + Count: 2, + Type: "Ready", + Reason: "", + Example: shipper.ClusterCapacityReportContainerBreakdownExample{ + Pod: "pod-a", + }, + }, + }, + }, + }, + }, + }, + } + + text, err := yamlDiff(expected, actual) + if err != nil { + t.Errorf("an error occurred: %s", err) + } + if len(text) > 0 { + t.Errorf("expected is different from actual:\n%s", text) + } +} diff --git a/pkg/controller/capacity/capacity_controller.go b/pkg/controller/capacity/capacity_controller.go index 326f7ac38..577564b6a 100644 --- a/pkg/controller/capacity/capacity_controller.go +++ b/pkg/controller/capacity/capacity_controller.go @@ -25,7 +25,6 @@ import ( listers "github.com/bookingcom/shipper/pkg/client/listers/shipper/v1alpha1" "github.com/bookingcom/shipper/pkg/clusterclientstore" 
"github.com/bookingcom/shipper/pkg/conditions" - shippercontroller "github.com/bookingcom/shipper/pkg/controller" "github.com/bookingcom/shipper/pkg/util/replicas" ) @@ -192,17 +191,24 @@ func (c *Controller) capacityTargetSyncHandler(key string) bool { var clusterStatus *shipper.ClusterCapacityStatus var targetDeployment *appsv1.Deployment + if ct.Status.Clusters == nil { + ct.Status.Clusters = []shipper.ClusterCapacityStatus{} + } + for i, cs := range ct.Status.Clusters { if cs.Name == clusterSpec.Name { - clusterStatus = &ct.Status.Clusters[i] + clusterStatus = ct.Status.Clusters[i].DeepCopy() + clusterStatus.Reports = []shipper.ClusterCapacityReport{} + ct.Status.Clusters = append(ct.Status.Clusters[:i], ct.Status.Clusters[i+1:]...) + break } } if clusterStatus == nil { clusterStatus = &shipper.ClusterCapacityStatus{ - Name: clusterSpec.Name, + Name: clusterSpec.Name, + Reports: []shipper.ClusterCapacityReport{}, } - ct.Status.Clusters = append(ct.Status.Clusters, *clusterStatus) } // all the below functions add conditions to the clusterStatus as they do @@ -228,30 +234,36 @@ func (c *Controller) capacityTargetSyncHandler(key string) bool { clusterStatus.AvailableReplicas = targetDeployment.Status.AvailableReplicas clusterStatus.AchievedPercent = c.calculatePercentageFromAmount(clusterSpec.TotalReplicaCount, clusterStatus.AvailableReplicas) + + report, err := c.getReport(targetDeployment, clusterStatus) + if err == nil { + clusterStatus.Reports = append(clusterStatus.Reports, *report) + } + sadPods, err := c.getSadPods(targetDeployment, clusterStatus) if err != nil { + ct.Status.Clusters = append(ct.Status.Clusters, *clusterStatus) continue } - clusterStatus.SadPods = sadPods - if len(sadPods) > 0 { - continue + if len(sadPods) == 0 { + // If we've got here, the capacity target has no sad pods and there have been + // no errors, so set conditions to true.
+ clusterStatus.Conditions = conditions.SetCapacityCondition( + clusterStatus.Conditions, + shipper.ClusterConditionTypeReady, + corev1.ConditionTrue, + "", "") + clusterStatus.Conditions = conditions.SetCapacityCondition( + clusterStatus.Conditions, + shipper.ClusterConditionTypeOperational, + corev1.ConditionTrue, + "", + "") } - // If we've got here, the capacity target has no sad pods and there have been - // no errors, so set conditions to true. - clusterStatus.Conditions = conditions.SetCapacityCondition( - clusterStatus.Conditions, - shipper.ClusterConditionTypeReady, - corev1.ConditionTrue, - "", "") - clusterStatus.Conditions = conditions.SetCapacityCondition( - clusterStatus.Conditions, - shipper.ClusterConditionTypeOperational, - corev1.ConditionTrue, - "", - "") + ct.Status.Clusters = append(ct.Status.Clusters, *clusterStatus) } sort.Sort(byClusterName(ct.Status.Clusters)) @@ -262,15 +274,6 @@ func (c *Controller) capacityTargetSyncHandler(key string) bool { return true } - c.recorder.Eventf( - ct, - corev1.EventTypeNormal, - "CapacityTargetChanged", - "Set %q status to %v", - shippercontroller.MetaKey(ct), - ct.Status, - ) - return false } @@ -337,6 +340,25 @@ func (c *Controller) getSadPods(targetDeployment *appsv1.Deployment, clusterStat return sadPods, nil } +func (c *Controller) getReport(targetDeployment *appsv1.Deployment, clusterStatus *shipper.ClusterCapacityStatus) (*shipper.ClusterCapacityReport, error) { + targetClusterInformer, clusterErr := c.clusterClientStore.GetInformerFactory(clusterStatus.Name) + if clusterErr != nil { + // Not sure if each method should report operational conditions for + // the cluster it is operating on. 
+ return nil, clusterErr + } + + selector := labels.Set(targetDeployment.Spec.Template.Labels).AsSelector() + podsList, clusterErr := targetClusterInformer.Core().V1().Pods().Lister().Pods(targetDeployment.Namespace).List(selector) + if clusterErr != nil { + return nil, clusterErr + } + + report := buildReport(targetDeployment.Name, podsList) + + return report, nil +} + func (c *Controller) findTargetDeploymentForClusterSpec(clusterSpec shipper.ClusterCapacityTarget, targetNamespace string, selector labels.Selector, clusterStatus *shipper.ClusterCapacityStatus) (*appsv1.Deployment, error) { targetClusterInformer, clusterErr := c.clusterClientStore.GetInformerFactory(clusterSpec.Name) if clusterErr != nil { diff --git a/pkg/controller/capacity/capacity_controller_test.go b/pkg/controller/capacity/capacity_controller_test.go index 4bd5f5341..ab30c6fc0 100644 --- a/pkg/controller/capacity/capacity_controller_test.go +++ b/pkg/controller/capacity/capacity_controller_test.go @@ -2,6 +2,7 @@ package capacity import ( "fmt" + "sort" "testing" "time" @@ -19,6 +20,7 @@ import ( shipperfake "github.com/bookingcom/shipper/pkg/client/clientset/versioned/fake" shipperinformers "github.com/bookingcom/shipper/pkg/client/informers/externalversions" "github.com/bookingcom/shipper/pkg/conditions" + "github.com/bookingcom/shipper/pkg/controller/capacity/builder" shippertesting "github.com/bookingcom/shipper/pkg/testing" ) @@ -30,7 +32,7 @@ func TestUpdatingCapacityTargetUpdatesDeployment(t *testing.T) { f := NewFixture(t) capacityTarget := newCapacityTarget(10, 50) - f.managementObjects = append(f.managementObjects, capacityTarget) + f.managementObjects = append(f.managementObjects, capacityTarget.DeepCopy()) deployment := newDeployment(0, 0) f.targetClusterObjects = append(f.targetClusterObjects, deployment) @@ -48,16 +50,502 @@ func TestUpdatingCapacityTargetUpdatesDeployment(t *testing.T) { }, } - f.expectCapacityTargetStatusUpdate(capacityTarget, 0, 0, expectedClusterConditions) 
+ f.expectCapacityTargetStatusUpdate(capacityTarget, 0, 0, expectedClusterConditions, []shipper.ClusterCapacityReport{*builder.NewReport("nginx").Build()}) f.runCapacityTargetSyncHandler() } +func TestCapacityTargetStatusReturnsCorrectFleetReportWithSinglePod(t *testing.T) { + f := NewFixture(t) + + capacityTarget := newCapacityTarget(1, 100) + + deployment := newDeployment(1, 1) + podLabels, _ := metav1.LabelSelectorAsMap(deployment.Spec.Selector) + + podA := newPodBuilder("pod-a", deployment.GetNamespace(), podLabels). + AddContainerStatus("app", corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, 0, nil). + AddPodCondition(corev1.PodCondition{Type: corev1.PodInitialized, Status: corev1.ConditionTrue}). + AddPodCondition(corev1.PodCondition{Type: "ContainersReady", Status: corev1.ConditionTrue}). + AddPodCondition(corev1.PodCondition{Type: corev1.PodReady, Status: corev1.ConditionTrue}). + AddPodCondition(corev1.PodCondition{Type: corev1.PodScheduled, Status: corev1.ConditionTrue}). + Build() + + f.targetClusterObjects = append(f.targetClusterObjects, deployment, podA) + + c := builder.NewReport("nginx"). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(1, "ContainersReady", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Running", "", "")). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(1, "Initialized", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Running", "", "")). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(1, "PodScheduled", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Running", "", "")). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(1, "Ready", "True", ""). 
+ AddOrIncrementContainerState("app", "pod-a", "Running", "", "")) + + f.managementObjects = append(f.managementObjects, capacityTarget.DeepCopy()) + + capacityTarget.Status.Clusters = append(capacityTarget.Status.Clusters, shipper.ClusterCapacityStatus{ + Name: "minikube", + Reports: []shipper.ClusterCapacityReport{*c.Build()}, + AchievedPercent: 100, + AvailableReplicas: 1, + Conditions: []shipper.ClusterCapacityCondition{ + {Type: shipper.ClusterConditionTypeOperational, Status: corev1.ConditionTrue}, + {Type: shipper.ClusterConditionTypeReady, Status: corev1.ConditionTrue}, + }, + }) + + updateAction := kubetesting.NewUpdateAction( + schema.GroupVersionResource{ + Group: shipper.SchemeGroupVersion.Group, + Version: shipper.SchemeGroupVersion.Version, + Resource: "capacitytargets", + }, + capacityTarget.GetNamespace(), + capacityTarget, + ) + + f.managementClusterActions = append(f.managementClusterActions, updateAction) + f.runCapacityTargetSyncHandler() + + // Calling the sync handler again with the updated capacity target object should yield the same results. + f.managementObjects = []runtime.Object{capacityTarget.DeepCopy()} + f.runCapacityTargetSyncHandler() +} + +func TestCapacityTargetStatusReturnsCorrectFleetReportWithSinglePodCompletedContainer(t *testing.T) { + f := NewFixture(t) + + capacityTarget := newCapacityTarget(1, 100) + + deployment := newDeployment(1, 1) + podLabels, _ := metav1.LabelSelectorAsMap(deployment.Spec.Selector) + + podA := newPodBuilder("pod-a", deployment.GetNamespace(), podLabels). + AddContainerStatus("app", corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Reason: "Completed", ExitCode: 1}}, 0, nil). + AddPodCondition(corev1.PodCondition{Type: "ContainersReady", Status: corev1.ConditionTrue}). + Build() + + f.targetClusterObjects = append(f.targetClusterObjects, deployment, podA) + + c := builder.NewReport("nginx"). 
+ AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(1, "ContainersReady", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Terminated", "Completed", "Terminated with exit code 1")) + + f.managementObjects = append(f.managementObjects, capacityTarget.DeepCopy()) + + capacityTarget.Status.Clusters = append(capacityTarget.Status.Clusters, shipper.ClusterCapacityStatus{ + Name: "minikube", + Reports: []shipper.ClusterCapacityReport{*c.Build()}, + AchievedPercent: 100, + AvailableReplicas: 1, + Conditions: []shipper.ClusterCapacityCondition{ + {Type: shipper.ClusterConditionTypeOperational, Status: corev1.ConditionTrue}, + {Type: shipper.ClusterConditionTypeReady, Status: corev1.ConditionTrue}, + }, + }) + + updateAction := kubetesting.NewUpdateAction( + schema.GroupVersionResource{ + Group: shipper.SchemeGroupVersion.Group, + Version: shipper.SchemeGroupVersion.Version, + Resource: "capacitytargets", + }, + capacityTarget.GetNamespace(), + capacityTarget, + ) + + f.managementClusterActions = append(f.managementClusterActions, updateAction) + f.runCapacityTargetSyncHandler() + + // Calling the sync handler again with the updated capacity target object should yield the same results. + f.managementObjects = []runtime.Object{capacityTarget.DeepCopy()} + f.runCapacityTargetSyncHandler() +} + +func TestCapacityTargetStatusReturnsCorrectFleetReportWithSinglePodTerminatedContainer(t *testing.T) { + f := NewFixture(t) + + capacityTarget := newCapacityTarget(1, 100) + + deployment := newDeployment(1, 1) + podLabels, _ := metav1.LabelSelectorAsMap(deployment.Spec.Selector) + + podA := newPodBuilder("pod-a", deployment.GetNamespace(), podLabels). + AddContainerStatus("app", corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Reason: "Terminated", Signal: 9}}, 0, nil). + AddPodCondition(corev1.PodCondition{Type: "ContainersReady", Status: corev1.ConditionTrue}). 
+ Build() + + f.targetClusterObjects = append(f.targetClusterObjects, deployment, podA) + + c := builder.NewReport("nginx"). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(1, "ContainersReady", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Terminated", "Terminated", "Terminated with signal 9")) + + f.managementObjects = append(f.managementObjects, capacityTarget.DeepCopy()) + + capacityTarget.Status.Clusters = append(capacityTarget.Status.Clusters, shipper.ClusterCapacityStatus{ + Name: "minikube", + Reports: []shipper.ClusterCapacityReport{*c.Build()}, + AchievedPercent: 100, + AvailableReplicas: 1, + Conditions: []shipper.ClusterCapacityCondition{ + {Type: shipper.ClusterConditionTypeOperational, Status: corev1.ConditionTrue}, + {Type: shipper.ClusterConditionTypeReady, Status: corev1.ConditionTrue}, + }, + }) + + updateAction := kubetesting.NewUpdateAction( + schema.GroupVersionResource{ + Group: shipper.SchemeGroupVersion.Group, + Version: shipper.SchemeGroupVersion.Version, + Resource: "capacitytargets", + }, + capacityTarget.GetNamespace(), + capacityTarget, + ) + + f.managementClusterActions = append(f.managementClusterActions, updateAction) + f.runCapacityTargetSyncHandler() + + // Calling the sync handler again with the updated capacity target object should yield the same results. + f.managementObjects = []runtime.Object{capacityTarget.DeepCopy()} + f.runCapacityTargetSyncHandler() +} + +func TestCapacityTargetStatusReturnsCorrectFleetReportWithSinglePodRestartedContainer(t *testing.T) { + f := NewFixture(t) + + capacityTarget := newCapacityTarget(1, 100) + + deployment := newDeployment(1, 1) + podLabels, _ := metav1.LabelSelectorAsMap(deployment.Spec.Selector) + + podA := newPodBuilder("pod-a", deployment.GetNamespace(), podLabels). + AddContainerStatus("app", corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Reason: "Terminated", Signal: 9}}, 0, nil). 
+ AddPodCondition(corev1.PodCondition{Type: "ContainersReady", Status: corev1.ConditionTrue}). + Build() + + f.targetClusterObjects = append(f.targetClusterObjects, deployment, podA) + + c := builder.NewReport("nginx"). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(1, "ContainersReady", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Terminated", "Terminated", "Terminated with signal 9")) + + f.managementObjects = append(f.managementObjects, capacityTarget.DeepCopy()) + + capacityTarget.Status.Clusters = append(capacityTarget.Status.Clusters, shipper.ClusterCapacityStatus{ + Name: "minikube", + Reports: []shipper.ClusterCapacityReport{*c.Build()}, + AchievedPercent: 100, + AvailableReplicas: 1, + Conditions: []shipper.ClusterCapacityCondition{ + {Type: shipper.ClusterConditionTypeOperational, Status: corev1.ConditionTrue}, + {Type: shipper.ClusterConditionTypeReady, Status: corev1.ConditionTrue}, + }, + }) + + updateAction := kubetesting.NewUpdateAction( + schema.GroupVersionResource{ + Group: shipper.SchemeGroupVersion.Group, + Version: shipper.SchemeGroupVersion.Version, + Resource: "capacitytargets", + }, + capacityTarget.GetNamespace(), + capacityTarget, + ) + + f.managementClusterActions = append(f.managementClusterActions, updateAction) + f.runCapacityTargetSyncHandler() + + // Calling the sync handler again with the updated capacity target object should yield the same results. + f.managementObjects = []runtime.Object{capacityTarget.DeepCopy()} + f.runCapacityTargetSyncHandler() +} + +func TestCapacityTargetStatusReturnsCorrectFleetReportWithSinglePodRestartedContainerWithTerminationMessage(t *testing.T) { + f := NewFixture(t) + + capacityTarget := newCapacityTarget(1, 100) + + deployment := newDeployment(1, 1) + podLabels, _ := metav1.LabelSelectorAsMap(deployment.Spec.Selector) + + podA := newPodBuilder("pod-a", deployment.GetNamespace(), podLabels). 
+ AddContainerStatus("app", corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Reason: "Terminated", Signal: 9}}, 1, &corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Message: "termination message"}}). + AddPodCondition(corev1.PodCondition{Type: "ContainersReady", Status: corev1.ConditionTrue}). + Build() + + f.targetClusterObjects = append(f.targetClusterObjects, deployment, podA) + + c := builder.NewReport("nginx"). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(1, "ContainersReady", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Terminated", "Terminated", "termination message")) + + f.managementObjects = append(f.managementObjects, capacityTarget.DeepCopy()) + + capacityTarget.Status.Clusters = append(capacityTarget.Status.Clusters, shipper.ClusterCapacityStatus{ + Name: "minikube", + Reports: []shipper.ClusterCapacityReport{*c.Build()}, + AchievedPercent: 100, + AvailableReplicas: 1, + Conditions: []shipper.ClusterCapacityCondition{ + {Type: shipper.ClusterConditionTypeOperational, Status: corev1.ConditionTrue}, + {Type: shipper.ClusterConditionTypeReady, Status: corev1.ConditionTrue}, + }, + }) + + updateAction := kubetesting.NewUpdateAction( + schema.GroupVersionResource{ + Group: shipper.SchemeGroupVersion.Group, + Version: shipper.SchemeGroupVersion.Version, + Resource: "capacitytargets", + }, + capacityTarget.GetNamespace(), + capacityTarget, + ) + + f.managementClusterActions = append(f.managementClusterActions, updateAction) + f.runCapacityTargetSyncHandler() + + // Calling the sync handler again with the updated capacity target object should yield the same results. 
+ f.managementObjects = []runtime.Object{capacityTarget.DeepCopy()} + f.runCapacityTargetSyncHandler() +} + +func TestCapacityTargetStatusReturnsCorrectFleetReportWithMultiplePods(t *testing.T) { + f := NewFixture(t) + + capacityTarget := newCapacityTarget(2, 100) + + deployment := newDeployment(2, 2) + podLabels, _ := metav1.LabelSelectorAsMap(deployment.Spec.Selector) + + podA := newPodBuilder("pod-a", deployment.GetNamespace(), podLabels). + AddContainerStatus("app", corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, 0, nil). + AddPodCondition(corev1.PodCondition{Type: corev1.PodInitialized, Status: corev1.ConditionTrue}). + AddPodCondition(corev1.PodCondition{Type: "ContainersReady", Status: corev1.ConditionTrue}). + AddPodCondition(corev1.PodCondition{Type: corev1.PodReady, Status: corev1.ConditionTrue}). + AddPodCondition(corev1.PodCondition{Type: corev1.PodScheduled, Status: corev1.ConditionTrue}). + Build() + + podB := newPodBuilder("pod-b", deployment.GetNamespace(), podLabels). + AddContainerStatus("app", corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, 0, nil). + AddPodCondition(corev1.PodCondition{Type: corev1.PodInitialized, Status: corev1.ConditionTrue}). + AddPodCondition(corev1.PodCondition{Type: "ContainersReady", Status: corev1.ConditionTrue}). + AddPodCondition(corev1.PodCondition{Type: corev1.PodReady, Status: corev1.ConditionTrue}). + AddPodCondition(corev1.PodCondition{Type: corev1.PodScheduled, Status: corev1.ConditionTrue}). + Build() + + f.targetClusterObjects = append(f.targetClusterObjects, deployment, podA, podB) + + c := builder.NewReport("nginx"). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(2, "ContainersReady", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Running", "", ""). + AddOrIncrementContainerState("app", "pod-b", "Running", "", "")). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(2, "Initialized", "True", "").
+ AddOrIncrementContainerState("app", "pod-a", "Running", "", ""). + AddOrIncrementContainerState("app", "pod-b", "Running", "", "")). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(2, "PodScheduled", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Running", "", ""). + AddOrIncrementContainerState("app", "pod-b", "Running", "", "")). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(2, "Ready", "True", ""). + AddOrIncrementContainerState("app", "pod-a", "Running", "", ""). + AddOrIncrementContainerState("app", "pod-b", "Running", "", "")) + + f.managementObjects = append(f.managementObjects, capacityTarget.DeepCopy()) + + capacityTarget.Status.Clusters = append(capacityTarget.Status.Clusters, shipper.ClusterCapacityStatus{ + Name: "minikube", + Reports: []shipper.ClusterCapacityReport{*c.Build()}, + AchievedPercent: 100, + AvailableReplicas: 2, + Conditions: []shipper.ClusterCapacityCondition{ + {Type: shipper.ClusterConditionTypeOperational, Status: corev1.ConditionTrue}, + {Type: shipper.ClusterConditionTypeReady, Status: corev1.ConditionTrue}, + }, + }) + + updateAction := kubetesting.NewUpdateAction( + schema.GroupVersionResource{ + Group: shipper.SchemeGroupVersion.Group, + Version: shipper.SchemeGroupVersion.Version, + Resource: "capacitytargets", + }, + capacityTarget.GetNamespace(), + capacityTarget, + ) + + f.managementClusterActions = append(f.managementClusterActions, updateAction) + f.runCapacityTargetSyncHandler() + + // Calling the sync handler again with the updated capacity target object should yield the same results.
+ f.managementObjects = []runtime.Object{capacityTarget.DeepCopy()} + f.runCapacityTargetSyncHandler() +} + +func TestCapacityTargetStatusReturnsCorrectFleetReportWithMultiplePodsWithDifferentConditions(t *testing.T) { + f := NewFixture(t) + + capacityTarget := newCapacityTarget(3, 100) + + deployment := newDeployment(3, 3) + podLabels, _ := metav1.LabelSelectorAsMap(deployment.Spec.Selector) + + podA := newPodBuilder("pod-a", deployment.GetNamespace(), podLabels). + AddContainerStatus("app", corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{Reason: "ContainerCreating"}}, 0, nil). + AddPodCondition(corev1.PodCondition{Type: corev1.PodInitialized, Status: corev1.ConditionTrue}). + AddPodCondition(corev1.PodCondition{Type: corev1.PodReady, Status: corev1.ConditionFalse, Reason: "ContainersNotReady"}). + AddPodCondition(corev1.PodCondition{Type: corev1.PodScheduled, Status: corev1.ConditionTrue}). + Build() + + podB := newPodBuilder("pod-b", deployment.GetNamespace(), podLabels). + AddContainerStatus("app", corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{Reason: "ContainerCreating"}}, 0, nil). + AddPodCondition(corev1.PodCondition{Type: corev1.PodInitialized, Status: corev1.ConditionTrue}). + AddPodCondition(corev1.PodCondition{Type: corev1.PodReady, Status: corev1.ConditionFalse, Reason: "ContainersNotReady"}). + AddPodCondition(corev1.PodCondition{Type: corev1.PodScheduled, Status: corev1.ConditionTrue}). + Build() + + podC := newPodBuilder("pod-c", deployment.GetNamespace(), podLabels). + AddContainerStatus("app", corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Reason: "Completed"}}, 0, nil). + AddPodCondition(corev1.PodCondition{Type: corev1.PodInitialized, Status: corev1.ConditionTrue}). + AddPodCondition(corev1.PodCondition{Type: corev1.PodReady, Status: corev1.ConditionFalse, Reason: "ContainersNotReady"}). + AddPodCondition(corev1.PodCondition{Type: corev1.PodScheduled, Status: corev1.ConditionTrue}). 
+ Build() + + f.targetClusterObjects = append(f.targetClusterObjects, deployment, podA, podB, podC) + + c := builder.NewReport("nginx"). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(3, string(corev1.PodInitialized), string(corev1.ConditionTrue), ""). + AddOrIncrementContainerState("app", "pod-a", "Waiting", "ContainerCreating", ""). + AddOrIncrementContainerState("app", "pod-b", "Waiting", "ContainerCreating", ""). + AddOrIncrementContainerState("app", "pod-c", "Terminated", "Completed", "Terminated with exit code 0")). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(3, string(corev1.PodScheduled), string(corev1.ConditionTrue), ""). + AddOrIncrementContainerState("app", "pod-a", "Waiting", "ContainerCreating", ""). + AddOrIncrementContainerState("app", "pod-b", "Waiting", "ContainerCreating", ""). + AddOrIncrementContainerState("app", "pod-c", "Terminated", "Completed", "Terminated with exit code 0")). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(3, string(corev1.PodReady), string(corev1.ConditionFalse), "ContainersNotReady"). + AddOrIncrementContainerState("app", "pod-a", "Waiting", "ContainerCreating", ""). + AddOrIncrementContainerState("app", "pod-b", "Waiting", "ContainerCreating", "").
+ AddOrIncrementContainerState("app", "pod-c", "Terminated", "Completed", "Terminated with exit code 0")) + + f.managementObjects = append(f.managementObjects, capacityTarget.DeepCopy()) + + sadPodsStatuses := []shipper.PodStatus{ + { + Condition: corev1.PodCondition{ + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + Reason: "ContainersNotReady", + }, + Containers: []corev1.ContainerStatus{ + { + Name: "app", + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: "ContainerCreating", + }, + }, + }, + }, + Name: "pod-a", + }, + { + Condition: corev1.PodCondition{ + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + Reason: "ContainersNotReady", + }, + Containers: []corev1.ContainerStatus{ + { + Name: "app", + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: "ContainerCreating", + }, + }, + }, + }, + Name: "pod-b", + }, + { + Condition: corev1.PodCondition{ + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + Reason: "ContainersNotReady", + }, + Containers: []corev1.ContainerStatus{ + { + Name: "app", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Reason: "Completed", + }, + }, + }, + }, + Name: "pod-c", + }, + } + + sort.Slice(sadPodsStatuses, func(i, j int) bool { + return sadPodsStatuses[i].Name < sadPodsStatuses[j].Name + }) + + capacityTarget.Status.Clusters = append(capacityTarget.Status.Clusters, shipper.ClusterCapacityStatus{ + Name: "minikube", + Reports: []shipper.ClusterCapacityReport{*c.Build()}, + AchievedPercent: 100, + AvailableReplicas: 3, + Conditions: []shipper.ClusterCapacityCondition{ + {Type: shipper.ClusterConditionTypeReady, Status: corev1.ConditionFalse, Reason: conditions.PodsNotReady, Message: "there are 3 sad pods"}, + }, + SadPods: sadPodsStatuses, + }) + + sort.Slice(capacityTarget.Status.Clusters[0].SadPods, func(i, j int) bool { + return capacityTarget.Status.Clusters[0].SadPods[i].Name < 
capacityTarget.Status.Clusters[0].SadPods[j].Name + }) + + updateAction := kubetesting.NewUpdateAction( + schema.GroupVersionResource{ + Group: shipper.SchemeGroupVersion.Group, + Version: shipper.SchemeGroupVersion.Version, + Resource: "capacitytargets", + }, + capacityTarget.GetNamespace(), + capacityTarget, + ) + + f.managementClusterActions = append(f.managementClusterActions, updateAction) + f.runCapacityTargetSyncHandler() + + // Calling the sync handler again with the updated capacity target object should yield the same results. + f.managementObjects = []runtime.Object{capacityTarget.DeepCopy()} + f.runCapacityTargetSyncHandler() +} + func TestUpdatingDeploymentsUpdatesTheCapacityTargetStatus(t *testing.T) { f := NewFixture(t) capacityTarget := newCapacityTarget(10, 50) - f.managementObjects = append(f.managementObjects, capacityTarget) + f.managementObjects = append(f.managementObjects, capacityTarget.DeepCopy()) deployment := newDeployment(5, 5) f.targetClusterObjects = append(f.targetClusterObjects, deployment) @@ -70,7 +558,7 @@ func TestUpdatingDeploymentsUpdatesTheCapacityTargetStatus(t *testing.T) { Message: "expected 5 replicas but have 0", }, } - f.expectCapacityTargetStatusUpdate(capacityTarget, 5, 50, clusterConditions) + f.expectCapacityTargetStatusUpdate(capacityTarget, 5, 50, clusterConditions, []shipper.ClusterCapacityReport{*builder.NewReport("nginx").Build()}) f.runCapacityTargetSyncHandler() } @@ -82,7 +570,7 @@ func TestSadPodsAreReflectedInCapacityTargetStatus(t *testing.T) { f := NewFixture(t) capacityTarget := newCapacityTarget(2, 100) - f.managementObjects = append(f.managementObjects, capacityTarget) + f.managementObjects = append(f.managementObjects, capacityTarget.DeepCopy()) deployment := newDeployment(2, 1) happyPod := createHappyPodForDeployment(deployment) @@ -97,7 +585,14 @@ func TestSadPodsAreReflectedInCapacityTargetStatus(t *testing.T) { Message: "there are 1 sad pods", }, } - 
f.expectCapacityTargetStatusUpdate(capacityTarget, 1, 50, clusterConditions, createSadPodConditionFromPod(sadPod)) + + c := builder.NewReport("nginx"). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(1, string(corev1.PodReady), string(corev1.ConditionFalse), "ExpectedFail")). + AddPodConditionBreakdownBuilder( + builder.NewPodConditionBreakdown(1, string(corev1.PodReady), string(corev1.ConditionTrue), "")) + + f.expectCapacityTargetStatusUpdate(capacityTarget, 1, 50, clusterConditions, []shipper.ClusterCapacityReport{*c.Build()}, createSadPodConditionFromPod(sadPod)) f.runCapacityTargetSyncHandler() } @@ -189,13 +684,14 @@ func (f *fixture) ExpectDeploymentPatchWithReplicas(deployment *appsv1.Deploymen f.targetClusterActions = append(f.targetClusterActions, patchAction) } -func (f *fixture) expectCapacityTargetStatusUpdate(capacityTarget *shipper.CapacityTarget, availableReplicas, achievedPercent int32, clusterConditions []shipper.ClusterCapacityCondition, sadPods ...shipper.PodStatus) { +func (f *fixture) expectCapacityTargetStatusUpdate(capacityTarget *shipper.CapacityTarget, availableReplicas, achievedPercent int32, clusterConditions []shipper.ClusterCapacityCondition, reports []shipper.ClusterCapacityReport, sadPods ...shipper.PodStatus) { clusterStatus := shipper.ClusterCapacityStatus{ Name: capacityTarget.Spec.Clusters[0].Name, AvailableReplicas: availableReplicas, AchievedPercent: achievedPercent, Conditions: clusterConditions, SadPods: sadPods, + Reports: reports, } capacityTarget.Status.Clusters = append(capacityTarget.Status.Clusters, clusterStatus) @@ -234,7 +730,7 @@ func newCapacityTarget(totalReplicaCount, percent int32) *shipper.CapacityTarget Namespace: namespace, Labels: metaLabels, OwnerReferences: []metav1.OwnerReference{ - metav1.OwnerReference{ + { APIVersion: shipper.SchemeGroupVersion.String(), Kind: "Release", Name: "0.0.1", @@ -277,6 +773,11 @@ func newDeployment(replicas int32, availableReplicas int32) 
*appsv1.Deployment { Spec: appsv1.DeploymentSpec{ Replicas: &replicas, Selector: specSelector, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: metaLabels, + }, + }, }, Status: status, } diff --git a/pkg/controller/capacity/deployment.go b/pkg/controller/capacity/deployment.go index e5a7b0060..2dca43f87 100644 --- a/pkg/controller/capacity/deployment.go +++ b/pkg/controller/capacity/deployment.go @@ -3,6 +3,7 @@ package capacity import ( "fmt" "math" + "sort" "github.com/golang/glog" appsv1 "k8s.io/api/apps/v1" @@ -183,6 +184,10 @@ func (c Controller) getSadPodsForDeploymentOnCluster(deployment *appsv1.Deployme } } + sort.Slice(sadPods, func(i, j int) bool { + return sadPods[i].Name < sadPods[j].Name + }) + return len(pods), len(sadPods), sadPods, nil } diff --git a/pkg/controller/capacity/reporting.go b/pkg/controller/capacity/reporting.go new file mode 100644 index 000000000..7c0796d9c --- /dev/null +++ b/pkg/controller/capacity/reporting.go @@ -0,0 +1,25 @@ +package capacity + +import ( + "sort" + + core_v1 "k8s.io/api/core/v1" + + shipper "github.com/bookingcom/shipper/pkg/apis/shipper/v1alpha1" + "github.com/bookingcom/shipper/pkg/controller/capacity/builder" +) + +func buildReport(ownerName string, podsList []*core_v1.Pod) *shipper.ClusterCapacityReport { + + sort.Slice(podsList, func(i, j int) bool { + return podsList[i].Name < podsList[j].Name + }) + + reportBuilder := builder.NewReport(ownerName) + + for _, pod := range podsList { + reportBuilder.AddPod(pod) + } + + return reportBuilder.Build() +} diff --git a/pkg/controller/capacity/utils_test.go b/pkg/controller/capacity/utils_test.go new file mode 100644 index 000000000..50468ad4f --- /dev/null +++ b/pkg/controller/capacity/utils_test.go @@ -0,0 +1,76 @@ +package capacity + +import ( + "sort" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type PodBuilder struct { + podName string + podNamespace string + podLabels map[string]string + 
containerStatuses []corev1.ContainerStatus + podConditions []corev1.PodCondition +} + +func newPodBuilder(podName string, podNamespace string, podLabels map[string]string) *PodBuilder { + return &PodBuilder{ + podName: podName, + podNamespace: podNamespace, + podLabels: podLabels, + } +} + +func (p *PodBuilder) SetName(name string) *PodBuilder { + p.podName = name + return p +} + +func (p *PodBuilder) SetNamespace(namespace string) *PodBuilder { + p.podNamespace = namespace + return p +} + +func (p *PodBuilder) SetLabels(labels map[string]string) *PodBuilder { + p.podLabels = labels + return p +} + +func (p *PodBuilder) AddContainerStatus(containerName string, containerState corev1.ContainerState, restartCount int32, lastTerminatedState *corev1.ContainerState) *PodBuilder { + containerStatus := corev1.ContainerStatus{Name: containerName, State: containerState, RestartCount: restartCount} + if lastTerminatedState != nil { + containerStatus.LastTerminationState = *lastTerminatedState + } + p.containerStatuses = append(p.containerStatuses, containerStatus) + return p +} + +func (p *PodBuilder) AddPodCondition(cond corev1.PodCondition) *PodBuilder { + p.podConditions = append(p.podConditions, cond) + return p +} + +func (p *PodBuilder) Build() *corev1.Pod { + + sort.Slice(p.podConditions, func(i, j int) bool { + return p.podConditions[i].Type < p.podConditions[j].Type + }) + + sort.Slice(p.containerStatuses, func(i, j int) bool { + return p.containerStatuses[i].Name < p.containerStatuses[j].Name + }) + + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: p.podName, + Namespace: p.podNamespace, + Labels: p.podLabels, + }, + Status: corev1.PodStatus{ + ContainerStatuses: p.containerStatuses, + Conditions: p.podConditions, + }, + } +}