feat(build): added container execution condition
* Added a condition for each container execution during build (either success or failure)
* Added an E2E test to cover this feature
squakez committed May 19, 2023
Parent: 4c493a4 · Commit: 9e104b3
Showing 3 changed files with 74 additions and 20 deletions.
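
The net effect: when the builder Pod finishes, each terminated container is reported as a dedicated condition on the Build status. Below is a minimal sketch of one such condition, assuming the pre-2.0 Camel K API module path; the concrete values (the "Completed" reason, the jar name) are illustrative, while the dynamic condition type and the "<reason> (<exit code>)" reason format come from the monitor_pod.go change below.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	v1 "github.com/apache/camel-k/pkg/apis/camel/v1"
)

func main() {
	// Hypothetical values for a builder container named "custom1" that
	// terminated with exit code 0.
	cond := v1.BuildCondition{
		Type:    v1.BuildConditionType("Container custom1 succeeded"),
		Status:  corev1.ConditionTrue, // corev1.ConditionFalse when the exit code is non-zero
		Reason:  "Completed (0)",
		Message: "generated-bytecode.jar", // container termination message, or a log tail
	}
	fmt.Printf("%s: %s (%s)\n", cond.Type, cond.Status, cond.Reason)
}

The E2E test below asserts exactly this shape for the custom1 and custom2 pipeline containers.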
e2e/common/traits/builder_test.go (42 additions, 1 deletion)
@@ -93,7 +93,7 @@ func TestBuilderTrait(t *testing.T) {
Eventually(BuilderPod(ns, builderKitName)().Spec.InitContainers[0].Resources.Requests.Memory().String(), TestTimeoutShort).Should(Equal("2Gi"))
Eventually(BuilderPod(ns, builderKitName)().Spec.InitContainers[0].Resources.Limits.Memory().String(), TestTimeoutShort).Should(Equal("3Gi"))

-Expect(Kamel("delete", "--all", "-n", ns).Execute()).To(Succeed())
+Expect(Kamel("reset", "-n", ns).Execute()).To(Succeed())
})

t.Run("Run custom pipeline task", func(t *testing.T) {
@@ -114,9 +114,50 @@ func TestBuilderTrait(t *testing.T) {
Eventually(BuilderPod(ns, builderKitName)().Spec.InitContainers[0].Name, TestTimeoutShort).Should(Equal("builder"))
Eventually(BuilderPod(ns, builderKitName)().Spec.InitContainers[1].Name, TestTimeoutShort).Should(Equal("custom1"))
Eventually(BuilderPod(ns, builderKitName)().Spec.InitContainers[2].Name, TestTimeoutShort).Should(Equal("custom2"))

+// Check containers conditions
+Eventually(Build(ns, integrationKitName), TestTimeoutShort).ShouldNot(BeNil())
+Eventually(
+Build(
+ns, integrationKitName)().Status.GetCondition(v1.BuildConditionType("Container custom1 succeeded")).Status,
+TestTimeoutShort).Should(Equal(corev1.ConditionTrue))
+Eventually(
+Build(ns, integrationKitName)().Status.GetCondition(v1.BuildConditionType("Container custom1 succeeded")).Message,
+TestTimeoutShort).Should(ContainSubstring("generated-bytecode.jar"))
+Eventually(Build(ns, integrationKitName), TestTimeoutShort).ShouldNot(BeNil())
+Eventually(
+Build(ns, integrationKitName)().Status.GetCondition(v1.BuildConditionType("Container custom2 succeeded")).Status,
+TestTimeoutShort).Should(Equal(corev1.ConditionTrue))
+Eventually(
+Build(ns, integrationKitName)().Status.GetCondition(v1.BuildConditionType("Container custom2 succeeded")).Message,
+TestTimeoutShort).Should(ContainSubstring("</project>"))

+// Check logs
+Eventually(Logs(ns, builderKitName, corev1.PodLogOptions{Container: "custom1"})).Should(ContainSubstring(`generated-bytecode.jar`))
+Eventually(Logs(ns, builderKitName, corev1.PodLogOptions{Container: "custom2"})).Should(ContainSubstring(`<artifactId>camel-k-runtime-bom</artifactId>`))

Expect(Kamel("delete", "--all", "-n", ns).Execute()).To(Succeed())
})

+name = "java-error"
+t.Run("Run custom pipeline task error", func(t *testing.T) {
+Expect(KamelRunWithID(operatorID, ns, "files/Java.java",
+"--name", name,
+"-t", "builder.tasks=custom1;alpine;cat missingfile.txt",
+).Execute()).To(Succeed())

+Eventually(IntegrationPhase(ns, name)).Should(Equal(v1.IntegrationPhaseBuildingKit))
+integrationKitName := IntegrationKit(ns, name)()
+// Check containers conditions
+Eventually(Build(ns, integrationKitName), TestTimeoutShort).ShouldNot(BeNil())
+Eventually(BuildConditions(ns, integrationKitName), TestTimeoutShort).ShouldNot(BeNil())
+Eventually(
+Build(ns, integrationKitName)().Status.GetCondition(v1.BuildConditionType("Container custom1 succeeded")).Status,
+TestTimeoutShort).Should(Equal(corev1.ConditionFalse))
+Eventually(
+Build(ns, integrationKitName)().Status.GetCondition(v1.BuildConditionType("Container custom1 succeeded")).Message,
+TestTimeoutShort).Should(ContainSubstring("No such file or directory"))

+Expect(Kamel("delete", "--all", "-n", ns).Execute()).To(Succeed())
+})
}
e2e/support/test_support.go (10 additions, 0 deletions)
@@ -1622,6 +1622,16 @@ func BuildPhase(ns, name string) func() v1.BuildPhase {
}
}

+func BuildConditions(ns, name string) func() []v1.BuildCondition {
+return func() []v1.BuildCondition {
+build := Build(ns, name)()
+if build != nil && build.Status.Conditions != nil {
+return build.Status.Conditions
+}
+return nil
+}
+}

func BuildFailureRecovery(ns, name string) func() int {
return func() int {
build := Build(ns, name)()
pkg/controller/build/monitor_pod.go (22 additions, 19 deletions)
@@ -139,7 +139,7 @@ func (action *monitorPodAction) Handle(ctx context.Context, build *v1.Build) (*v
finishedAt := action.getTerminatedTime(pod)
duration := finishedAt.Sub(build.Status.StartedAt.Time)
build.Status.Duration = duration.String()
-
+action.setConditionsFromTerminationMessages(ctx, pod, &build.Status)
monitorFinishedBuild(build)

buildCreator := kubernetes.GetCamelCreator(build)
@@ -168,15 +168,12 @@ func (action *monitorPodAction) Handle(ctx context.Context, build *v1.Build) (*v

case corev1.PodFailed:
phase := v1.BuildPhaseFailed
-message := "Pod failed"
-if terminationMessage := action.getTerminationMessage(ctx, pod); terminationMessage != "" {
-message = terminationMessage
-}
+message := fmt.Sprintf("Builder Pod %s failed (see conditions for more details)", pod.Name)
if pod.DeletionTimestamp != nil {
phase = v1.BuildPhaseInterrupted
-message = "Pod deleted"
+message = fmt.Sprintf("Builder Pod %s deleted", pod.Name)
} else if _, ok := pod.GetAnnotations()[timeoutAnnotation]; ok {
-message = "Build timeout"
+message = fmt.Sprintf("Builder Pod %s timeout", pod.Name)
}
// Do not override errored build
if build.Status.Phase == v1.BuildPhaseError {
@@ -187,7 +184,7 @@ func (action *monitorPodAction) Handle(ctx context.Context, build *v1.Build) (*v
finishedAt := action.getTerminatedTime(pod)
duration := finishedAt.Sub(build.Status.StartedAt.Time)
build.Status.Duration = duration.String()
-
+action.setConditionsFromTerminationMessages(ctx, pod, &build.Status)
monitorFinishedBuild(build)

buildCreator := kubernetes.GetCamelCreator(build)
@@ -304,36 +301,42 @@ func (action *monitorPodAction) getTerminatedTime(pod *corev1.Pod) metav1.Time {
return finishedAt
}

-func (action *monitorPodAction) getTerminationMessage(ctx context.Context, pod *corev1.Pod) string {
+// setConditionsFromTerminationMessages sets a condition for all those containers which have been terminated (successfully or not)
+func (action *monitorPodAction) setConditionsFromTerminationMessages(ctx context.Context, pod *corev1.Pod, buildStatus *v1.BuildStatus) {
var containers []corev1.ContainerStatus
containers = append(containers, pod.Status.InitContainerStatuses...)
containers = append(containers, pod.Status.ContainerStatuses...)

for _, container := range containers {
-if t := container.State.Terminated; t != nil && t.ExitCode != 0 {
-if t.Message != "" {
-return fmt.Sprintf("Container %s failed with: %s", container.Name, t.Message)
+if t := container.State.Terminated; t != nil {
+terminationMessage := t.Message
+// Dynamic condition type (it depends on each container name)
+containerConditionType := v1.BuildConditionType(fmt.Sprintf("Container %s succeeded", container.Name))
+containerSucceeded := corev1.ConditionTrue
+if t.ExitCode != 0 {
+containerSucceeded = corev1.ConditionFalse
}

var maxLines int64
-maxLines = 20
+// TODO we can make it a user variable !?
+maxLines = 10
logOptions := corev1.PodLogOptions{
Container: container.Name,
TailLines: &maxLines,
}
-message, err := log.DumpLog(ctx, action.client, pod, logOptions)
+terminationMessage, err := log.DumpLog(ctx, action.client, pod, logOptions)
if err != nil {
-action.L.Errorf(err, "Dumping log for %s Pod failed", pod.Name)
-return fmt.Sprintf(
-"Container %s failed. Operator was not able to retrieve the error message, please, check the container log from %s Pod",
+action.L.Errorf(err, "Dumping log for %s container in %s Pod failed", container.Name, pod.Name)
+terminationMessage = fmt.Sprintf(
+"Operator was not able to retrieve the error message, please, check the container %s log directly from %s Pod",
container.Name,
pod.Name,
)
}

-return fmt.Sprintf("Container %s failed with: %s", container.Name, message)
+terminationReason := fmt.Sprintf("%s (%d)", t.Reason, t.ExitCode)
+buildStatus.SetCondition(containerConditionType, containerSucceeded, terminationReason, terminationMessage)
}
}

-return ""
}
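
Since the condition type is dynamic (one per container name), a consumer has to match on the "Container " prefix rather than look up a fixed type. Below is a hedged sketch of such a lookup, assuming the same pre-2.0 API types; containerFailures is a hypothetical helper, not part of this commit.

package example

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"

	v1 "github.com/apache/camel-k/pkg/apis/camel/v1"
)

// containerFailures collects the per-container conditions recorded by the
// change above whose status is False, i.e. containers that exited non-zero.
func containerFailures(build *v1.Build) []string {
	var failures []string
	for _, c := range build.Status.Conditions {
		if strings.HasPrefix(string(c.Type), "Container ") && c.Status == corev1.ConditionFalse {
			failures = append(failures, fmt.Sprintf("%s: %s: %s", c.Type, c.Reason, c.Message))
		}
	}
	return failures
}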
