From 9e104b309245df85521dc9fb8ec5cfcbb24777b2 Mon Sep 17 00:00:00 2001
From: Pasquale Congiusti
Date: Fri, 19 May 2023 11:45:11 +0200
Subject: [PATCH] feat(build): added container execution condition

* Added a condition for each container execution during build (either
  success or failure)
* Added an E2E test to cover this feature
---
 e2e/common/traits/builder_test.go   | 43 ++++++++++++++++++++++++++++-
 e2e/support/test_support.go         | 10 +++++++
 pkg/controller/build/monitor_pod.go | 41 ++++++++++++++-------------
 3 files changed, 74 insertions(+), 20 deletions(-)

diff --git a/e2e/common/traits/builder_test.go b/e2e/common/traits/builder_test.go
index 9b47e48ff5..da8e6f39b0 100644
--- a/e2e/common/traits/builder_test.go
+++ b/e2e/common/traits/builder_test.go
@@ -93,7 +93,7 @@ func TestBuilderTrait(t *testing.T) {
 		Eventually(BuilderPod(ns, builderKitName)().Spec.InitContainers[0].Resources.Requests.Memory().String(), TestTimeoutShort).Should(Equal("2Gi"))
 		Eventually(BuilderPod(ns, builderKitName)().Spec.InitContainers[0].Resources.Limits.Memory().String(), TestTimeoutShort).Should(Equal("3Gi"))

-		Expect(Kamel("delete", "--all", "-n", ns).Execute()).To(Succeed())
+		Expect(Kamel("reset", "-n", ns).Execute()).To(Succeed())
 	})

 	t.Run("Run custom pipeline task", func(t *testing.T) {
@@ -114,9 +114,50 @@ func TestBuilderTrait(t *testing.T) {
 		Eventually(BuilderPod(ns, builderKitName)().Spec.InitContainers[0].Name, TestTimeoutShort).Should(Equal("builder"))
 		Eventually(BuilderPod(ns, builderKitName)().Spec.InitContainers[1].Name, TestTimeoutShort).Should(Equal("custom1"))
 		Eventually(BuilderPod(ns, builderKitName)().Spec.InitContainers[2].Name, TestTimeoutShort).Should(Equal("custom2"))
+
+		// Check containers conditions
+		Eventually(Build(ns, integrationKitName), TestTimeoutShort).ShouldNot(BeNil())
+		Eventually(
+			Build(
+				ns, integrationKitName)().Status.GetCondition(v1.BuildConditionType("Container custom1 succeeded")).Status,
+			TestTimeoutShort).Should(Equal(corev1.ConditionTrue))
+		Eventually(
+			Build(ns, integrationKitName)().Status.GetCondition(v1.BuildConditionType("Container custom1 succeeded")).Message,
+			TestTimeoutShort).Should(ContainSubstring("generated-bytecode.jar"))
+		Eventually(Build(ns, integrationKitName), TestTimeoutShort).ShouldNot(BeNil())
+		Eventually(
+			Build(ns, integrationKitName)().Status.GetCondition(v1.BuildConditionType("Container custom2 succeeded")).Status,
+			TestTimeoutShort).Should(Equal(corev1.ConditionTrue))
+		Eventually(
+			Build(ns, integrationKitName)().Status.GetCondition(v1.BuildConditionType("Container custom2 succeeded")).Message,
+			TestTimeoutShort).Should(ContainSubstring(""))
+
+		// Check logs
 		Eventually(Logs(ns, builderKitName, corev1.PodLogOptions{Container: "custom1"})).Should(ContainSubstring(`generated-bytecode.jar`))
 		Eventually(Logs(ns, builderKitName, corev1.PodLogOptions{Container: "custom2"})).Should(ContainSubstring(`camel-k-runtime-bom`))

 		Expect(Kamel("delete", "--all", "-n", ns).Execute()).To(Succeed())
 	})
+
+	name = "java-error"
+	t.Run("Run custom pipeline task error", func(t *testing.T) {
+		Expect(KamelRunWithID(operatorID, ns, "files/Java.java",
+			"--name", name,
+			"-t", "builder.tasks=custom1;alpine;cat missingfile.txt",
+		).Execute()).To(Succeed())
+
+		Eventually(IntegrationPhase(ns, name)).Should(Equal(v1.IntegrationPhaseBuildingKit))
+		integrationKitName := IntegrationKit(ns, name)()
+		// Check containers conditions
+		Eventually(Build(ns, integrationKitName), TestTimeoutShort).ShouldNot(BeNil())
+		Eventually(BuildConditions(ns, integrationKitName), TestTimeoutShort).ShouldNot(BeNil())
+		Eventually(
+			Build(ns, integrationKitName)().Status.GetCondition(v1.BuildConditionType("Container custom1 succeeded")).Status,
+			TestTimeoutShort).Should(Equal(corev1.ConditionFalse))
+		Eventually(
+			Build(ns, integrationKitName)().Status.GetCondition(v1.BuildConditionType("Container custom1 succeeded")).Message,
+			TestTimeoutShort).Should(ContainSubstring("No such file or directory"))
+
+		Expect(Kamel("delete", "--all", "-n", ns).Execute()).To(Succeed())
+	})
 }
diff --git a/e2e/support/test_support.go b/e2e/support/test_support.go
index 69e5e0b36b..8d2d48b573 100644
--- a/e2e/support/test_support.go
+++ b/e2e/support/test_support.go
@@ -1622,6 +1622,16 @@ func BuildPhase(ns, name string) func() v1.BuildPhase {
 	}
 }

+func BuildConditions(ns, name string) func() []v1.BuildCondition {
+	return func() []v1.BuildCondition {
+		build := Build(ns, name)()
+		if build != nil && build.Status.Conditions != nil {
+			return build.Status.Conditions
+		}
+		return nil
+	}
+}
+
 func BuildFailureRecovery(ns, name string) func() int {
 	return func() int {
 		build := Build(ns, name)()
diff --git a/pkg/controller/build/monitor_pod.go b/pkg/controller/build/monitor_pod.go
index 2e64e667d3..439cacf0fe 100644
--- a/pkg/controller/build/monitor_pod.go
+++ b/pkg/controller/build/monitor_pod.go
@@ -139,7 +139,7 @@ func (action *monitorPodAction) Handle(ctx context.Context, build *v1.Build) (*v
 		finishedAt := action.getTerminatedTime(pod)
 		duration := finishedAt.Sub(build.Status.StartedAt.Time)
 		build.Status.Duration = duration.String()
-
+		action.setConditionsFromTerminationMessages(ctx, pod, &build.Status)
 		monitorFinishedBuild(build)

 		buildCreator := kubernetes.GetCamelCreator(build)
@@ -168,15 +168,12 @@ func (action *monitorPodAction) Handle(ctx context.Context, build *v1.Build) (*v
 	case corev1.PodFailed:
 		phase := v1.BuildPhaseFailed
-		message := "Pod failed"
-		if terminationMessage := action.getTerminationMessage(ctx, pod); terminationMessage != "" {
-			message = terminationMessage
-		}
+		message := fmt.Sprintf("Builder Pod %s failed (see conditions for more details)", pod.Name)
 		if pod.DeletionTimestamp != nil {
 			phase = v1.BuildPhaseInterrupted
-			message = "Pod deleted"
+			message = fmt.Sprintf("Builder Pod %s deleted", pod.Name)
 		} else if _, ok := pod.GetAnnotations()[timeoutAnnotation]; ok {
-			message = "Build timeout"
+			message = fmt.Sprintf("Builder Pod %s timeout", pod.Name)
 		}
 		// Do not override errored build
 		if build.Status.Phase == v1.BuildPhaseError {
@@ -187,7 +184,7 @@ func (action *monitorPodAction) Handle(ctx context.Context, build *v1.Build) (*v
 		finishedAt := action.getTerminatedTime(pod)
 		duration := finishedAt.Sub(build.Status.StartedAt.Time)
 		build.Status.Duration = duration.String()
-
+		action.setConditionsFromTerminationMessages(ctx, pod, &build.Status)
 		monitorFinishedBuild(build)

 		buildCreator := kubernetes.GetCamelCreator(build)
@@ -304,36 +301,42 @@ func (action *monitorPodAction) getTerminatedTime(pod *corev1.Pod) metav1.Time {
 	return finishedAt
 }

-func (action *monitorPodAction) getTerminationMessage(ctx context.Context, pod *corev1.Pod) string {
+// setConditionsFromTerminationMessages sets a condition for all those containers which have been terminated (successfully or not)
+func (action *monitorPodAction) setConditionsFromTerminationMessages(ctx context.Context, pod *corev1.Pod, buildStatus *v1.BuildStatus) {
 	var containers []corev1.ContainerStatus
 	containers = append(containers, pod.Status.InitContainerStatuses...)
 	containers = append(containers, pod.Status.ContainerStatuses...)

 	for _, container := range containers {
-		if t := container.State.Terminated; t != nil && t.ExitCode != 0 {
-			if t.Message != "" {
-				return fmt.Sprintf("Container %s failed with: %s", container.Name, t.Message)
+		if t := container.State.Terminated; t != nil {
+			terminationMessage := t.Message
+			// Dynamic condition type (it depends on each container name)
+			containerConditionType := v1.BuildConditionType(fmt.Sprintf("Container %s succeeded", container.Name))
+			containerSucceeded := corev1.ConditionTrue
+			if t.ExitCode != 0 {
+				containerSucceeded = corev1.ConditionFalse
 			}
 			var maxLines int64
-			maxLines = 20
+			// TODO we can make it a user variable !?
+			maxLines = 10
 			logOptions := corev1.PodLogOptions{
 				Container: container.Name,
 				TailLines: &maxLines,
 			}
-			message, err := log.DumpLog(ctx, action.client, pod, logOptions)
+			terminationMessage, err := log.DumpLog(ctx, action.client, pod, logOptions)
 			if err != nil {
-				action.L.Errorf(err, "Dumping log for %s Pod failed", pod.Name)
-				return fmt.Sprintf(
-					"Container %s failed. Operator was not able to retrieve the error message, please, check the container log from %s Pod",
+				action.L.Errorf(err, "Dumping log for %s container in %s Pod failed", container.Name, pod.Name)
+				terminationMessage = fmt.Sprintf(
+					"Operator was not able to retrieve the error message, please, check the container %s log directly from %s Pod",
 					container.Name,
 					pod.Name,
 				)
 			}
-			return fmt.Sprintf("Container %s failed with: %s", container.Name, message)
+			terminationReason := fmt.Sprintf("%s (%d)", t.Reason, t.ExitCode)
+			buildStatus.SetCondition(containerConditionType, containerSucceeded, terminationReason, terminationMessage)
 		}
 	}
-	return ""
 }
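
Reviewer note, not taken from the patch: the sketch below shows how a client of the Build API could read back the per-container conditions that setConditionsFromTerminationMessages now records. Only v1.BuildConditionType, Status.GetCondition and the "Container <name> succeeded" naming scheme come from the patch; the package layout, the camel-k import path (which may carry a /v2 suffix depending on the release) and the reportContainerOutcome helper are assumptions made purely for illustration.

// Illustrative sketch only. It assumes the pre-2.0 import path of the
// Camel K API package; adjust to ".../v2/pkg/apis/camel/v1" on newer releases.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	v1 "github.com/apache/camel-k/pkg/apis/camel/v1"
)

// reportContainerOutcome is a hypothetical helper that looks up the dynamic
// condition recorded for a single builder container and prints its outcome.
func reportContainerOutcome(build *v1.Build, containerName string) {
	// The condition type mirrors the naming scheme used by
	// setConditionsFromTerminationMessages in monitor_pod.go.
	conditionType := v1.BuildConditionType(fmt.Sprintf("Container %s succeeded", containerName))
	condition := build.Status.GetCondition(conditionType)
	if condition == nil {
		fmt.Printf("no condition recorded yet for container %q\n", containerName)
		return
	}
	if condition.Status == corev1.ConditionTrue {
		// Reason is "<termination reason> (<exit code>)", e.g. "Completed (0)".
		fmt.Printf("container %q succeeded: %s\n", containerName, condition.Reason)
		return
	}
	// On failure the message carries the termination message or the last
	// lines of the container log dumped by the operator.
	fmt.Printf("container %q failed: %s\n%s\n", containerName, condition.Reason, condition.Message)
}

func main() {
	// Usage example with an empty Build: no condition has been recorded yet.
	reportContainerOutcome(&v1.Build{}, "custom1")
}

Run against the Build produced by the new "Run custom pipeline task error" test, such a helper would be expected to report the custom1 container as failed, with a reason built from the termination reason and exit code and a message carrying the tail of the container log (the "No such file or directory" output asserted in the E2E test), while the overall build message now simply points readers at the conditions.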