diff --git a/internal/civisibility/constants/test_tags.go b/internal/civisibility/constants/test_tags.go index ccd4fa4635..7248d82450 100644 --- a/internal/civisibility/constants/test_tags.go +++ b/internal/civisibility/constants/test_tags.go @@ -70,9 +70,16 @@ const ( // This constant is used to tag traces with the test session name TestSessionName = "test_session.name" + // TestIsNew indicates a new test + // This constant is used to tag test events that are detected as new by early flake detection + TestIsNew = "test.is_new" + // TestIsRetry indicates a retry execution // This constant is used to tag test events that are part of a retry execution TestIsRetry = "test.is_retry" + + // TestEarlyFlakeDetectionRetryAborted indicates a retry abort reason by the early flake detection feature + TestEarlyFlakeDetectionRetryAborted = "test.early_flake.abort_reason" ) // Define valid test status types. diff --git a/internal/civisibility/integrations/gotesting/instrumentation.go b/internal/civisibility/integrations/gotesting/instrumentation.go index 82aac9868b..ba934dce7b 100644 --- a/internal/civisibility/integrations/gotesting/instrumentation.go +++ b/internal/civisibility/integrations/gotesting/instrumentation.go @@ -7,26 +7,21 @@ package gotesting import ( "fmt" - "os" "reflect" "runtime" - "strings" + "slices" "sync" "sync/atomic" "testing" "time" "unsafe" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" "gopkg.in/DataDog/dd-trace-go.v1/internal" "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants" "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations" - "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils" + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/net" ) -// The following functions are being used by the gotesting package for manual instrumentation and the orchestrion -// automatic instrumentation - type ( // instrumentationMetadata contains the internal instrumentation metadata instrumentationMetadata struct { @@ 
-41,8 +36,21 @@ type ( panicData any // panic data recovered from an internal test execution when using an additional feature wrapper panicStacktrace string // stacktrace from the panic recovered from an internal test isARetry bool // flag to tag if a current test execution is a retry + isANewTest bool // flag to tag if a current test execution is part of a new test (EFD not known test) hasAdditionalFeatureWrapper bool // flag to check if the current execution is part of an additional feature wrapper } + + // runTestWithRetryOptions contains the options for calling runTestWithRetry function + runTestWithRetryOptions struct { + targetFunc func(t *testing.T) // target function to retry + t *testing.T // test to be executed + initialRetryCount int64 // initial retry count + adjustRetryCount func(duration time.Duration) int64 // adjust retry count function depending on the duration of the first execution + shouldRetry func(ptrToLocalT *testing.T, executionIndex int, remainingRetries int64) bool // function to decide whether we want to perform a retry + perExecution func(ptrToLocalT *testing.T, executionIndex int, duration time.Duration) // function to run after each test execution + onRetryEnd func(t *testing.T, executionIndex int, lastPtrToLocalT *testing.T) // function executed when all execution have finished + execMetaAdjust func(execMeta *testExecutionMetadata, executionIndex int) // function to modify the execution metadata for each execution + } ) var ( @@ -112,9 +120,13 @@ func createTestMetadata(tb testing.TB) *testExecutionMetadata { // getTestMetadata retrieves the CI visibility test metadata associated with a given *testing.T, *testing.B, *testing.common func getTestMetadata(tb testing.TB) *testExecutionMetadata { + return getTestMetadataFromPointer(reflect.ValueOf(tb).UnsafePointer()) +} + +// getTestMetadataFromPointer retrieves the CI visibility test metadata associated with a given *testing.T, *testing.B, *testing.common using a pointer +func 
getTestMetadataFromPointer(ptr unsafe.Pointer) *testExecutionMetadata { ciVisibilityTestMetadataMutex.RLock() defer ciVisibilityTestMetadataMutex.RUnlock() - ptr := reflect.ValueOf(tb).UnsafePointer() if v, ok := ciVisibilityTestMetadata[ptr]; ok { return v } @@ -128,492 +140,319 @@ func deleteTestMetadata(tb testing.TB) { delete(ciVisibilityTestMetadata, reflect.ValueOf(tb).UnsafePointer()) } -// instrumentTestingM helper function to instrument internalTests and internalBenchmarks in a `*testing.M` instance. -func instrumentTestingM(m *testing.M) func(exitCode int) { - // Check if CI Visibility was disabled using the kill switch before trying to initialize it - atomic.StoreInt32(&ciVisibilityEnabledValue, -1) - if !isCiVisibilityEnabled() { - return func(exitCode int) {} - } - - // Initialize CI Visibility - integrations.EnsureCiVisibilityInitialization() - - // Create a new test session for CI visibility. - session = integrations.CreateTestSession() +// checkIfCIVisibilityExitIsRequiredByPanic checks the additional features settings to decide if we allow individual tests to panic or not +func checkIfCIVisibilityExitIsRequiredByPanic() bool { + // Apply additional features + settings := integrations.GetSettings() - ddm := (*M)(m) + // If we don't plan to do retries then we allow to panic + return !settings.FlakyTestRetriesEnabled && !settings.EarlyFlakeDetection.Enabled +} - // Instrument the internal tests for CI visibility. - ddm.instrumentInternalTests(getInternalTestArray(m)) +// applyAdditionalFeaturesToTestFunc applies all the additional features as wrapper of a func(*testing.T) +func applyAdditionalFeaturesToTestFunc(f func(*testing.T), testInfo *commonInfo) func(*testing.T) { + // Apply additional features + settings := integrations.GetSettings() - // Instrument the internal benchmarks for CI visibility. 
- for _, v := range os.Args { - // check if benchmarking is enabled to instrument - if strings.Contains(v, "-bench") || strings.Contains(v, "test.bench") { - ddm.instrumentInternalBenchmarks(getInternalBenchmarkArray(m)) - break - } + // Check if we have something to do, if not we bail out + if !settings.FlakyTestRetriesEnabled && !settings.EarlyFlakeDetection.Enabled { + return f } - return func(exitCode int) { - // Check for code coverage if enabled. - if testing.CoverMode() != "" { - coveragePercentage := testing.Coverage() * 100 - session.SetTag(constants.CodeCoveragePercentageOfTotalLines, coveragePercentage) - } + // Target function + targetFunc := f - // Close the session and return the exit code. - session.Close(exitCode) + // Flaky test retries + if settings.FlakyTestRetriesEnabled { + targetFunc = applyFlakyTestRetriesAdditionalFeature(targetFunc) + } - // Finalize CI Visibility - integrations.ExitCiVisibility() + // Early flake detection + if settings.EarlyFlakeDetection.Enabled { + targetFunc = applyEarlyFlakeDetectionAdditionalFeature(testInfo, targetFunc, settings) } + + // Register the instrumented func as an internal instrumented func (to avoid double instrumentation) + setInstrumentationMetadata(runtime.FuncForPC(reflect.ValueOf(targetFunc).Pointer()), &instrumentationMetadata{IsInternal: true}) + return targetFunc } -// instrumentTestingTFunc helper function to instrument a testing function func(*testing.T) -func instrumentTestingTFunc(f func(*testing.T)) func(*testing.T) { - // Check if CI Visibility was disabled using the kill switch before instrumenting - if !isCiVisibilityEnabled() { - return f - } +// applyFlakyTestRetriesAdditionalFeature applies the flaky test retries feature as a wrapper of a func(*testing.T) +func applyFlakyTestRetriesAdditionalFeature(targetFunc func(*testing.T)) func(*testing.T) { + flakyRetrySettings := integrations.GetFlakyRetriesSettings() + + // If the retry count per test is > 1 and if we still have remaining total 
retry count + if flakyRetrySettings.RetryCount > 1 && flakyRetrySettings.RemainingTotalRetryCount > 0 { + return func(t *testing.T) { + runTestWithRetry(&runTestWithRetryOptions{ + targetFunc: targetFunc, + t: t, + initialRetryCount: flakyRetrySettings.RetryCount, + adjustRetryCount: nil, // No adjustRetryCount + shouldRetry: func(ptrToLocalT *testing.T, executionIndex int, remainingRetries int64) bool { + remainingTotalRetries := atomic.AddInt64(&flakyRetrySettings.RemainingTotalRetryCount, -1) + // Decide whether to retry + return ptrToLocalT.Failed() && remainingRetries >= 0 && remainingTotalRetries >= 0 + }, + perExecution: nil, // No perExecution needed + onRetryEnd: func(t *testing.T, executionIndex int, lastPtrToLocalT *testing.T) { + // Update original `t` with results from last execution + tCommonPrivates := getTestPrivateFields(t) + tCommonPrivates.SetFailed(lastPtrToLocalT.Failed()) + tCommonPrivates.SetSkipped(lastPtrToLocalT.Skipped()) + + // Update parent status if failed + if lastPtrToLocalT.Failed() { + tParentCommonPrivates := getTestParentPrivateFields(t) + tParentCommonPrivates.SetFailed(true) + } - // Reflect the function to obtain its pointer. - fReflect := reflect.Indirect(reflect.ValueOf(f)) - moduleName, suiteName := utils.GetModuleAndSuiteName(fReflect.Pointer()) - originalFunc := runtime.FuncForPC(fReflect.Pointer()) + // Print summary after retries + if executionIndex > 0 { + status := "passed" + if t.Failed() { + status = "failed" + } else if t.Skipped() { + status = "skipped" + } - // Avoid instrumenting twice - metadata := getInstrumentationMetadata(originalFunc) - if metadata != nil && metadata.IsInternal { - // If is an internal test, we don't instrument because f is already the instrumented func by executeInternalTest - return f - } + fmt.Printf(" [ %v after %v retries by Datadog's auto test retries ]\n", status, executionIndex) + } - instrumentedFn := func(t *testing.T) { - // Initialize module counters if not already present. 
- if _, ok := modulesCounters[moduleName]; !ok { - var v int32 - modulesCounters[moduleName] = &v + // Check if total retry count was exceeded + if flakyRetrySettings.RemainingTotalRetryCount < 1 { + fmt.Println(" the maximum number of total retries was exceeded.") + } + }, + execMetaAdjust: nil, // No execMetaAdjust needed + }) } - // Increment the test count in the module. - atomic.AddInt32(modulesCounters[moduleName], 1) + } + return targetFunc +} - // Initialize suite counters if not already present. - if _, ok := suitesCounters[suiteName]; !ok { - var v int32 - suitesCounters[suiteName] = &v - } - // Increment the test count in the suite. - atomic.AddInt32(suitesCounters[suiteName], 1) - - // Create or retrieve the module, suite, and test for CI visibility. - module := session.GetOrCreateModuleWithFramework(moduleName, testFramework, runtime.Version()) - suite := module.GetOrCreateSuite(suiteName) - test := suite.CreateTest(t.Name()) - test.SetTestFunc(originalFunc) - - // Get the metadata regarding the execution (in case is already created from the additional features) - execMeta := getTestMetadata(t) - if execMeta == nil { - // in case there's no additional features then we create the metadata for this execution and defer the disposal - execMeta = createTestMetadata(t) - defer deleteTestMetadata(t) - } +// applyEarlyFlakeDetectionAdditionalFeature applies the early flake detection feature as a wrapper of a func(*testing.T) +func applyEarlyFlakeDetectionAdditionalFeature(testInfo *commonInfo, targetFunc func(*testing.T), settings *net.SettingsResponseData) func(*testing.T) { + earlyFlakeDetectionData := integrations.GetEarlyFlakeDetectionSettings() + if earlyFlakeDetectionData != nil && + len(earlyFlakeDetectionData.Tests) > 0 { - // Set the CI visibility test. - execMeta.test = test - - defer func() { - if r := recover(); r != nil { - // Handle panic and set error information. 
- test.SetErrorInfo("panic", fmt.Sprint(r), utils.GetStacktrace(1)) - test.Close(integrations.ResultStatusFail) - checkModuleAndSuite(module, suite) - // this is not an internal test. Retries are not applied to subtest (because the parent internal test is going to be retried) - // so for this case we avoid closing CI Visibility, but we don't stop the panic from happening. - // it will be handled by `t.Run` - if checkIfCIVisibilityExitIsRequiredByPanic() { - integrations.ExitCiVisibility() - } - panic(r) - } else { - // Normal finalization: determine the test result based on its state. - if t.Failed() { - test.SetTag(ext.Error, true) - suite.SetTag(ext.Error, true) - module.SetTag(ext.Error, true) - test.Close(integrations.ResultStatusFail) - } else if t.Skipped() { - test.Close(integrations.ResultStatusSkip) - } else { - test.Close(integrations.ResultStatusPass) + // Define is a known test flag + isAKnownTest := false + + // Check if the test is a known test or a new one + if knownSuites, ok := earlyFlakeDetectionData.Tests[testInfo.moduleName]; ok { + if knownTests, ok := knownSuites[testInfo.suiteName]; ok { + if slices.Contains(knownTests, testInfo.testName) { + isAKnownTest = true } - checkModuleAndSuite(module, suite) } - }() + } - // Execute the original test function. 
- f(t) - } + // If it's a new test, then we apply the EFD wrapper + if !isAKnownTest { + return func(t *testing.T) { + var testPassCount, testSkipCount, testFailCount int + + runTestWithRetry(&runTestWithRetryOptions{ + targetFunc: targetFunc, + t: t, + initialRetryCount: 0, + adjustRetryCount: func(duration time.Duration) int64 { + slowTestRetriesSettings := settings.EarlyFlakeDetection.SlowTestRetries + durationSecs := duration.Seconds() + if durationSecs < 5 { + return int64(slowTestRetriesSettings.FiveS) + } else if durationSecs < 10 { + return int64(slowTestRetriesSettings.TenS) + } else if durationSecs < 30 { + return int64(slowTestRetriesSettings.ThirtyS) + } else if duration.Minutes() < 5 { + return int64(slowTestRetriesSettings.FiveM) + } + return 0 + }, + shouldRetry: func(ptrToLocalT *testing.T, executionIndex int, remainingRetries int64) bool { + return remainingRetries >= 0 + }, + perExecution: func(ptrToLocalT *testing.T, executionIndex int, duration time.Duration) { + // Collect test results + if ptrToLocalT.Failed() { + testFailCount++ + } else if ptrToLocalT.Skipped() { + testSkipCount++ + } else { + testPassCount++ + } + }, + onRetryEnd: func(t *testing.T, executionIndex int, lastPtrToLocalT *testing.T) { + // Update test status based on collected counts + tCommonPrivates := getTestPrivateFields(t) + tParentCommonPrivates := getTestParentPrivateFields(t) + status := "passed" + if testPassCount == 0 { + if testSkipCount > 0 { + status = "skipped" + tCommonPrivates.SetSkipped(true) + } + if testFailCount > 0 { + status = "failed" + tCommonPrivates.SetFailed(true) + tParentCommonPrivates.SetFailed(true) + } + } - setInstrumentationMetadata(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(instrumentedFn)).Pointer()), &instrumentationMetadata{IsInternal: true}) - return instrumentedFn + // Print summary after retries + if executionIndex > 0 { + fmt.Printf(" [ %v after %v retries by Datadog's early flake detection ]\n", status, executionIndex) + } + 
}, + execMetaAdjust: func(execMeta *testExecutionMetadata, executionIndex int) { + // Set the flag new test to true + execMeta.isANewTest = true + }, + }) + } + } + } + return targetFunc } -// instrumentSetErrorInfo helper function to set an error in the `*testing.T, *testing.B, *testing.common` CI Visibility span -func instrumentSetErrorInfo(tb testing.TB, errType string, errMessage string, skip int) { - // Check if CI Visibility was disabled using the kill switch before - if !isCiVisibilityEnabled() { - return - } +// runTestWithRetry encapsulates the common retry logic for test functions. +func runTestWithRetry(options *runTestWithRetryOptions) { + executionIndex := -1 + var panicExecution *testExecutionMetadata + var lastPtrToLocalT *testing.T - // Get the CI Visibility span and check if we can set the error type, message and stack - ciTestItem := getTestMetadata(tb) - if ciTestItem != nil && ciTestItem.test != nil && ciTestItem.error.CompareAndSwap(0, 1) { - ciTestItem.test.SetErrorInfo(errType, errMessage, utils.GetStacktrace(2+skip)) - } -} + // Module and suite for this test + var module integrations.DdTestModule + var suite integrations.DdTestSuite -// instrumentCloseAndSkip helper function to close and skip with a reason a `*testing.T, *testing.B, *testing.common` CI Visibility span -func instrumentCloseAndSkip(tb testing.TB, skipReason string) { - // Check if CI Visibility was disabled using the kill switch before - if !isCiVisibilityEnabled() { - return - } + // Check if we have execution metadata to propagate + originalExecMeta := getTestMetadata(options.t) - // Get the CI Visibility span and check if we can mark it as skipped and close it - ciTestItem := getTestMetadata(tb) - if ciTestItem != nil && ciTestItem.test != nil && ciTestItem.skipped.CompareAndSwap(0, 1) { - ciTestItem.test.CloseWithFinishTimeAndSkipReason(integrations.ResultStatusSkip, time.Now(), skipReason) - } -} + retryCount := options.initialRetryCount -// instrumentSkipNow helper 
function to close and skip a `*testing.T, *testing.B, *testing.common` CI Visibility span -func instrumentSkipNow(tb testing.TB) { - // Check if CI Visibility was disabled using the kill switch before - if !isCiVisibilityEnabled() { - return - } + for { + // Clear the matcher subnames map before each execution to avoid subname tests being called "parent/subname#NN" due to retries + getTestContextMatcherPrivateFields(options.t).ClearSubNames() - // Get the CI Visibility span and check if we can mark it as skipped and close it - ciTestItem := getTestMetadata(tb) - if ciTestItem != nil && ciTestItem.test != nil && ciTestItem.skipped.CompareAndSwap(0, 1) { - ciTestItem.test.Close(integrations.ResultStatusSkip) - } -} + // Increment execution index + executionIndex++ -// instrumentTestingBFunc helper function to instrument a benchmark function func(*testing.B) -func instrumentTestingBFunc(pb *testing.B, name string, f func(*testing.B)) (string, func(*testing.B)) { - // Check if CI Visibility was disabled using the kill switch before instrumenting - if !isCiVisibilityEnabled() { - return name, f - } + // Create a new local copy of `t` to isolate execution results + ptrToLocalT := &testing.T{} + copyTestWithoutParent(options.t, ptrToLocalT) - // Reflect the function to obtain its pointer. 
- fReflect := reflect.Indirect(reflect.ValueOf(f)) - moduleName, suiteName := utils.GetModuleAndSuiteName(fReflect.Pointer()) - originalFunc := runtime.FuncForPC(fReflect.Pointer()) + // Create a dummy parent so we can run the test using this local copy + // without affecting the test parent + localTPrivateFields := getTestPrivateFields(ptrToLocalT) + *localTPrivateFields.parent = unsafe.Pointer(&testing.T{}) - // Avoid instrumenting twice - if hasCiVisibilityBenchmarkFunc(originalFunc) { - return name, f - } + // Create an execution metadata instance + execMeta := createTestMetadata(ptrToLocalT) + execMeta.hasAdditionalFeatureWrapper = true - instrumentedFunc := func(b *testing.B) { - // The sub-benchmark implementation relies on creating a dummy sub benchmark (called [DD:TestVisibility]) with - // a Run over the original sub benchmark function to get the child results without interfering measurements - // By doing this the name of the sub-benchmark are changed - // from: - // benchmark/child - // to: - // benchmark/[DD:TestVisibility]/child - // We use regex and decrement the depth level of the benchmark to restore the original name - - // Initialize module counters if not already present. - if _, ok := modulesCounters[moduleName]; !ok { - var v int32 - modulesCounters[moduleName] = &v + // Propagate set tags from a parent wrapper + if originalExecMeta != nil { + if originalExecMeta.isANewTest { + execMeta.isANewTest = true + } + if originalExecMeta.isARetry { + execMeta.isARetry = true + } } - // Increment the test count in the module. - atomic.AddInt32(modulesCounters[moduleName], 1) - // Initialize suite counters if not already present. - if _, ok := suitesCounters[suiteName]; !ok { - var v int32 - suitesCounters[suiteName] = &v + // If we are in a retry execution, set the `isARetry` flag + if executionIndex > 0 { + execMeta.isARetry = true } - // Increment the test count in the suite. - atomic.AddInt32(suitesCounters[suiteName], 1) - // Decrement level. 
- bpf := getBenchmarkPrivateFields(b) - bpf.AddLevel(-1) + // Adjust execution metadata + if options.execMetaAdjust != nil { + options.execMetaAdjust(execMeta, executionIndex) + } + // Run original func similar to how it gets run internally in tRunner startTime := time.Now() - module := session.GetOrCreateModuleWithFrameworkAndStartTime(moduleName, testFramework, runtime.Version(), startTime) - suite := module.GetOrCreateSuiteWithStartTime(suiteName, startTime) - test := suite.CreateTestWithStartTime(fmt.Sprintf("%s/%s", pb.Name(), name), startTime) - test.SetTestFunc(originalFunc) - - // Restore the original name without the sub-benchmark auto name. - *bpf.name = subBenchmarkAutoNameRegex.ReplaceAllString(*bpf.name, "") - - // Run original benchmark. - var iPfOfB *benchmarkPrivateFields - var recoverFunc *func(r any) - instrumentedFunc := func(b *testing.B) { - // Stop the timer to do the initialization and replacements. - b.StopTimer() - + chn := make(chan struct{}, 1) + go func() { defer func() { - if r := recover(); r != nil { - if recoverFunc != nil { - fn := *recoverFunc - fn(r) - } - panic(r) - } + chn <- struct{}{} }() + options.targetFunc(ptrToLocalT) + }() + <-chn + duration := time.Since(startTime) - // First time we get the private fields of the inner testing.B. - iPfOfB = getBenchmarkPrivateFields(b) - // Replace this function with the original one (executed only once - the first iteration[b.run1]). - *iPfOfB.benchFunc = f - - // Get the metadata regarding the execution (in case is already created from the additional features) - execMeta := getTestMetadata(b) - if execMeta == nil { - // in case there's no additional features then we create the metadata for this execution and defer the disposal - execMeta = createTestMetadata(b) - defer deleteTestMetadata(b) - } - - // Set the CI visibility test. - execMeta.test = test - - // Enable the timer again. 
- b.ResetTimer() - b.StartTimer() - - // Execute original func - f(b) + // Call cleanup functions after this execution + if err := testingTRunCleanup(ptrToLocalT, 1); err != nil { + fmt.Printf("cleanup error: %v\n", err) } - setCiVisibilityBenchmarkFunc(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(instrumentedFunc)).Pointer())) - b.Run(name, instrumentedFunc) - - endTime := time.Now() - results := iPfOfB.result - - // Set benchmark data for CI visibility. - test.SetBenchmarkData("duration", map[string]any{ - "run": results.N, - "mean": results.NsPerOp(), - }) - test.SetBenchmarkData("memory_total_operations", map[string]any{ - "run": results.N, - "mean": results.AllocsPerOp(), - "statistics.max": results.MemAllocs, - }) - test.SetBenchmarkData("mean_heap_allocations", map[string]any{ - "run": results.N, - "mean": results.AllocedBytesPerOp(), - }) - test.SetBenchmarkData("total_heap_allocations", map[string]any{ - "run": results.N, - "mean": iPfOfB.result.MemBytes, - }) - if len(results.Extra) > 0 { - mapConverted := map[string]any{} - for k, v := range results.Extra { - mapConverted[k] = v - } - test.SetBenchmarkData("extra", mapConverted) + // Copy the current test to the wrapper if necessary + if originalExecMeta != nil { + originalExecMeta.test = execMeta.test } - // Define a function to handle panic during benchmark finalization. - panicFunc := func(r any) { - test.SetErrorInfo("panic", fmt.Sprint(r), utils.GetStacktrace(1)) - suite.SetTag(ext.Error, true) - module.SetTag(ext.Error, true) - test.Close(integrations.ResultStatusFail) - checkModuleAndSuite(module, suite) - integrations.ExitCiVisibility() + // Extract module and suite if present + currentSuite := execMeta.test.Suite() + if suite == nil && currentSuite != nil { + suite = currentSuite } - recoverFunc = &panicFunc - - // Normal finalization: determine the benchmark result based on its state. 
- if iPfOfB.B.Failed() { - test.SetTag(ext.Error, true) - suite.SetTag(ext.Error, true) - module.SetTag(ext.Error, true) - test.CloseWithFinishTime(integrations.ResultStatusFail, endTime) - } else if iPfOfB.B.Skipped() { - test.CloseWithFinishTime(integrations.ResultStatusSkip, endTime) - } else { - test.CloseWithFinishTime(integrations.ResultStatusPass, endTime) + if module == nil && currentSuite != nil && currentSuite.Module() != nil { + module = currentSuite.Module() } - checkModuleAndSuite(module, suite) - } - setCiVisibilityBenchmarkFunc(originalFunc) - setCiVisibilityBenchmarkFunc(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(instrumentedFunc)).Pointer())) - return subBenchmarkAutoName, instrumentedFunc -} - -// checkIfCIVisibilityExitIsRequiredByPanic checks the additional features settings to decide if we allow individual tests to panic or not -func checkIfCIVisibilityExitIsRequiredByPanic() bool { - // Apply additional features - settings := integrations.GetSettings() - - // If we don't plan to do retries then we allow to panic - return !settings.FlakyTestRetriesEnabled && !settings.EarlyFlakeDetection.Enabled -} - -// applyAdditionalFeaturesToTestFunc applies all the additional features as wrapper of a func(*testing.T) -func applyAdditionalFeaturesToTestFunc(f func(*testing.T)) func(*testing.T) { - // Apply additional features - settings := integrations.GetSettings() - - // Wrapper function - wrapperFunc := f - - // Flaky test retries - if settings.FlakyTestRetriesEnabled { - flakyRetrySettings := integrations.GetFlakyRetriesSettings() - - // if the retry count per test is > 1 and if we still have remaining total retry count - if flakyRetrySettings.RetryCount > 1 && flakyRetrySettings.RemainingTotalRetryCount > 0 { - wrapperFunc = func(t *testing.T) { - retryCount := flakyRetrySettings.RetryCount - executionIndex := -1 - var panicExecution *testExecutionMetadata - - // Get the private fields from the *testing.T instance - tParentCommonPrivates := 
getTestParentPrivateFields(t) - - // Module and suite for this test - var module integrations.DdTestModule - var suite integrations.DdTestSuite - - for { - // increment execution index - executionIndex++ - - // we need to create a new local copy of `t` as a way to isolate the results of this execution. - // this is because we don't want these executions to affect the overall result of the test process - // nor the parent test status. - ptrToLocalT := &testing.T{} - copyTestWithoutParent(t, ptrToLocalT) - - // we create a dummy parent so we can run the test using this local copy - // without affecting the test parent - localTPrivateFields := getTestPrivateFields(ptrToLocalT) - *localTPrivateFields.parent = unsafe.Pointer(&testing.T{}) - - // create an execution metadata instance - execMeta := createTestMetadata(ptrToLocalT) - execMeta.hasAdditionalFeatureWrapper = true - - // if we are in a retry execution we set the `isARetry` flag so we can tag the test event. - if executionIndex > 0 { - execMeta.isARetry = true - } + // Remove execution metadata + deleteTestMetadata(ptrToLocalT) - // run original func similar to it gets run internally in tRunner - chn := make(chan struct{}, 1) - go func() { - defer func() { - chn <- struct{}{} - }() - f(ptrToLocalT) - }() - <-chn - - // we call the cleanup funcs after this execution before trying another execution - callTestCleanupPanicValue := testingTRunCleanup(ptrToLocalT, 1) - if callTestCleanupPanicValue != nil { - fmt.Printf("cleanup error: %v\n", callTestCleanupPanicValue) - } - - // extract module and suite if present - currentSuite := execMeta.test.Suite() - if suite == nil && currentSuite != nil { - suite = currentSuite - } - if module == nil && currentSuite != nil && currentSuite.Module() != nil { - module = currentSuite.Module() - } - - // remove execution metadata - deleteTestMetadata(ptrToLocalT) - - // decrement retry counts - remainingRetries := atomic.AddInt64(&retryCount, -1) - remainingTotalRetries := 
atomic.AddInt64(&flakyRetrySettings.RemainingTotalRetryCount, -1) - - // if a panic occurs we fail the test - if execMeta.panicData != nil { - ptrToLocalT.Fail() - - // stores the first panic data so we can do a panic later after all retries - if panicExecution == nil { - panicExecution = execMeta - } - } + // Handle panic data + if execMeta.panicData != nil { + ptrToLocalT.Fail() + if panicExecution == nil { + panicExecution = execMeta + } + } - // if not failed and if there's no panic data then we don't do any retry - // if there's no more retries we also exit the loop - if !ptrToLocalT.Failed() || remainingRetries < 0 || remainingTotalRetries < 0 { - // because we are not going to do any other retry we set the original `t` with the results - // and in case of failure we mark the parent test as failed as well. - tCommonPrivates := getTestPrivateFields(t) - tCommonPrivates.SetFailed(ptrToLocalT.Failed()) - tCommonPrivates.SetSkipped(ptrToLocalT.Skipped()) + // Adjust retry count after first execution if necessary + if options.adjustRetryCount != nil && executionIndex == 0 { + retryCount = options.adjustRetryCount(duration) + } - // Only change the parent status to failing if the current test failed - if ptrToLocalT.Failed() { - tParentCommonPrivates.SetFailed(ptrToLocalT.Failed()) - } - break - } - } + // Decrement retry count + retryCount-- - // in case we execute some retries then let's print a summary of the result with the retries count - retries := flakyRetrySettings.RetryCount - (retryCount + 1) - if retries > 0 { - status := "passed" - if t.Failed() { - status = "failed" - } else if t.Skipped() { - status = "skipped" - } + // Call perExecution function + if options.perExecution != nil { + options.perExecution(ptrToLocalT, executionIndex, duration) + } - fmt.Printf(" [ %v after %v retries ]\n", status, retries) - } + // Update lastPtrToLocalT + lastPtrToLocalT = ptrToLocalT - // after all test executions we check if we need to close the suite and the module 
- checkModuleAndSuite(module, suite) + // Decide whether to continue + if !options.shouldRetry(ptrToLocalT, executionIndex, retryCount) { + break + } + } - // let's check if total retry count was exceeded - if flakyRetrySettings.RemainingTotalRetryCount < 1 { - fmt.Println(" the maximum number of total retries was exceeded.") - } + // Call onRetryEnd + if options.onRetryEnd != nil { + options.onRetryEnd(options.t, executionIndex, lastPtrToLocalT) + } - // if the test failed, and we have a panic information let's re-panic that - if t.Failed() && panicExecution != nil { - // we are about to panic, let's ensure we flush all ci visibility data and close the session event - integrations.ExitCiVisibility() - panic(fmt.Sprintf("test failed and panicked after %d retries.\n%v\n%v", executionIndex, panicExecution.panicData, panicExecution.panicStacktrace)) - } - } - } + // After all test executions, check if we need to close the suite and the module + if originalExecMeta == nil { + checkModuleAndSuite(module, suite) } - // Register the instrumented func as an internal instrumented func (to avoid double instrumentation) - setInstrumentationMetadata(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(wrapperFunc)).Pointer()), &instrumentationMetadata{IsInternal: true}) - return wrapperFunc + // Re-panic if test failed and panic data exists + if options.t.Failed() && panicExecution != nil { + // Ensure we flush all CI visibility data and close the session event + integrations.ExitCiVisibility() + panic(fmt.Sprintf("test failed and panicked after %d retries.\n%v\n%v", executionIndex, panicExecution.panicData, panicExecution.panicStacktrace)) + } } //go:linkname testingTRunCleanup testing.(*common).runCleanup diff --git a/internal/civisibility/integrations/gotesting/instrumentation_orchestrion.go b/internal/civisibility/integrations/gotesting/instrumentation_orchestrion.go new file mode 100644 index 0000000000..53677d33ee --- /dev/null +++ 
b/internal/civisibility/integrations/gotesting/instrumentation_orchestrion.go @@ -0,0 +1,389 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package gotesting + +import ( + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync/atomic" + "testing" + "time" + + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants" + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations" + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils" +) + +// ****************************************************************************************************************** +// WARNING: DO NOT CHANGE THE SIGNATURE OF THESE FUNCTIONS! +// +// The following functions are being used by both the manual api and most importantly the Orchestrion automatic +// instrumentation integration. +// ****************************************************************************************************************** + +// instrumentTestingM helper function to instrument internalTests and internalBenchmarks in a `*testing.M` instance. +func instrumentTestingM(m *testing.M) func(exitCode int) { + // Check if CI Visibility was disabled using the kill switch before trying to initialize it + atomic.StoreInt32(&ciVisibilityEnabledValue, -1) + if !isCiVisibilityEnabled() { + return func(exitCode int) {} + } + + // Initialize CI Visibility + integrations.EnsureCiVisibilityInitialization() + + // Create a new test session for CI visibility. + session = integrations.CreateTestSession() + + ddm := (*M)(m) + + // Instrument the internal tests for CI visibility. + ddm.instrumentInternalTests(getInternalTestArray(m)) + + // Instrument the internal benchmarks for CI visibility. 
+ for _, v := range os.Args { + // check if benchmarking is enabled to instrument + if strings.Contains(v, "-bench") || strings.Contains(v, "test.bench") { + ddm.instrumentInternalBenchmarks(getInternalBenchmarkArray(m)) + break + } + } + + return func(exitCode int) { + // Check for code coverage if enabled. + if testing.CoverMode() != "" { + coveragePercentage := testing.Coverage() * 100 + session.SetTag(constants.CodeCoveragePercentageOfTotalLines, coveragePercentage) + } + + // Close the session and return the exit code. + session.Close(exitCode) + + // Finalize CI Visibility + integrations.ExitCiVisibility() + } +} + +// instrumentTestingTFunc helper function to instrument a testing function func(*testing.T) +func instrumentTestingTFunc(f func(*testing.T)) func(*testing.T) { + // Check if CI Visibility was disabled using the kill switch before instrumenting + if !isCiVisibilityEnabled() { + return f + } + + // Reflect the function to obtain its pointer. + fReflect := reflect.Indirect(reflect.ValueOf(f)) + moduleName, suiteName := utils.GetModuleAndSuiteName(fReflect.Pointer()) + originalFunc := runtime.FuncForPC(fReflect.Pointer()) + + // Avoid instrumenting twice + metadata := getInstrumentationMetadata(originalFunc) + if metadata != nil && metadata.IsInternal { + // If is an internal test, we don't instrument because f is already the instrumented func by executeInternalTest + return f + } + + instrumentedFn := func(t *testing.T) { + // Initialize module counters if not already present. + if _, ok := modulesCounters[moduleName]; !ok { + var v int32 + modulesCounters[moduleName] = &v + } + // Increment the test count in the module. + atomic.AddInt32(modulesCounters[moduleName], 1) + + // Initialize suite counters if not already present. + if _, ok := suitesCounters[suiteName]; !ok { + var v int32 + suitesCounters[suiteName] = &v + } + // Increment the test count in the suite. 
+ atomic.AddInt32(suitesCounters[suiteName], 1) + + // Create or retrieve the module, suite, and test for CI visibility. + module := session.GetOrCreateModuleWithFramework(moduleName, testFramework, runtime.Version()) + suite := module.GetOrCreateSuite(suiteName) + test := suite.CreateTest(t.Name()) + test.SetTestFunc(originalFunc) + + // Get the metadata regarding the execution (in case is already created from the additional features) + execMeta := getTestMetadata(t) + if execMeta == nil { + // in case there's no additional features then we create the metadata for this execution and defer the disposal + execMeta = createTestMetadata(t) + defer deleteTestMetadata(t) + } + + // Because this is a subtest let's propagate some execution metadata from the parent test + testPrivateFields := getTestPrivateFields(t) + if testPrivateFields.parent != nil { + parentExecMeta := getTestMetadataFromPointer(*testPrivateFields.parent) + if parentExecMeta != nil { + if parentExecMeta.isANewTest { + execMeta.isANewTest = true + } + if parentExecMeta.isARetry { + execMeta.isARetry = true + } + } + } + + // Set the CI visibility test. + execMeta.test = test + + // If the execution is for a new test we tag the test event from early flake detection + if execMeta.isANewTest { + // Set the is new test tag + test.SetTag(constants.TestIsNew, "true") + } + + // If the execution is a retry we tag the test event + if execMeta.isARetry { + // Set the retry tag + test.SetTag(constants.TestIsRetry, "true") + } + + defer func() { + if r := recover(); r != nil { + // Handle panic and set error information. + test.SetErrorInfo("panic", fmt.Sprint(r), utils.GetStacktrace(1)) + test.Close(integrations.ResultStatusFail) + checkModuleAndSuite(module, suite) + // this is not an internal test. Retries are not applied to subtest (because the parent internal test is going to be retried) + // so for this case we avoid closing CI Visibility, but we don't stop the panic from happening. 
+ // it will be handled by `t.Run` + if checkIfCIVisibilityExitIsRequiredByPanic() { + integrations.ExitCiVisibility() + } + panic(r) + } else { + // Normal finalization: determine the test result based on its state. + if t.Failed() { + test.SetTag(ext.Error, true) + suite.SetTag(ext.Error, true) + module.SetTag(ext.Error, true) + test.Close(integrations.ResultStatusFail) + } else if t.Skipped() { + test.Close(integrations.ResultStatusSkip) + } else { + test.Close(integrations.ResultStatusPass) + } + checkModuleAndSuite(module, suite) + } + }() + + // Execute the original test function. + f(t) + } + + setInstrumentationMetadata(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(instrumentedFn)).Pointer()), &instrumentationMetadata{IsInternal: true}) + return instrumentedFn +} + +// instrumentSetErrorInfo helper function to set an error in the `*testing.T, *testing.B, *testing.common` CI Visibility span +func instrumentSetErrorInfo(tb testing.TB, errType string, errMessage string, skip int) { + // Check if CI Visibility was disabled using the kill switch before + if !isCiVisibilityEnabled() { + return + } + + // Get the CI Visibility span and check if we can set the error type, message and stack + ciTestItem := getTestMetadata(tb) + if ciTestItem != nil && ciTestItem.test != nil && ciTestItem.error.CompareAndSwap(0, 1) { + ciTestItem.test.SetErrorInfo(errType, errMessage, utils.GetStacktrace(2+skip)) + } +} + +// instrumentCloseAndSkip helper function to close and skip with a reason a `*testing.T, *testing.B, *testing.common` CI Visibility span +func instrumentCloseAndSkip(tb testing.TB, skipReason string) { + // Check if CI Visibility was disabled using the kill switch before + if !isCiVisibilityEnabled() { + return + } + + // Get the CI Visibility span and check if we can mark it as skipped and close it + ciTestItem := getTestMetadata(tb) + if ciTestItem != nil && ciTestItem.test != nil && ciTestItem.skipped.CompareAndSwap(0, 1) { + 
ciTestItem.test.CloseWithFinishTimeAndSkipReason(integrations.ResultStatusSkip, time.Now(), skipReason) + } +} + +// instrumentSkipNow helper function to close and skip a `*testing.T, *testing.B, *testing.common` CI Visibility span +func instrumentSkipNow(tb testing.TB) { + // Check if CI Visibility was disabled using the kill switch before + if !isCiVisibilityEnabled() { + return + } + + // Get the CI Visibility span and check if we can mark it as skipped and close it + ciTestItem := getTestMetadata(tb) + if ciTestItem != nil && ciTestItem.test != nil && ciTestItem.skipped.CompareAndSwap(0, 1) { + ciTestItem.test.Close(integrations.ResultStatusSkip) + } +} + +// instrumentTestingBFunc helper function to instrument a benchmark function func(*testing.B) +func instrumentTestingBFunc(pb *testing.B, name string, f func(*testing.B)) (string, func(*testing.B)) { + // Check if CI Visibility was disabled using the kill switch before instrumenting + if !isCiVisibilityEnabled() { + return name, f + } + + // Reflect the function to obtain its pointer. + fReflect := reflect.Indirect(reflect.ValueOf(f)) + moduleName, suiteName := utils.GetModuleAndSuiteName(fReflect.Pointer()) + originalFunc := runtime.FuncForPC(fReflect.Pointer()) + + // Avoid instrumenting twice + if hasCiVisibilityBenchmarkFunc(originalFunc) { + return name, f + } + + instrumentedFunc := func(b *testing.B) { + // The sub-benchmark implementation relies on creating a dummy sub benchmark (called [DD:TestVisibility]) with + // a Run over the original sub benchmark function to get the child results without interfering measurements + // By doing this the name of the sub-benchmark are changed + // from: + // benchmark/child + // to: + // benchmark/[DD:TestVisibility]/child + // We use regex and decrement the depth level of the benchmark to restore the original name + + // Initialize module counters if not already present. 
+ if _, ok := modulesCounters[moduleName]; !ok { + var v int32 + modulesCounters[moduleName] = &v + } + // Increment the test count in the module. + atomic.AddInt32(modulesCounters[moduleName], 1) + + // Initialize suite counters if not already present. + if _, ok := suitesCounters[suiteName]; !ok { + var v int32 + suitesCounters[suiteName] = &v + } + // Increment the test count in the suite. + atomic.AddInt32(suitesCounters[suiteName], 1) + + // Decrement level. + bpf := getBenchmarkPrivateFields(b) + bpf.AddLevel(-1) + + startTime := time.Now() + module := session.GetOrCreateModuleWithFrameworkAndStartTime(moduleName, testFramework, runtime.Version(), startTime) + suite := module.GetOrCreateSuiteWithStartTime(suiteName, startTime) + test := suite.CreateTestWithStartTime(fmt.Sprintf("%s/%s", pb.Name(), name), startTime) + test.SetTestFunc(originalFunc) + + // Restore the original name without the sub-benchmark auto name. + *bpf.name = subBenchmarkAutoNameRegex.ReplaceAllString(*bpf.name, "") + + // Run original benchmark. + var iPfOfB *benchmarkPrivateFields + var recoverFunc *func(r any) + instrumentedFunc := func(b *testing.B) { + // Stop the timer to do the initialization and replacements. + b.StopTimer() + + defer func() { + if r := recover(); r != nil { + if recoverFunc != nil { + fn := *recoverFunc + fn(r) + } + panic(r) + } + }() + + // First time we get the private fields of the inner testing.B. + iPfOfB = getBenchmarkPrivateFields(b) + // Replace this function with the original one (executed only once - the first iteration[b.run1]). + *iPfOfB.benchFunc = f + + // Get the metadata regarding the execution (in case is already created from the additional features) + execMeta := getTestMetadata(b) + if execMeta == nil { + // in case there's no additional features then we create the metadata for this execution and defer the disposal + execMeta = createTestMetadata(b) + defer deleteTestMetadata(b) + } + + // Set the CI visibility test. 
+ execMeta.test = test + + // Enable the timer again. + b.ResetTimer() + b.StartTimer() + + // Execute original func + f(b) + } + + setCiVisibilityBenchmarkFunc(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(instrumentedFunc)).Pointer())) + b.Run(name, instrumentedFunc) + + endTime := time.Now() + results := iPfOfB.result + + // Set benchmark data for CI visibility. + test.SetBenchmarkData("duration", map[string]any{ + "run": results.N, + "mean": results.NsPerOp(), + }) + test.SetBenchmarkData("memory_total_operations", map[string]any{ + "run": results.N, + "mean": results.AllocsPerOp(), + "statistics.max": results.MemAllocs, + }) + test.SetBenchmarkData("mean_heap_allocations", map[string]any{ + "run": results.N, + "mean": results.AllocedBytesPerOp(), + }) + test.SetBenchmarkData("total_heap_allocations", map[string]any{ + "run": results.N, + "mean": iPfOfB.result.MemBytes, + }) + if len(results.Extra) > 0 { + mapConverted := map[string]any{} + for k, v := range results.Extra { + mapConverted[k] = v + } + test.SetBenchmarkData("extra", mapConverted) + } + + // Define a function to handle panic during benchmark finalization. + panicFunc := func(r any) { + test.SetErrorInfo("panic", fmt.Sprint(r), utils.GetStacktrace(1)) + suite.SetTag(ext.Error, true) + module.SetTag(ext.Error, true) + test.Close(integrations.ResultStatusFail) + checkModuleAndSuite(module, suite) + integrations.ExitCiVisibility() + } + recoverFunc = &panicFunc + + // Normal finalization: determine the benchmark result based on its state. 
+ if iPfOfB.B.Failed() { + test.SetTag(ext.Error, true) + suite.SetTag(ext.Error, true) + module.SetTag(ext.Error, true) + test.CloseWithFinishTime(integrations.ResultStatusFail, endTime) + } else if iPfOfB.B.Skipped() { + test.CloseWithFinishTime(integrations.ResultStatusSkip, endTime) + } else { + test.CloseWithFinishTime(integrations.ResultStatusPass, endTime) + } + + checkModuleAndSuite(module, suite) + } + setCiVisibilityBenchmarkFunc(originalFunc) + setCiVisibilityBenchmarkFunc(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(instrumentedFunc)).Pointer())) + return subBenchmarkAutoName, instrumentedFunc +} diff --git a/internal/civisibility/integrations/gotesting/reflections.go b/internal/civisibility/integrations/gotesting/reflections.go index c01fc4f5f5..745aab1468 100644 --- a/internal/civisibility/integrations/gotesting/reflections.go +++ b/internal/civisibility/integrations/gotesting/reflections.go @@ -157,6 +157,45 @@ func getTestParentPrivateFields(t *testing.T) *commonPrivateFields { return nil } +// contextMatcher is collection of required private fields from testing.context.match +type contextMatcher struct { + mu *sync.RWMutex + subNames *map[string]int32 +} + +// ClearSubNames clears the subname map used for creating unique names for subtests +func (c *contextMatcher) ClearSubNames() { + c.mu.Lock() + defer c.mu.Unlock() + *c.subNames = map[string]int32{} +} + +// getTestContextMatcherPrivateFields is a method to retrieve all required privates field from +// testing.T.context.match, returning a contextMatcher instance +func getTestContextMatcherPrivateFields(t *testing.T) *contextMatcher { + indirectValue := reflect.Indirect(reflect.ValueOf(t)) + contextMember := indirectValue.FieldByName("context") + if !contextMember.IsValid() { + return nil + } + contextMember = contextMember.Elem() + matchMember := contextMember.FieldByName("match") + if !matchMember.IsValid() { + return nil + } + matchMember = matchMember.Elem() + + fields := 
&contextMatcher{} + if ptr, err := getFieldPointerFromValue(matchMember, "mu"); err == nil { + fields.mu = (*sync.RWMutex)(ptr) + } + if ptr, err := getFieldPointerFromValue(matchMember, "subNames"); err == nil { + fields.subNames = (*map[string]int32)(ptr) + } + + return fields +} + // copyTestWithoutParent tries to copy all private fields except the t.parent from a *testing.T to another func copyTestWithoutParent(source *testing.T, target *testing.T) { // Copy important field values diff --git a/internal/civisibility/integrations/gotesting/testcontroller_test.go b/internal/civisibility/integrations/gotesting/testcontroller_test.go new file mode 100644 index 0000000000..46e4f493ea --- /dev/null +++ b/internal/civisibility/integrations/gotesting/testcontroller_test.go @@ -0,0 +1,492 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package gotesting + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "testing" + + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer" + "gopkg.in/DataDog/dd-trace-go.v1/internal" + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants" + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations" + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/net" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" +) + +var currentM *testing.M +var mTracer mocktracer.Tracer + +// TestMain is the entry point for testing and runs before any test. 
+func TestMain(m *testing.M) { + log.SetLevel(log.LevelDebug) + + // We need to spawn separated test process for each scenario + scenarios := []string{"TestFlakyTestRetries", "TestEarlyFlakeDetection", "TestFlakyTestRetriesAndEarlyFlakeDetection"} + + if internal.BoolEnv(scenarios[0], false) { + fmt.Printf("Scenario %s started.\n", scenarios[0]) + runFlakyTestRetriesTests(m) + } else if internal.BoolEnv(scenarios[1], false) { + fmt.Printf("Scenario %s started.\n", scenarios[1]) + runEarlyFlakyTestDetectionTests(m) + } else if internal.BoolEnv(scenarios[2], false) { + fmt.Printf("Scenario %s started.\n", scenarios[2]) + runFlakyTestRetriesWithEarlyFlakyTestDetectionTests(m) + } else { + fmt.Println("Starting tests...") + for _, v := range scenarios { + cmd := exec.Command(os.Args[0], os.Args[1:]...) + cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=true", v)) + fmt.Printf("Running scenario: %s:\n", v) + err := cmd.Run() + fmt.Printf("Done.\n\n") + if err != nil { + if exiterr, ok := err.(*exec.ExitError); ok { + fmt.Printf("Scenario %s failed with exit code: %d\n", v, exiterr.ExitCode()) + os.Exit(exiterr.ExitCode()) + } else { + fmt.Printf("cmd.Run: %v\n", err) + os.Exit(1) + } + break + } + } + } + + os.Exit(0) +} + +func runFlakyTestRetriesTests(m *testing.M) { + // mock the settings api to enable automatic test retries + server := setUpHttpServer(true, false, nil) + defer server.Close() + + // set a custom retry count + os.Setenv(constants.CIVisibilityFlakyRetryCountEnvironmentVariable, "10") + + // initialize the mock tracer for doing assertions on the finished spans + currentM = m + mTracer = integrations.InitializeCIVisibilityMock() + + // execute the tests, we are expecting some tests to fail and check the assertion later + exitCode := RunM(m) + if exitCode != 1 { + panic("expected the exit code to be 1. 
We have a failing test on purpose.") + } + + // get all finished spans + finishedSpans := mTracer.FinishedSpans() + + // 1 session span + // 1 module span + // 2 suite span (testing_test.go and reflections_test.go) + // 5 tests from reflections_test.go + // 1 TestMyTest01 + // 1 TestMyTest02 + 2 subtests + // 1 Test_Foo + 3 subtests + // 1 TestWithExternalCalls + 2 subtests + // 1 TestSkip + // 1 TestRetryWithPanic + 3 retry tests from testing_test.go + // 1 TestRetryWithFail + 3 retry tests from testing_test.go + // 1 TestRetryAlwaysFail + 10 retry tests from testing_test.go + // 1 TestNormalPassingAfterRetryAlwaysFail + // 1 TestEarlyFlakeDetection + // 2 normal spans from testing_test.go + + // check spans by resource name + checkSpansByResourceName(finishedSpans, "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations/gotesting", 1) + checkSpansByResourceName(finishedSpans, "reflections_test.go", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest01", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02/sub01", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02/sub01/sub03", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/yellow_should_return_color", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/banana_should_return_fruit", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/duck_should_return_animal", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestWithExternalCalls", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestWithExternalCalls/default", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestWithExternalCalls/custom-name", 1) + 
checkSpansByResourceName(finishedSpans, "testing_test.go.TestSkip", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryWithPanic", 4) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryWithFail", 4) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryAlwaysFail", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestNormalPassingAfterRetryAlwaysFail", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestEarlyFlakeDetection", 1) + + // check spans by tag + checkSpansByTagName(finishedSpans, constants.TestIsRetry, 16) + + // check spans by type + checkSpansByType(finishedSpans, + 44, + 1, + 1, + 2, + 38, + 2) + + os.Exit(0) +} + +func runEarlyFlakyTestDetectionTests(m *testing.M) { + // mock the settings api to enable automatic test retries + server := setUpHttpServer(false, true, &net.EfdResponseData{ + Tests: net.EfdResponseDataModules{ + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations/gotesting": net.EfdResponseDataSuites{ + "reflections_test.go": []string{ + "TestGetFieldPointerFrom", + "TestGetInternalTestArray", + "TestGetInternalBenchmarkArray", + "TestCommonPrivateFields_AddLevel", + "TestGetBenchmarkPrivateFields", + }, + }, + }, + }) + defer server.Close() + + // initialize the mock tracer for doing assertions on the finished spans + currentM = m + mTracer = integrations.InitializeCIVisibilityMock() + + // execute the tests, we are expecting some tests to fail and check the assertion later + exitCode := RunM(m) + if exitCode != 1 { + panic("expected the exit code to be 1. 
We have a failing test on purpose.") + } + + // get all finished spans + finishedSpans := mTracer.FinishedSpans() + + // 1 session span + // 1 module span + // 2 suite span (testing_test.go and reflections_test.go) + // 5 tests from reflections_test.go + // 11 TestMyTest01 + // 11 TestMyTest02 + 22 subtests + // 11 Test_Foo + 33 subtests + // 11 TestWithExternalCalls + 22 subtests + // 11 TestSkip + // 11 TestRetryWithPanic + // 11 TestRetryWithFail + // 11 TestRetryAlwaysFail + // 11 TestNormalPassingAfterRetryAlwaysFail + // 11 TestEarlyFlakeDetection + // 22 normal spans from testing_test.go + + // check spans by resource name + checkSpansByResourceName(finishedSpans, "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations/gotesting", 1) + checkSpansByResourceName(finishedSpans, "reflections_test.go", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest01", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02/sub01", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02/sub01/sub03", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/yellow_should_return_color", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/banana_should_return_fruit", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/duck_should_return_animal", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestWithExternalCalls", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestWithExternalCalls/default", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestWithExternalCalls/custom-name", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestSkip", 11) + checkSpansByResourceName(finishedSpans, 
"testing_test.go.TestRetryWithPanic", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryWithFail", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryAlwaysFail", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestNormalPassingAfterRetryAlwaysFail", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestEarlyFlakeDetection", 11) + + // check spans by tag + checkSpansByTagName(finishedSpans, constants.TestIsNew, 187) + checkSpansByTagName(finishedSpans, constants.TestIsRetry, 170) + + // check spans by type + checkSpansByType(finishedSpans, + 218, + 1, + 1, + 2, + 192, + 22) + + os.Exit(0) +} + +func runFlakyTestRetriesWithEarlyFlakyTestDetectionTests(m *testing.M) { + // mock the settings api to enable automatic test retries + server := setUpHttpServer(true, true, &net.EfdResponseData{ + Tests: net.EfdResponseDataModules{ + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations/gotesting": net.EfdResponseDataSuites{ + "reflections_test.go": []string{ + "TestGetFieldPointerFrom", + "TestGetInternalTestArray", + "TestGetInternalBenchmarkArray", + "TestCommonPrivateFields_AddLevel", + "TestGetBenchmarkPrivateFields", + }, + "testing_test.go": []string{ + "TestMyTest01", + "TestMyTest02", + "Test_Foo", + "TestWithExternalCalls", + "TestSkip", + "TestRetryWithPanic", + "TestRetryWithFail", + "TestRetryAlwaysFail", + "TestNormalPassingAfterRetryAlwaysFail", + }, + }, + }, + }) + defer server.Close() + + // set a custom retry count + os.Setenv(constants.CIVisibilityFlakyRetryCountEnvironmentVariable, "10") + + // initialize the mock tracer for doing assertions on the finished spans + currentM = m + mTracer = integrations.InitializeCIVisibilityMock() + + // execute the tests, we are expecting some tests to fail and check the assertion later + exitCode := RunM(m) + if exitCode != 1 { + panic("expected the exit code to be 1. 
We have a failing test on purpose.") + } + + // get all finished spans + finishedSpans := mTracer.FinishedSpans() + + // 1 session span + // 1 module span + // 2 suite span (testing_test.go and reflections_test.go) + // 5 tests from reflections_test.go + // 1 TestMyTest01 + // 1 TestMyTest02 + 2 subtests + // 1 Test_Foo + 3 subtests + // 1 TestWithExternalCalls + 2 subtests + // 1 TestSkip + // 1 TestRetryWithPanic + 3 retry tests from testing_test.go + // 1 TestRetryWithFail + 3 retry tests from testing_test.go + // 1 TestRetryAlwaysFail + 10 retry tests from testing_test.go + // 1 TestNormalPassingAfterRetryAlwaysFail + // 11 TestEarlyFlakeDetection + 10 retries + // 2 normal spans from testing_test.go + + // check spans by resource name + checkSpansByResourceName(finishedSpans, "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations/gotesting", 1) + checkSpansByResourceName(finishedSpans, "reflections_test.go", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest01", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02/sub01", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02/sub01/sub03", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/yellow_should_return_color", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/banana_should_return_fruit", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/duck_should_return_animal", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestWithExternalCalls", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestWithExternalCalls/default", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestWithExternalCalls/custom-name", 1) + 
checkSpansByResourceName(finishedSpans, "testing_test.go.TestSkip", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryWithPanic", 4) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryWithFail", 4) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryAlwaysFail", 11) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestNormalPassingAfterRetryAlwaysFail", 1) + checkSpansByResourceName(finishedSpans, "testing_test.go.TestEarlyFlakeDetection", 21) + + // check spans by tag + checkSpansByTagName(finishedSpans, constants.TestIsNew, 21) + checkSpansByTagName(finishedSpans, constants.TestIsRetry, 36) + + // check spans by type + checkSpansByType(finishedSpans, + 64, + 1, + 1, + 2, + 58, + 2) + + os.Exit(0) +} + +func checkSpansByType(finishedSpans []mocktracer.Span, + totalFinishedSpansCount int, sessionSpansCount int, moduleSpansCount int, + suiteSpansCount int, testSpansCount int, normalSpansCount int) { + calculatedFinishedSpans := len(finishedSpans) + fmt.Printf("Number of spans received: %d\n", calculatedFinishedSpans) + if calculatedFinishedSpans < totalFinishedSpansCount { + panic(fmt.Sprintf("expected at least %d finished spans, got %d", totalFinishedSpansCount, calculatedFinishedSpans)) + } + + sessionSpans := getSpansWithType(finishedSpans, constants.SpanTypeTestSession) + calculatedSessionSpans := len(sessionSpans) + fmt.Printf("Number of sessions received: %d\n", calculatedSessionSpans) + showResourcesNameFromSpans(sessionSpans) + if calculatedSessionSpans != sessionSpansCount { + panic(fmt.Sprintf("expected exactly %d session span, got %d", sessionSpansCount, calculatedSessionSpans)) + } + + moduleSpans := getSpansWithType(finishedSpans, constants.SpanTypeTestModule) + calculatedModuleSpans := len(moduleSpans) + fmt.Printf("Number of modules received: %d\n", calculatedModuleSpans) + showResourcesNameFromSpans(moduleSpans) + if calculatedModuleSpans != moduleSpansCount { + 
panic(fmt.Sprintf("expected exactly %d module span, got %d", moduleSpansCount, calculatedModuleSpans)) + } + + suiteSpans := getSpansWithType(finishedSpans, constants.SpanTypeTestSuite) + calculatedSuiteSpans := len(suiteSpans) + fmt.Printf("Number of suites received: %d\n", calculatedSuiteSpans) + showResourcesNameFromSpans(suiteSpans) + if calculatedSuiteSpans != suiteSpansCount { + panic(fmt.Sprintf("expected exactly %d suite spans, got %d", suiteSpansCount, calculatedSuiteSpans)) + } + + testSpans := getSpansWithType(finishedSpans, constants.SpanTypeTest) + calculatedTestSpans := len(testSpans) + fmt.Printf("Number of tests received: %d\n", calculatedTestSpans) + showResourcesNameFromSpans(testSpans) + if calculatedTestSpans != testSpansCount { + panic(fmt.Sprintf("expected exactly %d test spans, got %d", testSpansCount, calculatedTestSpans)) + } + + normalSpans := getSpansWithType(finishedSpans, ext.SpanTypeHTTP) + calculatedNormalSpans := len(normalSpans) + fmt.Printf("Number of http spans received: %d\n", calculatedNormalSpans) + showResourcesNameFromSpans(normalSpans) + if calculatedNormalSpans != normalSpansCount { + panic(fmt.Sprintf("expected exactly %d normal spans, got %d", normalSpansCount, calculatedNormalSpans)) + } +} + +func checkSpansByResourceName(finishedSpans []mocktracer.Span, resourceName string, count int) []mocktracer.Span { + spans := getSpansWithResourceName(finishedSpans, resourceName) + numOfSpans := len(spans) + if numOfSpans != count { + panic(fmt.Sprintf("expected exactly %d spans with resource name: %s, got %d", count, resourceName, numOfSpans)) + } + + return spans +} + +func checkSpansByTagName(finishedSpans []mocktracer.Span, tagName string, count int) []mocktracer.Span { + spans := getSpansWithTagName(finishedSpans, tagName) + numOfSpans := len(spans) + if numOfSpans != count { + panic(fmt.Sprintf("expected exactly %d spans with tag name: %s, got %d", count, tagName, numOfSpans)) + } + + return spans +} + +func 
setUpHttpServer(flakyRetriesEnabled bool, earlyFlakyDetectionEnabled bool, earlyFlakyDetectionData *net.EfdResponseData) *httptest.Server { + // mock the settings API (and, when enabled, the EFD known-tests API) with the requested flags + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Printf("MockApi received request: %s\n", r.URL.Path) + + // Settings request + if r.URL.Path == "/api/v2/libraries/tests/services/setting" { + w.Header().Set("Content-Type", "application/json") + response := struct { + Data struct { + ID string `json:"id"` + Type string `json:"type"` + Attributes net.SettingsResponseData `json:"attributes"` + } `json:"data,omitempty"` + }{} + + // configure flaky test retries as requested by the caller + response.Data.Attributes = net.SettingsResponseData{ + FlakyTestRetriesEnabled: flakyRetriesEnabled, + } + response.Data.Attributes.EarlyFlakeDetection.Enabled = earlyFlakyDetectionEnabled + response.Data.Attributes.EarlyFlakeDetection.SlowTestRetries.FiveS = 10 + response.Data.Attributes.EarlyFlakeDetection.SlowTestRetries.TenS = 5 + response.Data.Attributes.EarlyFlakeDetection.SlowTestRetries.ThirtyS = 3 + response.Data.Attributes.EarlyFlakeDetection.SlowTestRetries.FiveM = 2 + + fmt.Printf("MockApi sending response: %v\n", response) + json.NewEncoder(w).Encode(&response) + } else if earlyFlakyDetectionEnabled && r.URL.Path == "/api/v2/ci/libraries/tests" { + w.Header().Set("Content-Type", "application/json") + response := struct { + Data struct { + ID string `json:"id"` + Type string `json:"type"` + Attributes net.EfdResponseData `json:"attributes"` + } `json:"data,omitempty"` + }{} + + if earlyFlakyDetectionData != nil { + response.Data.Attributes = *earlyFlakyDetectionData + } + + fmt.Printf("MockApi sending response: %v\n", response) + json.NewEncoder(w).Encode(&response) + } else { + http.NotFound(w, r) + } + })) + + // set the agentless-mode, custom agentless URL, and API key env-vars + fmt.Printf("Using mockapi at: %s\n", server.URL) + 
os.Setenv(constants.CIVisibilityAgentlessEnabledEnvironmentVariable, "1") + os.Setenv(constants.CIVisibilityAgentlessURLEnvironmentVariable, server.URL) + os.Setenv(constants.APIKeyEnvironmentVariable, "12345") + + return server +} + +func getSpansWithType(spans []mocktracer.Span, spanType string) []mocktracer.Span { + var result []mocktracer.Span + for _, span := range spans { + if span.Tag(ext.SpanType) == spanType { + result = append(result, span) + } + } + + return result +} + +func getSpansWithResourceName(spans []mocktracer.Span, resourceName string) []mocktracer.Span { + var result []mocktracer.Span + for _, span := range spans { + if span.Tag(ext.ResourceName) == resourceName { + result = append(result, span) + } + } + + return result +} + +func getSpansWithTagName(spans []mocktracer.Span, tag string) []mocktracer.Span { + var result []mocktracer.Span + for _, span := range spans { + if span.Tag(tag) != nil { + result = append(result, span) + } + } + + return result +} + +func showResourcesNameFromSpans(spans []mocktracer.Span) { + for i, span := range spans { + fmt.Printf(" [%d] = %v\n", i, span.Tag(ext.ResourceName)) + } +} diff --git a/internal/civisibility/integrations/gotesting/testing.go b/internal/civisibility/integrations/gotesting/testing.go index ac6092645c..4fa6443647 100644 --- a/internal/civisibility/integrations/gotesting/testing.go +++ b/internal/civisibility/integrations/gotesting/testing.go @@ -147,13 +147,27 @@ func (ddm *M) executeInternalTest(testInfo *testingTInfo) func(*testing.T) { // Set the CI Visibility test to the execution metadata execMeta.test = test + // If the execution is for a new test we tag the test event from early flake detection + if execMeta.isANewTest { + // Set the is new test tag + test.SetTag(constants.TestIsNew, "true") + } + // If the execution is a retry we tag the test event if execMeta.isARetry { // Set the retry tag test.SetTag(constants.TestIsRetry, "true") } + startTime := time.Now() defer func() { + 
duration := time.Since(startTime) + // check if it is a new EFD test and the duration >= 5 min + if execMeta.isANewTest && duration.Minutes() >= 5 { + // Set the EFD retry abort reason + test.SetTag(constants.TestEarlyFlakeDetectionRetryAborted, "slow") + } + if r := recover(); r != nil { + // Handle panic and set error information. + execMeta.panicData = r @@ -198,7 +212,7 @@ func (ddm *M) executeInternalTest(testInfo *testingTInfo) func(*testing.T) { setInstrumentationMetadata(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(instrumentedFunc)).Pointer()), &instrumentationMetadata{IsInternal: true}) // Get the additional feature wrapper - return applyAdditionalFeaturesToTestFunc(instrumentedFunc) + return applyAdditionalFeaturesToTestFunc(instrumentedFunc, &testInfo.commonInfo) } // instrumentInternalBenchmarks instruments the internal benchmarks for CI visibility. diff --git a/internal/civisibility/integrations/gotesting/testing_test.go b/internal/civisibility/integrations/gotesting/testing_test.go index 8d655719aa..aff2263ed8 100644 --- a/internal/civisibility/integrations/gotesting/testing_test.go +++ b/internal/civisibility/integrations/gotesting/testing_test.go @@ -6,14 +6,11 @@ package gotesting import ( - "encoding/json" "fmt" "net/http" "net/http/httptest" - "os" "runtime" "slices" - "strconv" "testing" ddhttp "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http" @@ -21,122 +18,10 @@ import ( "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer" ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants" - "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations" - "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/net" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" "github.com/stretchr/testify/assert" ) -var currentM *testing.M -var mTracer mocktracer.Tracer - -// TestMain is the entry point for testing and runs before any test. 
-func TestMain(m *testing.M) { - - log.SetLevel(log.LevelDebug) - - // mock the settings api to enable automatic test retries - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Printf("MockApi received request: %s\n", r.URL.Path) - - // Settings request - if r.URL.Path == "/api/v2/libraries/tests/services/setting" { - w.Header().Set("Content-Type", "application/json") - response := struct { - Data struct { - ID string `json:"id"` - Type string `json:"type"` - Attributes net.SettingsResponseData `json:"attributes"` - } `json:"data,omitempty"` - }{} - - // let's enable flaky test retries - response.Data.Attributes = net.SettingsResponseData{ - FlakyTestRetriesEnabled: true, - } - - fmt.Printf("MockApi sending response: %v\n", response) - json.NewEncoder(w).Encode(&response) - } - })) - defer server.Close() - - // set the custom agentless url and the flaky retry count env-var - fmt.Printf("Using mockapi at: %s\n", server.URL) - os.Setenv(constants.CIVisibilityAgentlessEnabledEnvironmentVariable, "1") - os.Setenv(constants.CIVisibilityAgentlessURLEnvironmentVariable, server.URL) - os.Setenv(constants.APIKeyEnvironmentVariable, "12345") - os.Setenv(constants.CIVisibilityFlakyRetryCountEnvironmentVariable, "10") - - // initialize the mock tracer for doing assertions on the finished spans - currentM = m - mTracer = integrations.InitializeCIVisibilityMock() - - // execute the tests, because we are expecting some tests to fail and check the assertion later - // we don't store the exit code from the test runner - exitCode := RunM(m) - if exitCode != 1 { - panic("expected the exit code to be 1. 
We have a failing test on purpose.") - } - - // get all finished spans - finishedSpans := mTracer.FinishedSpans() - - // 1 session span - // 1 module span - // 2 suite span (testing_test.go and reflections_test.go) - // 6 tests spans from testing_test.go - // 7 sub stest spans from testing_test.go - // 1 TestRetryWithPanic + 3 retry tests from testing_test.go - // 1 TestRetryWithFail + 3 retry tests from testing_test.go - // 1 TestRetryAlwaysFail + 10 retry tests from testing_test.go - // 2 normal spans from testing_test.go - // 5 tests from reflections_test.go - // 2 benchmark spans (optional - require the -bench option) - fmt.Printf("Number of spans received: %d\n", len(finishedSpans)) - if len(finishedSpans) < 37 { - panic("expected at least 37 finished spans, got " + strconv.Itoa(len(finishedSpans))) - } - - sessionSpans := getSpansWithType(finishedSpans, constants.SpanTypeTestSession) - fmt.Printf("Number of sessions received: %d\n", len(sessionSpans)) - showResourcesNameFromSpans(sessionSpans) - if len(sessionSpans) != 1 { - panic("expected exactly 1 session span, got " + strconv.Itoa(len(sessionSpans))) - } - - moduleSpans := getSpansWithType(finishedSpans, constants.SpanTypeTestModule) - fmt.Printf("Number of modules received: %d\n", len(moduleSpans)) - showResourcesNameFromSpans(moduleSpans) - if len(moduleSpans) != 1 { - panic("expected exactly 1 module span, got " + strconv.Itoa(len(moduleSpans))) - } - - suiteSpans := getSpansWithType(finishedSpans, constants.SpanTypeTestSuite) - fmt.Printf("Number of suites received: %d\n", len(suiteSpans)) - showResourcesNameFromSpans(suiteSpans) - if len(suiteSpans) != 2 { - panic("expected exactly 2 suite spans, got " + strconv.Itoa(len(suiteSpans))) - } - - testSpans := getSpansWithType(finishedSpans, constants.SpanTypeTest) - fmt.Printf("Number of tests received: %d\n", len(testSpans)) - showResourcesNameFromSpans(testSpans) - if len(testSpans) != 37 { - panic("expected exactly 37 test spans, got " + 
strconv.Itoa(len(testSpans))) - } - - httpSpans := getSpansWithType(finishedSpans, ext.SpanTypeHTTP) - fmt.Printf("Number of http spans received: %d\n", len(httpSpans)) - showResourcesNameFromSpans(httpSpans) - if len(httpSpans) != 2 { - panic("expected exactly 2 normal spans, got " + strconv.Itoa(len(httpSpans))) - } - - os.Exit(0) -} - // TestMyTest01 demonstrates instrumentation of InternalTests func TestMyTest01(t *testing.T) { assertTest(t) @@ -314,11 +199,23 @@ func TestRetryWithFail(t *testing.T) { func TestRetryAlwaysFail(t *testing.T) { t.Parallel() - t.Fatal("Always fail") + t.Fatal("Always fail to test the auto retries feature") } func TestNormalPassingAfterRetryAlwaysFail(t *testing.T) {} +var run int + +func TestEarlyFlakeDetection(t *testing.T) { + run++ + fmt.Printf(" Run: %d", run) + if run%2 == 0 { + fmt.Println(" Failed") + t.FailNow() + } + fmt.Println(" Passed") +} + // BenchmarkFirst demonstrates benchmark instrumentation with sub-benchmarks. func BenchmarkFirst(gb *testing.B) { @@ -451,20 +348,3 @@ func assertCommon(assert *assert.Assertions, span mocktracer.Span) { } assert.Contains(spanTags, constants.CIWorkspacePath) } - -func getSpansWithType(spans []mocktracer.Span, spanType string) []mocktracer.Span { - var result []mocktracer.Span - for _, span := range spans { - if span.Tag(ext.SpanType) == spanType { - result = append(result, span) - } - } - - return result -} - -func showResourcesNameFromSpans(spans []mocktracer.Span) { - for i, span := range spans { - fmt.Printf(" [%d] = %v\n", i, span.Tag(ext.ResourceName)) - } -} diff --git a/internal/civisibility/integrations/manual_api_ddtest.go b/internal/civisibility/integrations/manual_api_ddtest.go index 834929fa3d..5ca025a783 100644 --- a/internal/civisibility/integrations/manual_api_ddtest.go +++ b/internal/civisibility/integrations/manual_api_ddtest.go @@ -183,7 +183,7 @@ func (t *tslvTest) SetTestFunc(fn *runtime.Func) { }) // if we found an endLine we check is greater than the 
calculated startLine - if endLine > startLine { + if endLine >= startLine { t.SetTag(constants.TestSourceEndLine, endLine) } }