diff --git a/api/server_test.go b/api/server_test.go index 2e7df53baee..991efd41532 100644 --- a/api/server_test.go +++ b/api/server_test.go @@ -81,10 +81,19 @@ func TestWithEngine(t *testing.T) { logger := logrus.New() logger.SetOutput(testutils.NewTestOutput(t)) registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, builtinMetrics, logger) + testState := &lib.TestRunState{ + TestPreInitState: &lib.TestPreInitState{ + Logger: logger, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + }, + Options: lib.Options{}, + Runner: &minirunner.MiniRunner{}, + } + + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) + engine, err := core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) rw := httptest.NewRecorder() diff --git a/api/v1/group_routes_test.go b/api/v1/group_routes_test.go index f5765bbaa5c..68b2f6e747d 100644 --- a/api/v1/group_routes_test.go +++ b/api/v1/group_routes_test.go @@ -26,7 +26,6 @@ import ( "net/http/httptest" "testing" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -38,6 +37,25 @@ import ( "go.k6.io/k6/metrics" ) +func getTestPreInitState(tb testing.TB) *lib.TestPreInitState { + reg := metrics.NewRegistry() + return &lib.TestPreInitState{ + Logger: testutils.NewLogger(tb), + RuntimeOptions: lib.RuntimeOptions{}, + Registry: reg, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(reg), + } +} + +func getTestRunState(tb testing.TB, options lib.Options, runner lib.Runner) *lib.TestRunState { + require.NoError(tb, runner.SetOptions(runner.GetOptions().Apply(options))) + return &lib.TestRunState{ + TestPreInitState: getTestPreInitState(tb), + Options: options, + Runner: runner, + } +} + func TestGetGroups(t *testing.T) { g0, err := lib.NewGroup("", nil) assert.NoError(t, err) @@ -46,14 +64,10 @@ func TestGetGroups(t *testing.T) { g2, err := g1.Group("group 2") assert.NoError(t, err) - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Group: g0}, builtinMetrics, logger) + testState := getTestRunState(t, lib.Options{}, &minirunner.MiniRunner{Group: g0}) + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) + engine, err := core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) t.Run("list", func(t *testing.T) { diff --git a/api/v1/metric_routes_test.go b/api/v1/metric_routes_test.go index 9e5aa2d728f..c071e1ffc89 100644 --- a/api/v1/metric_routes_test.go +++ b/api/v1/metric_routes_test.go @@ -26,7 +26,6 @@ import ( "net/http/httptest" "testing" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v3" @@ -34,7 +33,6 @@ import ( "go.k6.io/k6/core" "go.k6.io/k6/core/local" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/metrics" ) @@ -42,15 +40,12 @@ import ( func TestGetMetrics(t *testing.T) { t.Parallel() - logger := logrus.New() - 
logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - testMetric, err := registry.NewMetric("my_metric", metrics.Trend, metrics.Time) + testState := getTestRunState(t, lib.Options{}, &minirunner.MiniRunner{}) + testMetric, err := testState.Registry.NewMetric("my_metric", metrics.Trend, metrics.Time) require.NoError(t, err) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, builtinMetrics, logger) + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) + engine, err := core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) engine.MetricsEngine.ObservedMetrics = map[string]*metrics.Metric{ @@ -104,15 +99,12 @@ func TestGetMetrics(t *testing.T) { func TestGetMetric(t *testing.T) { t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - testMetric, err := registry.NewMetric("my_metric", metrics.Trend, metrics.Time) + testState := getTestRunState(t, lib.Options{}, &minirunner.MiniRunner{}) + testMetric, err := testState.Registry.NewMetric("my_metric", metrics.Trend, metrics.Time) require.NoError(t, err) - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, builtinMetrics, logger) + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) + engine, err := core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) engine.MetricsEngine.ObservedMetrics = map[string]*metrics.Metric{ diff --git a/api/v1/setup_teardown_routes_test.go b/api/v1/setup_teardown_routes_test.go index a202125a762..020acfa1238 100644 --- a/api/v1/setup_teardown_routes_test.go +++ b/api/v1/setup_teardown_routes_test.go @@ -31,7 +31,6 @@ import ( "testing" "time" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v3" @@ -40,10 +39,8 @@ import ( "go.k6.io/k6/core/local" "go.k6.io/k6/js" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/types" "go.k6.io/k6/loader" - "go.k6.io/k6/metrics" ) func TestSetupData(t *testing.T) { @@ -140,32 +137,28 @@ func TestSetupData(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - + piState := getTestPreInitState(t) runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, - &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: testCase.script}, - nil, + piState, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: testCase.script}, nil, ) require.NoError(t, err) - runner.SetOptions(lib.Options{ + require.NoError(t, runner.SetOptions(lib.Options{ Paused: null.BoolFrom(true), VUs: null.IntFrom(2), Iterations: null.IntFrom(3), NoSetup: null.BoolFrom(true), SetupTimeout: types.NullDurationFrom(5 * time.Second), TeardownTimeout: types.NullDurationFrom(5 * time.Second), - }) - execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger) + })) + testState := 
&lib.TestRunState{ + TestPreInitState: piState, + Options: runner.GetOptions(), + Runner: runner, + } + + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, runner.GetOptions(), lib.RuntimeOptions{}, nil, logger, registry) + engine, err := core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) diff --git a/api/v1/status_routes_test.go b/api/v1/status_routes_test.go index 70b530646c8..8161848b924 100644 --- a/api/v1/status_routes_test.go +++ b/api/v1/status_routes_test.go @@ -29,7 +29,6 @@ import ( "testing" "time" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v3" @@ -37,21 +36,16 @@ import ( "go.k6.io/k6/core" "go.k6.io/k6/core/local" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/testutils/minirunner" - "go.k6.io/k6/metrics" ) func TestGetStatus(t *testing.T) { t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, builtinMetrics, logger) + testState := getTestRunState(t, lib.Options{}, &minirunner.MiniRunner{}) + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) + engine, err := core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) rw := httptest.NewRecorder() @@ -128,21 +122,17 @@ func TestPatchStatus(t *testing.T) { for name, testCase := range testData { t.Run(name, func(t *testing.T) { t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) scenarios := lib.ScenarioConfigs{} err := json.Unmarshal([]byte(` {"external": {"executor": "externally-controlled", "vus": 0, "maxVUs": 10, "duration": "0"}}`), &scenarios) require.NoError(t, err) - options := lib.Options{Scenarios: scenarios} - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Options: options}, builtinMetrics, logger) + testState := getTestRunState(t, lib.Options{Scenarios: scenarios}, &minirunner.MiniRunner{}) + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, options, lib.RuntimeOptions{}, nil, logger, registry) + engine, err := core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) diff --git a/cmd/archive.go b/cmd/archive.go index 3cc4fe07c7c..9fdee894ae1 100644 --- a/cmd/archive.go +++ b/cmd/archive.go @@ -43,13 +43,13 @@ func (c *cmdArchive) run(cmd *cobra.Command, args []string) error { // an execution shortcut option (e.g. `iterations` or `duration`), // we will have multiple conflicting execution options since the // derivation will set `scenarios` as well. - err = test.initRunner.SetOptions(test.consolidatedConfig.Options) + testRunState, err := test.buildTestRunState(test.consolidatedConfig.Options) if err != nil { return err } // Archive. 
- arc := test.initRunner.MakeArchive() + arc := testRunState.Runner.MakeArchive() f, err := c.gs.fs.Create(c.archiveOut) if err != nil { return err diff --git a/cmd/cloud.go b/cmd/cloud.go index 3d272b09a41..a3171b2a332 100644 --- a/cmd/cloud.go +++ b/cmd/cloud.go @@ -102,7 +102,7 @@ func (c *cmdCloud) run(cmd *cobra.Command, args []string) error { // an execution shortcut option (e.g. `iterations` or `duration`), // we will have multiple conflicting execution options since the // derivation will set `scenarios` as well. - err = test.initRunner.SetOptions(test.consolidatedConfig.Options) + testRunState, err := test.buildTestRunState(test.consolidatedConfig.Options) if err != nil { return err } @@ -112,7 +112,7 @@ func (c *cmdCloud) run(cmd *cobra.Command, args []string) error { // TODO: move those validations to a separate function and reuse validateConfig()? modifyAndPrintBar(c.gs, progressBar, pb.WithConstProgress(0, "Building the archive...")) - arc := test.initRunner.MakeArchive() + arc := testRunState.Runner.MakeArchive() // TODO: Fix this // We reuse cloud.Config for parsing options.ext.loadimpact, but this probably shouldn't be diff --git a/cmd/inspect.go b/cmd/inspect.go index 916e8b788e2..e3533392f60 100644 --- a/cmd/inspect.go +++ b/cmd/inspect.go @@ -40,7 +40,7 @@ func getCmdInspect(gs *globalState) *cobra.Command { Long: `Inspect a script or archive.`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - test, err := loadAndConfigureTest(gs, cmd, args, nil) + test, err := loadTest(gs, cmd, args) if err != nil { return err } @@ -82,16 +82,20 @@ func getCmdInspect(gs *globalState) *cobra.Command { // derive the value of `scenarios` and calculate the max test duration and VUs. func inspectOutputWithExecRequirements(gs *globalState, cmd *cobra.Command, test *loadedTest) (interface{}, error) { // we don't actually support CLI flags here, so we pass nil as the getter - if err := test.consolidateDeriveAndValidateConfig(gs, cmd, nil); err != nil { + configuredTest, err := test.consolidateDeriveAndValidateConfig(gs, cmd, nil) + if err != nil { return nil, err } - et, err := lib.NewExecutionTuple(test.derivedConfig.ExecutionSegment, test.derivedConfig.ExecutionSegmentSequence) + et, err := lib.NewExecutionTuple( + configuredTest.derivedConfig.ExecutionSegment, + configuredTest.derivedConfig.ExecutionSegmentSequence, + ) if err != nil { return nil, err } - executionPlan := test.derivedConfig.Scenarios.GetFullExecutionRequirements(et) + executionPlan := configuredTest.derivedConfig.Scenarios.GetFullExecutionRequirements(et) duration, _ := lib.GetEndOffset(executionPlan) return struct { @@ -99,7 +103,7 @@ func inspectOutputWithExecRequirements(gs *globalState, cmd *cobra.Command, test TotalDuration types.NullDuration `json:"totalDuration"` MaxVUs uint64 `json:"maxVUs"` }{ - test.derivedConfig.Options, + configuredTest.derivedConfig.Options, types.NewNullDuration(duration, true), lib.GetMaxPossibleVUs(executionPlan), }, nil diff --git a/cmd/integration_tests/eventloop/eventloop_test.go b/cmd/integration_tests/eventloop/eventloop_test.go index 3fa440f0cb6..df21c9ba02c 100644 --- a/cmd/integration_tests/eventloop/eventloop_test.go +++ b/cmd/integration_tests/eventloop/eventloop_test.go @@ -16,7 +16,6 @@ import ( "go.k6.io/k6/lib" "go.k6.io/k6/lib/executor" "go.k6.io/k6/lib/testutils" - "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/types" "go.k6.io/k6/loader" "go.k6.io/k6/metrics" @@ -29,30 +28,49 @@ func eventLoopTest(t *testing.T, script []byte, 
testHandle func(context.Context, logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.InfoLevel, logrus.WarnLevel, logrus.ErrorLevel}} logger.AddHook(logHook) - script = []byte(`import {setTimeout} from "k6/x/events"; - ` + string(script)) registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, - &loader.SourceData{ - URL: &url.URL{Path: "/script.js"}, - Data: script, - }, - nil, - ) + piState := &lib.TestPreInitState{ + Logger: logger, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + } + + script = []byte("import {setTimeout} from 'k6/x/events';\n" + string(script)) + runner, err := js.New(piState, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) require.NoError(t, err) - ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, logger, - lib.Options{ - TeardownTimeout: types.NullDurationFrom(time.Second), - SetupTimeout: types.NullDurationFrom(time.Second), - }, builtinMetrics) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() + newOpts, err := executor.DeriveScenariosFromShortcuts(lib.Options{ + MetricSamplesBufferSize: null.NewInt(200, false), + TeardownTimeout: types.NullDurationFrom(time.Second), + SetupTimeout: types.NullDurationFrom(time.Second), + }.Apply(runner.GetOptions()), nil) + require.NoError(t, err) + require.Empty(t, newOpts.Validate()) + require.NoError(t, runner.SetOptions(newOpts)) + + testState := &lib.TestRunState{ + TestPreInitState: piState, + Options: newOpts, + Runner: runner, + } + + execScheduler, err := local.NewExecutionScheduler(testState) + require.NoError(t, err) + + samples := make(chan metrics.SampleContainer, newOpts.MetricSamplesBufferSize.Int64) + go func() { + for { + select { + case <-samples: + case <-ctx.Done(): + return + } + } + }() + + require.NoError(t, execScheduler.Init(ctx, samples)) errCh := make(chan error, 1) go func() { errCh <- execScheduler.Run(ctx, ctx, samples) }() @@ -198,42 +216,3 @@ export default function() { }, msgs) }) } - -func newTestExecutionScheduler( - t *testing.T, runner lib.Runner, logger *logrus.Logger, opts lib.Options, builtinMetrics *metrics.BuiltinMetrics, -) (ctx context.Context, cancel func(), execScheduler *local.ExecutionScheduler, samples chan metrics.SampleContainer) { - if runner == nil { - runner = &minirunner.MiniRunner{} - } - ctx, cancel = context.WithCancel(context.Background()) - newOpts, err := executor.DeriveScenariosFromShortcuts(lib.Options{ - MetricSamplesBufferSize: null.NewInt(200, false), - }.Apply(runner.GetOptions()).Apply(opts), nil) - require.NoError(t, err) - require.Empty(t, newOpts.Validate()) - - require.NoError(t, runner.SetOptions(newOpts)) - - if logger == nil { - logger = logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - } - - execScheduler, err = local.NewExecutionScheduler(runner, builtinMetrics, logger) - require.NoError(t, err) - - samples = make(chan metrics.SampleContainer, newOpts.MetricSamplesBufferSize.Int64) - go func() { - for { - select { - case <-samples: - case <-ctx.Done(): - return - } - } - }() - - require.NoError(t, execScheduler.Init(ctx, samples)) - - return ctx, cancel, execScheduler, samples -} diff --git a/cmd/outputs.go b/cmd/outputs.go index 08893380f4f..ffb73e6d85a 100644 --- a/cmd/outputs.go +++ b/cmd/outputs.go @@ -77,7 +77,9 @@ func getPossibleIDList(constrs 
map[string]func(output.Params) (output.Output, er return strings.Join(res, ", ") } -func createOutputs(gs *globalState, test *loadedTest, executionPlan []lib.ExecutionStep) ([]output.Output, error) { +func createOutputs( + gs *globalState, test *loadedAndConfiguredTest, executionPlan []lib.ExecutionStep, +) ([]output.Output, error) { outputConstructors, err := getAllOutputConstructors() if err != nil { return nil, err @@ -90,7 +92,7 @@ func createOutputs(gs *globalState, test *loadedTest, executionPlan []lib.Execut StdErr: gs.stdErr, FS: gs.fs, ScriptOptions: test.derivedConfig.Options, - RuntimeOptions: test.runtimeOptions, + RuntimeOptions: test.preInitState.RuntimeOptions, ExecutionPlan: executionPlan, } result := make([]output.Output, 0, len(test.derivedConfig.Out)) @@ -120,7 +122,7 @@ func createOutputs(gs *globalState, test *loadedTest, executionPlan []lib.Execut } if builtinMetricOut, ok := out.(output.WithBuiltinMetrics); ok { - builtinMetricOut.SetBuiltinMetrics(test.builtInMetrics) + builtinMetricOut.SetBuiltinMetrics(test.preInitState.BuiltinMetrics) } result = append(result, out) diff --git a/cmd/run.go b/cmd/run.go index 652de3717e4..ddaffd2e65d 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -65,7 +65,8 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { // Write the full consolidated *and derived* options back to the Runner. conf := test.derivedConfig - if err = test.initRunner.SetOptions(conf.Options); err != nil { + testRunState, err := test.buildTestRunState(conf.Options) + if err != nil { return err } @@ -86,10 +87,10 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { runCtx, runCancel := context.WithCancel(lingerCtx) defer runCancel() - logger := c.gs.logger + logger := testRunState.Logger // Create a local execution scheduler wrapping the runner. logger.Debug("Initializing the execution scheduler...") - execScheduler, err := local.NewExecutionScheduler(test.initRunner, test.builtInMetrics, logger) + execScheduler, err := local.NewExecutionScheduler(testRunState) if err != nil { return err } @@ -125,10 +126,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { // TODO: remove this completely // Create the engine. initBar.Modify(pb.WithConstProgress(0, "Init engine")) - engine, err := core.NewEngine( - execScheduler, conf.Options, test.runtimeOptions, - outputs, logger, test.metricsRegistry, - ) + engine, err := core.NewEngine(testRunState, execScheduler, outputs) if err != nil { return err } @@ -230,7 +228,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { } // Handle the end-of-test summary. 
- if !test.runtimeOptions.NoSummary.Bool { + if !testRunState.RuntimeOptions.NoSummary.Bool { engine.MetricsEngine.MetricsLock.Lock() // TODO: refactor so this is not needed summaryResult, err := test.initRunner.HandleSummary(globalCtx, &lib.Summary{ Metrics: engine.MetricsEngine.ObservedMetrics, @@ -268,8 +266,8 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { logger.Debug("Waiting for engine processes to finish...") engineWait() logger.Debug("Everything has finished, exiting k6!") - if test.keywriter != nil { - if err := test.keywriter.Close(); err != nil { + if test.keyLogger != nil { + if err := test.keyLogger.Close(); err != nil { logger.WithError(err).Warn("Error while closing the SSLKEYLOGFILE") } } diff --git a/cmd/runtime_options_test.go b/cmd/runtime_options_test.go index a5d642fecbf..8130da41e43 100644 --- a/cmd/runtime_options_test.go +++ b/cmd/runtime_options_test.go @@ -81,12 +81,15 @@ func testRuntimeOptionsCase(t *testing.T, tc runtimeOptionsTestCase) { ts := newGlobalTestState(t) // TODO: move upwards, make this into an almost full integration test registry := metrics.NewRegistry() test := &loadedTest{ - sourceRootPath: "script.js", - source: &loader.SourceData{Data: jsCode.Bytes(), URL: &url.URL{Path: "/script.js", Scheme: "file"}}, - fileSystems: map[string]afero.Fs{"file": fs}, - runtimeOptions: rtOpts, - metricsRegistry: registry, - builtInMetrics: metrics.RegisterBuiltinMetrics(registry), + sourceRootPath: "script.js", + source: &loader.SourceData{Data: jsCode.Bytes(), URL: &url.URL{Path: "/script.js", Scheme: "file"}}, + fileSystems: map[string]afero.Fs{"file": fs}, + preInitState: &lib.TestPreInitState{ + Logger: ts.logger, + RuntimeOptions: rtOpts, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + }, } require.NoError(t, test.initializeFirstRunner(ts.globalState)) @@ -97,12 +100,15 @@ func testRuntimeOptionsCase(t *testing.T, tc runtimeOptionsTestCase) { getRunnerErr := func(rtOpts lib.RuntimeOptions) *loadedTest { return &loadedTest{ - sourceRootPath: "script.tar", - source: &loader.SourceData{Data: archiveBuf.Bytes(), URL: &url.URL{Path: "/script.tar", Scheme: "file"}}, - fileSystems: map[string]afero.Fs{"file": fs}, - runtimeOptions: rtOpts, - metricsRegistry: registry, - builtInMetrics: metrics.RegisterBuiltinMetrics(registry), + sourceRootPath: "script.tar", + source: &loader.SourceData{Data: archiveBuf.Bytes(), URL: &url.URL{Path: "/script.tar", Scheme: "file"}}, + fileSystems: map[string]afero.Fs{"file": fs}, + preInitState: &lib.TestPreInitState{ + Logger: ts.logger, + RuntimeOptions: rtOpts, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + }, } } diff --git a/cmd/test_load.go b/cmd/test_load.go index 6bc39d10239..0b2c2d3abb4 100644 --- a/cmd/test_load.go +++ b/cmd/test_load.go @@ -25,31 +25,20 @@ const ( testTypeArchive = "archive" ) -// loadedTest contains all of data, details and dependencies of a fully-loaded -// and configured k6 test. +// loadedTest contains all the data, details and dependencies of a loaded +// k6 test, but without any config consolidation.
type loadedTest struct { - sourceRootPath string // contains the raw string the user supplied - pwd string - source *loader.SourceData - fs afero.Fs - fileSystems map[string]afero.Fs - runtimeOptions lib.RuntimeOptions - metricsRegistry *metrics.Registry - builtInMetrics *metrics.BuiltinMetrics - initRunner lib.Runner // TODO: rename to something more appropriate - keywriter io.Closer - - // Only set if cliConfigGetter is supplied to loadAndConfigureTest() or if - // consolidateDeriveAndValidateConfig() is manually called. - consolidatedConfig Config - derivedConfig Config + sourceRootPath string // contains the raw string the user supplied + pwd string + source *loader.SourceData + fs afero.Fs + fileSystems map[string]afero.Fs + preInitState *lib.TestPreInitState + initRunner lib.Runner // TODO: rename to something more appropriate + keyLogger io.Closer } -func loadAndConfigureTest( - gs *globalState, cmd *cobra.Command, args []string, - // supply this if you want the test config consolidated and validated - cliConfigGetter func(flags *pflag.FlagSet) (Config, error), // TODO: obviate -) (*loadedTest, error) { +func loadTest(gs *globalState, cmd *cobra.Command, args []string) (*loadedTest, error) { if len(args) < 1 { return nil, fmt.Errorf("k6 needs at least one argument to load the test") } @@ -73,15 +62,20 @@ func loadAndConfigureTest( } registry := metrics.NewRegistry() + state := &lib.TestPreInitState{ + Logger: gs.logger, + RuntimeOptions: runtimeOptions, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + } + test := &loadedTest{ - pwd: pwd, - sourceRootPath: sourceRootPath, - source: src, - fs: gs.fs, - fileSystems: fileSystems, - runtimeOptions: runtimeOptions, - metricsRegistry: registry, - builtInMetrics: metrics.RegisterBuiltinMetrics(registry), + pwd: pwd, + sourceRootPath: sourceRootPath, + source: src, + fs: gs.fs, + fileSystems: fileSystems, + preInitState: state, } gs.logger.Debugf("Initializing k6 runner for '%s' (%s)...", sourceRootPath, resolvedPath) @@ -89,13 +83,6 @@ func loadAndConfigureTest( return nil, fmt.Errorf("could not initialize '%s': %w", sourceRootPath, err) } gs.logger.Debug("Runner successfully initialized!") - - if cliConfigGetter != nil { - if err := test.consolidateDeriveAndValidateConfig(gs, cmd, cliConfigGetter); err != nil { - return nil, err - } - } - return test, nil } @@ -103,22 +90,16 @@ func (lt *loadedTest) initializeFirstRunner(gs *globalState) error { testPath := lt.source.URL.String() logger := gs.logger.WithField("test_path", testPath) - testType := lt.runtimeOptions.TestType.String + testType := lt.preInitState.RuntimeOptions.TestType.String if testType == "" { logger.Debug("Detecting test type for...") testType = detectTestType(lt.source.Data) } - state := &lib.RuntimeState{ - Logger: gs.logger, - RuntimeOptions: lt.runtimeOptions, - BuiltinMetrics: lt.builtInMetrics, - Registry: lt.metricsRegistry, - } - if lt.runtimeOptions.KeyWriter.Valid { + if lt.preInitState.RuntimeOptions.KeyWriter.Valid { logger.Warnf("SSLKEYLOGFILE was specified, logging TLS connection keys to '%s'...", - lt.runtimeOptions.KeyWriter.String) - keylogFilename := lt.runtimeOptions.KeyWriter.String + lt.preInitState.RuntimeOptions.KeyWriter.String) + keylogFilename := lt.preInitState.RuntimeOptions.KeyWriter.String // if path is absolute - no point doing anything if !filepath.IsAbs(keylogFilename) { // filepath.Abs could be used but it will get the pwd from `os` package instead of what is in lt.pwd @@ -129,13 +110,13 @@ func (lt 
*loadedTest) initializeFirstRunner(gs *globalState) error { if err != nil { return fmt.Errorf("couldn't get absolute path for keylog file: %w", err) } - lt.keywriter = f - state.KeyLogger = &syncWriter{w: f} + lt.keyLogger = f + lt.preInitState.KeyLogger = &syncWriter{w: f} } switch testType { case testTypeJS: logger.Debug("Trying to load as a JS test...") - runner, err := js.New(state, lt.source, lt.fileSystems) + runner, err := js.New(lt.preInitState, lt.source, lt.fileSystems) // TODO: should we use common.UnwrapGojaInterruptedError() here? if err != nil { return fmt.Errorf("could not load JS test '%s': %w", testPath, err) @@ -156,7 +137,7 @@ func (lt *loadedTest) initializeFirstRunner(gs *globalState) error { switch arc.Type { case testTypeJS: logger.Debug("Evaluating JS from archive bundle...") - lt.initRunner, err = js.NewFromArchive(state, arc) + lt.initRunner, err = js.NewFromArchive(lt.preInitState, arc) if err != nil { return fmt.Errorf("could not load JS from test archive bundle '%s': %w", testPath, err) } @@ -192,50 +173,88 @@ func detectTestType(data []byte) string { func (lt *loadedTest) consolidateDeriveAndValidateConfig( gs *globalState, cmd *cobra.Command, cliConfGetter func(flags *pflag.FlagSet) (Config, error), // TODO: obviate -) error { +) (*loadedAndConfiguredTest, error) { var cliConfig Config if cliConfGetter != nil { gs.logger.Debug("Parsing CLI flags...") var err error cliConfig, err = cliConfGetter(cmd.Flags()) if err != nil { - return err + return nil, err } } gs.logger.Debug("Consolidating config layers...") consolidatedConfig, err := getConsolidatedConfig(gs, cliConfig, lt.initRunner.GetOptions()) if err != nil { - return err + return nil, err } gs.logger.Debug("Parsing thresholds and validating config...") // Parse the thresholds, only if the --no-threshold flag is not set. // If parsing the threshold expressions failed, consider it as an // invalid configuration error. - if !lt.runtimeOptions.NoThresholds.Bool { + if !lt.preInitState.RuntimeOptions.NoThresholds.Bool { for metricName, thresholdsDefinition := range consolidatedConfig.Options.Thresholds { err = thresholdsDefinition.Parse() if err != nil { - return errext.WithExitCodeIfNone(err, exitcodes.InvalidConfig) + return nil, errext.WithExitCodeIfNone(err, exitcodes.InvalidConfig) } - err = thresholdsDefinition.Validate(metricName, lt.metricsRegistry) + err = thresholdsDefinition.Validate(metricName, lt.preInitState.Registry) if err != nil { - return errext.WithExitCodeIfNone(err, exitcodes.InvalidConfig) + return nil, errext.WithExitCodeIfNone(err, exitcodes.InvalidConfig) } } } derivedConfig, err := deriveAndValidateConfig(consolidatedConfig, lt.initRunner.IsExecutable, gs.logger) if err != nil { - return err + return nil, err + } + + return &loadedAndConfiguredTest{ + loadedTest: lt, + consolidatedConfig: consolidatedConfig, + derivedConfig: derivedConfig, + }, nil +} + +// loadedAndConfiguredTest contains the whole loadedTest, as well as the +// consolidated test config and the full test run state. 
+type loadedAndConfiguredTest struct { + *loadedTest + consolidatedConfig Config + derivedConfig Config +} + +func loadAndConfigureTest( + gs *globalState, cmd *cobra.Command, args []string, + cliConfigGetter func(flags *pflag.FlagSet) (Config, error), +) (*loadedAndConfiguredTest, error) { + test, err := loadTest(gs, cmd, args) + if err != nil { + return nil, err + } + + return test.consolidateDeriveAndValidateConfig(gs, cmd, cliConfigGetter) +} + +func (lct *loadedAndConfiguredTest) buildTestRunState( + configToReinject lib.Options, +) (*lib.TestRunState, error) { + // This might be the full derived or just the consolidated options + if err := lct.initRunner.SetOptions(configToReinject); err != nil { + return nil, err + } - lt.consolidatedConfig = consolidatedConfig - lt.derivedConfig = derivedConfig + // TODO: init atlas root node, etc. - return nil + return &lib.TestRunState{ + TestPreInitState: lct.preInitState, + Runner: lct.initRunner, + Options: lct.derivedConfig.Options, // we will always run with the derived options + }, nil } type syncWriter struct { diff --git a/converter/har/converter_test.go b/converter/har/converter_test.go index f5d259a080a..aeacfc8324b 100644 --- a/converter/har/converter_test.go +++ b/converter/har/converter_test.go @@ -62,7 +62,7 @@ func TestBuildK6RequestObject(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) _, err = js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, diff --git a/core/engine.go b/core/engine.go index 30df149e8fc..3e852422a4c 100644 --- a/core/engine.go +++ b/core/engine.go @@ -72,10 +72,7 @@ type Engine struct { } // NewEngine instantiates a new Engine, without doing any heavy initialization.
-func NewEngine( - ex lib.ExecutionScheduler, opts lib.Options, rtOpts lib.RuntimeOptions, outputs []output.Output, logger *logrus.Logger, - registry *metrics.Registry, -) (*Engine, error) { +func NewEngine(testState *lib.TestRunState, ex lib.ExecutionScheduler, outputs []output.Output) (*Engine, error) { if ex == nil { return nil, errors.New("missing ExecutionScheduler instance") } @@ -83,26 +80,26 @@ func NewEngine( e := &Engine{ ExecutionScheduler: ex, - runtimeOptions: rtOpts, - Samples: make(chan metrics.SampleContainer, opts.MetricSamplesBufferSize.Int64), + runtimeOptions: testState.RuntimeOptions, + Samples: make(chan metrics.SampleContainer, testState.Options.MetricSamplesBufferSize.Int64), stopChan: make(chan struct{}), - logger: logger.WithField("component", "engine"), + logger: testState.Logger.WithField("component", "engine"), } - me, err := engine.NewMetricsEngine(registry, ex.GetState(), opts, rtOpts, logger) + me, err := engine.NewMetricsEngine(ex.GetState()) if err != nil { return nil, err } e.MetricsEngine = me - if !(rtOpts.NoSummary.Bool && rtOpts.NoThresholds.Bool) { + if !(testState.RuntimeOptions.NoSummary.Bool && testState.RuntimeOptions.NoThresholds.Bool) { e.ingester = me.GetIngester() outputs = append(outputs, e.ingester) } - e.OutputManager = output.NewManager(outputs, logger, func(err error) { + e.OutputManager = output.NewManager(outputs, testState.Logger, func(err error) { if err != nil { - logger.WithError(err).Error("Received error to stop from output") + testState.Logger.WithError(err).Error("Received error to stop from output") } e.Stop() }) diff --git a/core/engine_test.go b/core/engine_test.go index a6485132fb7..f1c8f14730d 100644 --- a/core/engine_test.go +++ b/core/engine_test.go @@ -58,32 +58,51 @@ type testStruct struct { run func() error runCancel func() wait func() + piState *lib.TestPreInitState +} + +func getTestPreInitState(tb testing.TB) *lib.TestPreInitState { + reg := metrics.NewRegistry() + return &lib.TestPreInitState{ + Logger: testutils.NewLogger(tb), + RuntimeOptions: lib.RuntimeOptions{}, + Registry: reg, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(reg), + } +} + +func getTestRunState( + tb testing.TB, piState *lib.TestPreInitState, options lib.Options, runner lib.Runner, +) *lib.TestRunState { + require.Empty(tb, options.Validate()) + require.NoError(tb, runner.SetOptions(options)) + return &lib.TestRunState{ + TestPreInitState: piState, + Options: options, + Runner: runner, + } } // Wrapper around NewEngine that applies a logger and manages the options. 
-func newTestEngineWithRegistry( //nolint:golint - t *testing.T, runTimeout *time.Duration, runner lib.Runner, outputs []output.Output, opts lib.Options, - registry *metrics.Registry, +func newTestEngineWithTestPreInitState( //nolint:golint + t *testing.T, runTimeout *time.Duration, runner lib.Runner, outputs []output.Output, + opts lib.Options, piState *lib.TestPreInitState, ) *testStruct { if runner == nil { runner = &minirunner.MiniRunner{} } - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) newOpts, err := executor.DeriveScenariosFromShortcuts(lib.Options{ MetricSamplesBufferSize: null.NewInt(200, false), - }.Apply(runner.GetOptions()).Apply(opts), logger) + }.Apply(runner.GetOptions()).Apply(opts), piState.Logger) require.NoError(t, err) - require.Empty(t, newOpts.Validate()) - require.NoError(t, runner.SetOptions(newOpts)) + testRunState := getTestRunState(t, piState, newOpts, runner) - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger) + execScheduler, err := local.NewExecutionScheduler(testRunState) require.NoError(t, err) - engine, err := NewEngine(execScheduler, opts, lib.RuntimeOptions{}, outputs, logger, registry) + engine, err := NewEngine(testRunState, execScheduler, outputs) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) @@ -109,6 +128,7 @@ func newTestEngineWithRegistry( //nolint:golint waitFn() engine.OutputManager.StopOutputs() }, + piState: piState, } return test } @@ -116,7 +136,7 @@ func newTestEngineWithRegistry( //nolint:golint func newTestEngine( t *testing.T, runTimeout *time.Duration, runner lib.Runner, outputs []output.Output, opts lib.Options, ) *testStruct { - return newTestEngineWithRegistry(t, runTimeout, runner, outputs, opts, metrics.NewRegistry()) + return newTestEngineWithTestPreInitState(t, runTimeout, runner, outputs, opts, getTestPreInitState(t)) } func TestEngineRun(t *testing.T) { @@ -156,8 +176,8 @@ func TestEngineRun(t *testing.T) { t.Run("collects samples", func(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - testMetric, err := registry.NewMetric("test_metric", metrics.Trend) + piState := getTestPreInitState(t) + testMetric, err := piState.Registry.NewMetric("test_metric", metrics.Trend) require.NoError(t, err) signalChan := make(chan interface{}) @@ -173,10 +193,10 @@ func TestEngineRun(t *testing.T) { } mockOutput := mockoutput.New() - test := newTestEngineWithRegistry(t, nil, runner, []output.Output{mockOutput}, lib.Options{ + test := newTestEngineWithTestPreInitState(t, nil, runner, []output.Output{mockOutput}, lib.Options{ VUs: null.IntFrom(1), Iterations: null.IntFrom(1), - }, registry) + }, piState) errC := make(chan error) go func() { errC <- test.run() }() @@ -226,8 +246,8 @@ func TestEngineStopped(t *testing.T) { func TestEngineOutput(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - testMetric, err := registry.NewMetric("test_metric", metrics.Trend) + piState := getTestPreInitState(t) + testMetric, err := piState.Registry.NewMetric("test_metric", metrics.Trend) require.NoError(t, err) runner := &minirunner.MiniRunner{ @@ -238,10 +258,10 @@ func TestEngineOutput(t *testing.T) { } mockOutput := mockoutput.New() - test := newTestEngineWithRegistry(t, nil, runner, []output.Output{mockOutput}, lib.Options{ + test := newTestEngineWithTestPreInitState(t, nil, runner, []output.Output{mockOutput}, lib.Options{ VUs: null.IntFrom(1), Iterations: null.IntFrom(1), - }, 
registry) + }, piState) assert.NoError(t, test.run()) test.wait() @@ -269,8 +289,8 @@ func TestEngine_processSamples(t *testing.T) { t.Run("metric", func(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - metric, err := registry.NewMetric("my_metric", metrics.Gauge) + piState := getTestPreInitState(t) + metric, err := piState.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) done := make(chan struct{}) @@ -281,7 +301,7 @@ func TestEngine_processSamples(t *testing.T) { return nil }, } - test := newTestEngineWithRegistry(t, nil, runner, nil, lib.Options{}, registry) + test := newTestEngineWithTestPreInitState(t, nil, runner, nil, lib.Options{}, piState) go func() { assert.NoError(t, test.run()) @@ -301,8 +321,8 @@ func TestEngine_processSamples(t *testing.T) { t.Run("submetric", func(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - metric, err := registry.NewMetric("my_metric", metrics.Gauge) + piState := getTestPreInitState(t) + metric, err := piState.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) ths := metrics.NewThresholds([]string{`value<2`}) @@ -317,11 +337,11 @@ func TestEngine_processSamples(t *testing.T) { return nil }, } - test := newTestEngineWithRegistry(t, nil, runner, nil, lib.Options{ + test := newTestEngineWithTestPreInitState(t, nil, runner, nil, lib.Options{ Thresholds: map[string]metrics.Thresholds{ "my_metric{a:1}": ths, }, - }, registry) + }, piState) go func() { assert.NoError(t, test.run()) @@ -347,8 +367,8 @@ func TestEngine_processSamples(t *testing.T) { func TestEngineThresholdsWillAbort(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - metric, err := registry.NewMetric("my_metric", metrics.Gauge) + piState := getTestPreInitState(t) + metric, err := piState.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) // The incoming samples for the metric set it to 1.25. Considering @@ -369,9 +389,7 @@ func TestEngineThresholdsWillAbort(t *testing.T) { return nil }, } - test := newTestEngineWithRegistry(t, nil, runner, nil, lib.Options{ - Thresholds: thresholds, - }, registry) + test := newTestEngineWithTestPreInitState(t, nil, runner, nil, lib.Options{Thresholds: thresholds}, piState) go func() { assert.NoError(t, test.run()) @@ -390,8 +408,8 @@ func TestEngineThresholdsWillAbort(t *testing.T) { func TestEngineAbortedByThresholds(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - metric, err := registry.NewMetric("my_metric", metrics.Gauge) + piState := getTestPreInitState(t) + metric, err := piState.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) // The MiniRunner sets the value of the metric to 1.25. 
Considering @@ -415,7 +433,7 @@ func TestEngineAbortedByThresholds(t *testing.T) { }, } - test := newTestEngineWithRegistry(t, nil, runner, nil, lib.Options{Thresholds: thresholds}, registry) + test := newTestEngineWithTestPreInitState(t, nil, runner, nil, lib.Options{Thresholds: thresholds}, piState) defer test.wait() go func() { @@ -465,12 +483,12 @@ func TestEngine_processThresholds(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - gaugeMetric, err := registry.NewMetric("my_metric", metrics.Gauge) + piState := getTestPreInitState(t) + gaugeMetric, err := piState.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) - counterMetric, err := registry.NewMetric("used_counter", metrics.Counter) + counterMetric, err := piState.Registry.NewMetric("used_counter", metrics.Counter) require.NoError(t, err) - _, err = registry.NewMetric("unused_counter", metrics.Counter) + _, err = piState.Registry.NewMetric("unused_counter", metrics.Counter) require.NoError(t, err) thresholds := make(map[string]metrics.Thresholds, len(data.ths)) @@ -482,8 +500,8 @@ func TestEngine_processThresholds(t *testing.T) { } runner := &minirunner.MiniRunner{} - test := newTestEngineWithRegistry( - t, nil, runner, nil, lib.Options{Thresholds: thresholds}, registry, + test := newTestEngineWithTestPreInitState( + t, nil, runner, nil, lib.Options{Thresholds: thresholds}, piState, ) test.engine.OutputManager.AddMetricSamples( @@ -593,14 +611,8 @@ func TestSentReceivedMetrics(t *testing.T) { } runTest := func(t *testing.T, ts testScript, tc testCase, noConnReuse bool) (float64, float64) { - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + getTestPreInitState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: []byte(ts.Code)}, nil, ) @@ -732,14 +744,8 @@ func TestRunTags(t *testing.T) { } `)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + getTestPreInitState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil, ) @@ -815,14 +821,8 @@ func TestSetupException(t *testing.T) { throw new Error("baz"); } `), 0x666)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + getTestPreInitState(t), &loader.SourceData{URL: &url.URL{Scheme: "file", Path: "/script.js"}, Data: script}, map[string]afero.Fs{"file": memfs}, ) @@ -868,15 +868,9 @@ func TestVuInitException(t *testing.T) { } `) - logger := testutils.NewLogger(t) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + piState := getTestPreInitState(t) runner, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + piState, &loader.SourceData{URL: &url.URL{Scheme: "file", Path: "/script.js"}, Data: script}, nil, ) @@ -884,12 +878,12 @@ func TestVuInitException(t *testing.T) { opts, err := executor.DeriveScenariosFromShortcuts(runner.GetOptions(), nil) require.NoError(t, err) - require.Empty(t, opts.Validate()) - 
require.NoError(t, runner.SetOptions(opts)) - execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger) + testState := getTestRunState(t, piState, opts, runner) + + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := NewEngine(execScheduler, opts, lib.RuntimeOptions{}, nil, logger, registry) + engine, err := NewEngine(testState, execScheduler, nil) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -942,14 +936,8 @@ func TestEmittedMetricsWhenScalingDown(t *testing.T) { }; `)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + getTestPreInitState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil, ) @@ -1031,14 +1019,8 @@ func TestMetricsEmission(t *testing.T) { if !isWindows { t.Parallel() } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + getTestPreInitState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: []byte(fmt.Sprintf(` import { sleep } from "k6"; import { Counter } from "k6/metrics"; @@ -1143,14 +1125,8 @@ func TestMinIterationDurationInSetupTeardownStage(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + getTestPreInitState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script)}, nil, ) @@ -1175,8 +1151,8 @@ func TestMinIterationDurationInSetupTeardownStage(t *testing.T) { func TestEngineRunsTeardownEvenAfterTestRunIsAborted(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - testMetric, err := registry.NewMetric("teardown_metric", metrics.Counter) + piState := getTestPreInitState(t) + testMetric, err := piState.Registry.NewMetric("teardown_metric", metrics.Counter) require.NoError(t, err) var test *testStruct @@ -1192,9 +1168,9 @@ func TestEngineRunsTeardownEvenAfterTestRunIsAborted(t *testing.T) { } mockOutput := mockoutput.New() - test = newTestEngineWithRegistry(t, nil, runner, []output.Output{mockOutput}, lib.Options{ + test = newTestEngineWithTestPreInitState(t, nil, runner, []output.Output{mockOutput}, lib.Options{ VUs: null.IntFrom(1), Iterations: null.IntFrom(1), - }, registry) + }, piState) assert.NoError(t, test.run()) test.wait() @@ -1261,14 +1237,13 @@ func TestActiveVUsCount(t *testing.T) { rtOpts := lib.RuntimeOptions{CompatibilityMode: null.StringFrom("base")} registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - RuntimeOptions: rtOpts, - }, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) + piState := &lib.TestPreInitState{ + Logger: logger, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + RuntimeOptions: rtOpts, + } + runner, err := js.New(piState, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) 
require.NoError(t, err) mockOutput := mockoutput.New() @@ -1279,11 +1254,11 @@ func TestActiveVUsCount(t *testing.T) { MetricSamplesBufferSize: null.NewInt(200, false), }.Apply(runner.GetOptions()), nil) require.NoError(t, err) - require.Empty(t, opts.Validate()) - require.NoError(t, runner.SetOptions(opts)) - execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger) + + testState := getTestRunState(t, piState, opts, runner) + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := NewEngine(execScheduler, opts, rtOpts, []output.Output{mockOutput}, logger, registry) + engine, err := NewEngine(testState, execScheduler, []output.Output{mockOutput}) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) run, waitFn, err := engine.Init(ctx, ctx) // no need for 2 different contexts diff --git a/core/local/k6execution_test.go b/core/local/k6execution_test.go index ab39e5cf74d..fee68206cfe 100644 --- a/core/local/k6execution_test.go +++ b/core/local/k6execution_test.go @@ -88,7 +88,7 @@ func TestExecutionInfoVUSharing(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, @@ -201,7 +201,7 @@ func TestExecutionInfoScenarioIter(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, @@ -283,7 +283,7 @@ func TestSharedIterationsStable(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, @@ -418,7 +418,7 @@ func TestExecutionInfoAll(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, diff --git a/core/local/local.go b/core/local/local.go index 926c297fe71..c4c01b17829 100644 --- a/core/local/local.go +++ b/core/local/local.go @@ -38,10 +38,6 @@ import ( // ExecutionScheduler is the local implementation of lib.ExecutionScheduler type ExecutionScheduler struct { - runner lib.Runner - options lib.Options - logger logrus.FieldLogger - initProgress *pb.ProgressBar executorConfigs []lib.ExecutorConfig // sorted by (startTime, ID) executors []lib.Executor // sorted by (startTime, ID), excludes executors with no work @@ -62,10 +58,8 @@ var _ lib.ExecutionScheduler = &ExecutionScheduler{} // instance, without initializing it beyond the bare minimum. Specifically, it // creates the needed executor instances and a lot of state placeholders, but it // doesn't initialize the executors and it doesn't initialize or run VUs. 
-func NewExecutionScheduler( - runner lib.Runner, builtinMetrics *metrics.BuiltinMetrics, logger logrus.FieldLogger, -) (*ExecutionScheduler, error) { - options := runner.GetOptions() +func NewExecutionScheduler(trs *lib.TestRunState) (*ExecutionScheduler, error) { + options := trs.Options et, err := lib.NewExecutionTuple(options.ExecutionSegment, options.ExecutionSegmentSequence) if err != nil { return nil, err @@ -74,7 +68,7 @@ func NewExecutionScheduler( maxPlannedVUs := lib.GetMaxPlannedVUs(executionPlan) maxPossibleVUs := lib.GetMaxPossibleVUs(executionPlan) - executionState := lib.NewExecutionState(options, et, builtinMetrics, maxPlannedVUs, maxPossibleVUs) + executionState := lib.NewExecutionState(trs, et, maxPlannedVUs, maxPossibleVUs) maxDuration, _ := lib.GetEndOffset(executionPlan) // we don't care if the end offset is final executorConfigs := options.Scenarios.GetSortedConfigs() @@ -82,13 +76,13 @@ func NewExecutionScheduler( // Only take executors which have work. for _, sc := range executorConfigs { if !sc.HasWork(et) { - logger.Warnf( + trs.Logger.Warnf( "Executor '%s' is disabled for segment %s due to lack of work!", sc.GetName(), options.ExecutionSegment, ) continue } - s, err := sc.NewExecutor(executionState, logger.WithFields(logrus.Fields{ + s, err := sc.NewExecutor(executionState, trs.Logger.WithFields(logrus.Fields{ "scenario": sc.GetName(), "executor": sc.GetType(), })) @@ -105,10 +99,6 @@ func NewExecutionScheduler( } return &ExecutionScheduler{ - runner: runner, - logger: logger, - options: options, - initProgress: pb.New(pb.WithConstLeft("Init")), executors: executors, executorConfigs: executorConfigs, @@ -123,8 +113,8 @@ func NewExecutionScheduler( } // GetRunner returns the wrapped lib.Runner instance. -func (e *ExecutionScheduler) GetRunner() lib.Runner { - return e.runner +func (e *ExecutionScheduler) GetRunner() lib.Runner { // TODO: remove + return e.state.Test.Runner } // GetState returns a pointer to the execution state struct for the local @@ -165,12 +155,12 @@ func (e *ExecutionScheduler) GetExecutionPlan() []lib.ExecutionStep { // in the Init() method, and also passed to executors so they can initialize // any unplanned VUs themselves. 
func (e *ExecutionScheduler) initVU( - samplesOut chan<- metrics.SampleContainer, logger *logrus.Entry, + samplesOut chan<- metrics.SampleContainer, logger logrus.FieldLogger, ) (lib.InitializedVU, error) { // Get the VU IDs here, so that the VUs are (mostly) ordered by their // number in the channel buffer vuIDLocal, vuIDGlobal := e.state.GetUniqueVUIdentifiers() - vu, err := e.runner.NewVU(vuIDLocal, vuIDGlobal, samplesOut) + vu, err := e.state.Test.Runner.NewVU(vuIDLocal, vuIDGlobal, samplesOut) if err != nil { return nil, errext.WithHint(err, fmt.Sprintf("error while initializing VU #%d", vuIDGlobal)) } @@ -201,7 +191,7 @@ func (e *ExecutionScheduler) getRunStats() string { func (e *ExecutionScheduler) initVUsConcurrently( ctx context.Context, samplesOut chan<- metrics.SampleContainer, count uint64, - concurrency int, logger *logrus.Entry, + concurrency int, logger logrus.FieldLogger, ) chan error { doneInits := make(chan error, count) // poor man's early-return waitgroup limiter := make(chan struct{}) @@ -233,7 +223,7 @@ func (e *ExecutionScheduler) initVUsConcurrently( } func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- metrics.SampleContainer) { - e.logger.Debug("Starting emission of VUs and VUsMax metrics...") + e.state.Test.Logger.Debug("Starting emission of VUs and VUsMax metrics...") emitMetrics := func() { t := time.Now() @@ -241,17 +231,17 @@ func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- me Samples: []metrics.Sample{ { Time: t, - Metric: e.state.BuiltinMetrics.VUs, + Metric: e.state.Test.BuiltinMetrics.VUs, Value: float64(e.state.GetCurrentlyActiveVUsCount()), - Tags: e.options.RunTags, + Tags: e.state.Test.Options.RunTags, }, { Time: t, - Metric: e.state.BuiltinMetrics.VUsMax, + Metric: e.state.Test.BuiltinMetrics.VUsMax, Value: float64(e.state.GetInitializedVUsCount()), - Tags: e.options.RunTags, + Tags: e.state.Test.Options.RunTags, }, }, - Tags: e.options.RunTags, + Tags: e.state.Test.Options.RunTags, Time: t, } metrics.PushIfNotDone(ctx, out, samples) @@ -261,7 +251,7 @@ func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- me go func() { defer func() { ticker.Stop() - e.logger.Debug("Metrics emission of VUs and VUsMax metrics stopped") + e.state.Test.Logger.Debug("Metrics emission of VUs and VUsMax metrics stopped") close(e.vusEmissionStopped) }() @@ -283,7 +273,7 @@ func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- me func (e *ExecutionScheduler) Init(ctx context.Context, samplesOut chan<- metrics.SampleContainer) error { e.emitVUsAndVUsMax(ctx, samplesOut) - logger := e.logger.WithField("phase", "local-execution-scheduler-init") + logger := e.state.Test.Logger.WithField("phase", "local-execution-scheduler-init") vusToInitialize := lib.GetMaxPlannedVUs(e.executionPlan) logger.WithFields(logrus.Fields{ "neededVUs": vusToInitialize, @@ -350,7 +340,7 @@ func (e *ExecutionScheduler) runExecutor( ) { executorConfig := executor.GetConfig() executorStartTime := executorConfig.GetStartTime() - executorLogger := e.logger.WithFields(logrus.Fields{ + executorLogger := e.state.Test.Logger.WithFields(logrus.Fields{ "executor": executorConfig.GetName(), "type": executorConfig.GetType(), "startTime": executorStartTime, @@ -402,7 +392,7 @@ func (e *ExecutionScheduler) Run(globalCtx, runCtx context.Context, engineOut ch }() executorsCount := len(e.executors) - logger := e.logger.WithField("phase", "local-execution-scheduler-run") + logger := 
e.state.Test.Logger.WithField("phase", "local-execution-scheduler-run") e.initProgress.Modify(pb.WithConstLeft("Run")) var interrupted bool defer func() { @@ -436,11 +426,11 @@ func (e *ExecutionScheduler) Run(globalCtx, runCtx context.Context, engineOut ch defer cancel() // just in case, and to shut up go vet... // Run setup() before any executors, if it's not disabled - if !e.options.NoSetup.Bool { + if !e.state.Test.Options.NoSetup.Bool { logger.Debug("Running setup()") e.state.SetExecutionStatus(lib.ExecutionStatusSetup) e.initProgress.Modify(pb.WithConstProgress(1, "setup()")) - if err := e.runner.Setup(runSubCtx, engineOut); err != nil { + if err := e.state.Test.Runner.Setup(runSubCtx, engineOut); err != nil { logger.WithField("error", err).Debug("setup() aborted by error") return err } @@ -472,14 +462,14 @@ func (e *ExecutionScheduler) Run(globalCtx, runCtx context.Context, engineOut ch } // Run teardown() after all executors are done, if it's not disabled - if !e.options.NoTeardown.Bool { + if !e.state.Test.Options.NoTeardown.Bool { logger.Debug("Running teardown()") e.state.SetExecutionStatus(lib.ExecutionStatusTeardown) e.initProgress.Modify(pb.WithConstProgress(1, "teardown()")) // We run teardown() with the global context, so it isn't interrupted by // aborts caused by thresholds or even Ctrl+C (unless used twice). - if err := e.runner.Teardown(globalCtx, engineOut); err != nil { + if err := e.state.Test.Runner.Teardown(globalCtx, engineOut); err != nil { logger.WithField("error", err).Debug("teardown() aborted by error") return err } @@ -499,7 +489,7 @@ func (e *ExecutionScheduler) SetPaused(pause bool) error { if pause { return fmt.Errorf("execution is already paused") } - e.logger.Debug("Starting execution") + e.state.Test.Logger.Debug("Starting execution") return e.state.Resume() } diff --git a/core/local/local_test.go b/core/local/local_test.go index c6273100fed..743efb90a27 100644 --- a/core/local/local_test.go +++ b/core/local/local_test.go @@ -53,6 +53,28 @@ import ( "go.k6.io/k6/metrics" ) +func getTestPreInitState(tb testing.TB) *lib.TestPreInitState { + reg := metrics.NewRegistry() + return &lib.TestPreInitState{ + Logger: testutils.NewLogger(tb), + RuntimeOptions: lib.RuntimeOptions{}, + Registry: reg, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(reg), + } +} + +func getTestRunState( + tb testing.TB, piState *lib.TestPreInitState, options lib.Options, runner lib.Runner, +) *lib.TestRunState { + require.Empty(tb, options.Validate()) + require.NoError(tb, runner.SetOptions(options)) + return &lib.TestRunState{ + TestPreInitState: piState, + Options: options, + Runner: runner, + } +} + func newTestExecutionScheduler( t *testing.T, runner lib.Runner, logger *logrus.Logger, opts lib.Options, ) (ctx context.Context, cancel func(), execScheduler *ExecutionScheduler, samples chan metrics.SampleContainer) { @@ -64,18 +86,13 @@ func newTestExecutionScheduler( MetricSamplesBufferSize: null.NewInt(200, false), }.Apply(runner.GetOptions()).Apply(opts), nil) require.NoError(t, err) - require.Empty(t, newOpts.Validate()) - require.NoError(t, runner.SetOptions(newOpts)) - - if logger == nil { - logger = logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) + testRunState := getTestRunState(t, getTestPreInitState(t), newOpts, runner) + if logger != nil { + testRunState.Logger = logger } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err = NewExecutionScheduler(runner, builtinMetrics, logger) + 
execScheduler, err = NewExecutionScheduler(testRunState) require.NoError(t, err) samples = make(chan metrics.SampleContainer, newOpts.MetricSamplesBufferSize.Int64) @@ -129,21 +146,16 @@ func TestExecutionSchedulerRunNonDefault(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + piState := getTestPreInitState(t) runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, &loader.SourceData{ + piState, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script), }, nil) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + testRunState := getTestRunState(t, piState, runner.GetOptions(), runner) + + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -184,7 +196,6 @@ func TestExecutionSchedulerRunEnv(t *testing.T) { scenarios: { executor: { executor: "%[1]s", - gracefulStop: "0.5s", %[2]s } } @@ -200,31 +211,37 @@ func TestExecutionSchedulerRunEnv(t *testing.T) { executorConfigs := map[string]string{ "constant-arrival-rate": ` rate: 1, - timeUnit: "0.5s", - duration: "0.5s", + timeUnit: "1s", + duration: "1s", preAllocatedVUs: 1, - maxVUs: 2,`, + maxVUs: 2, + gracefulStop: "0.5s",`, "constant-vus": ` vus: 1, - duration: "0.5s",`, + duration: "1s", + gracefulStop: "0.5s",`, "externally-controlled": ` vus: 1, - duration: "0.5s",`, + duration: "1s",`, "per-vu-iterations": ` vus: 1, - iterations: 1,`, + iterations: 1, + gracefulStop: "0.5s",`, "shared-iterations": ` vus: 1, - iterations: 1,`, + iterations: 1, + gracefulStop: "0.5s",`, "ramping-arrival-rate": ` startRate: 1, timeUnit: "0.5s", preAllocatedVUs: 1, maxVUs: 2, - stages: [ { target: 1, duration: "0.5s" } ],`, + stages: [ { target: 1, duration: "1s" } ], + gracefulStop: "0.5s",`, "ramping-vus": ` startVUs: 1, - stages: [ { target: 1, duration: "0.5s" } ],`, + stages: [ { target: 1, duration: "1s" } ], + gracefulStop: "0.5s",`, } testCases := []struct{ name, script string }{} @@ -244,23 +261,18 @@ func TestExecutionSchedulerRunEnv(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + piState := getTestPreInitState(t) + piState.RuntimeOptions = lib.RuntimeOptions{Env: map[string]string{"TESTVAR": "global"}} runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - RuntimeOptions: lib.RuntimeOptions{Env: map[string]string{"TESTVAR": "global"}}, - }, &loader.SourceData{ + piState, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script), - }, nil) + }, nil, + ) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + testRunState := getTestRunState(t, piState, runner.GetOptions(), runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -316,16 +328,9 @@ func TestExecutionSchedulerSystemTags(t *testing.T) { http.get("HTTPBIN_IP_URL/"); }`) - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := 
metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + piState := getTestPreInitState(t) runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, &loader.SourceData{ + piState, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(script), }, nil) @@ -335,7 +340,8 @@ func TestExecutionSchedulerSystemTags(t *testing.T) { SystemTags: &metrics.DefaultSystemTagSet, }))) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + testRunState := getTestRunState(t, piState, runner.GetOptions(), runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) @@ -406,7 +412,6 @@ func TestExecutionSchedulerRunCustomTags(t *testing.T) { scenarios: { executor: { executor: "%s", - gracefulStop: "0.5s", %s } } @@ -419,31 +424,37 @@ func TestExecutionSchedulerRunCustomTags(t *testing.T) { executorConfigs := map[string]string{ "constant-arrival-rate": ` rate: 1, - timeUnit: "0.5s", - duration: "0.5s", + timeUnit: "1s", + duration: "1s", preAllocatedVUs: 1, - maxVUs: 2,`, + maxVUs: 2, + gracefulStop: "0.5s",`, "constant-vus": ` vus: 1, - duration: "0.5s",`, + duration: "1s", + gracefulStop: "0.5s",`, "externally-controlled": ` vus: 1, - duration: "0.5s",`, + duration: "1s",`, "per-vu-iterations": ` vus: 1, - iterations: 1,`, + iterations: 1, + gracefulStop: "0.5s",`, "shared-iterations": ` vus: 1, - iterations: 1,`, + iterations: 1, + gracefulStop: "0.5s",`, "ramping-arrival-rate": ` startRate: 5, timeUnit: "0.5s", preAllocatedVUs: 1, maxVUs: 2, - stages: [ { target: 10, duration: "1s" } ],`, + stages: [ { target: 10, duration: "1s" } ], + gracefulStop: "0.5s",`, "ramping-vus": ` startVUs: 1, - stages: [ { target: 1, duration: "0.5s" } ],`, + stages: [ { target: 1, duration: "0.5s" } ], + gracefulStop: "0.5s",`, } testCases := []struct{ name, script string }{} @@ -460,25 +471,17 @@ func TestExecutionSchedulerRunCustomTags(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + piState := getTestPreInitState(t) runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, - &loader.SourceData{ + piState, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script), - }, - nil) + }, nil, + ) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + testRunState := getTestRunState(t, piState, runner.GetOptions(), runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) @@ -629,25 +632,20 @@ func TestExecutionSchedulerRunCustomConfigNoCrossover(t *testing.T) { }); } `) - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + piState := getTestPreInitState(t) + piState.RuntimeOptions.Env = map[string]string{"TESTGLOBALVAR": "global"} runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - RuntimeOptions: lib.RuntimeOptions{Env: map[string]string{"TESTGLOBALVAR": "global"}}, - }, 
&loader.SourceData{ + piState, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(script), }, - nil) + nil, + ) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + testRunState := getTestRunState(t, piState, runner.GetOptions(), runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) @@ -975,12 +973,8 @@ func TestExecutionSchedulerEndIterations(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + testRunState := getTestRunState(t, getTestPreInitState(t), runner.GetOptions(), runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) samples := make(chan metrics.SampleContainer, 300) @@ -1089,7 +1083,7 @@ func TestDNSResolver(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1170,17 +1164,8 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { counter.add(6, { place: "defaultAfterSleep" }); }`) - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) + piState := getTestPreInitState(t) + runner, err := js.New(piState, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) require.NoError(t, err) options, err := executor.DeriveScenariosFromShortcuts(runner.GetOptions().Apply(lib.Options{ @@ -1191,9 +1176,9 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { TeardownTimeout: types.NullDurationFrom(4 * time.Second), }), nil) require.NoError(t, err) - require.NoError(t, runner.SetOptions(options)) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + testRunState := getTestRunState(t, piState, options, runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1216,7 +1201,7 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { case sampleContainer := <-sampleContainers: gotVus := false for _, s := range sampleContainer.GetSamples() { - if s.Metric == builtinMetrics.VUs || s.Metric == builtinMetrics.VUsMax { + if s.Metric == piState.BuiltinMetrics.VUs || s.Metric == piState.BuiltinMetrics.VUsMax { gotVus = true break } @@ -1260,7 +1245,7 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { } return metrics.IntoSampleTags(&tags) } - testCounter, err := registry.NewMetric("test_counter", metrics.Counter) + testCounter, err := piState.Registry.NewMetric("test_counter", metrics.Counter) require.NoError(t, err) getSample := func(expValue float64, expMetric *metrics.Metric, expTags ...string) metrics.SampleContainer { return metrics.Sample{ @@ -1277,7 +1262,7 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { net.Dialer{}, 
netext.NewResolver(net.LookupIP, 0, types.DNSfirst, types.DNSpreferIPv4), ).GetTrail(time.Now(), time.Now(), - true, emitIterations, getTags(expTags...), builtinMetrics) + true, emitIterations, getTags(expTags...), piState.BuiltinMetrics) } // Initially give a long time (5s) for the execScheduler to start @@ -1323,12 +1308,8 @@ func TestSetPaused(t *testing.T) { t.Parallel() t.Run("second pause is an error", func(t *testing.T) { t.Parallel() - runner := &minirunner.MiniRunner{} - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - sched, err := NewExecutionScheduler(runner, builtinMetrics, logger) + testRunState := getTestRunState(t, getTestPreInitState(t), lib.Options{}, &minirunner.MiniRunner{}) + sched, err := NewExecutionScheduler(testRunState) require.NoError(t, err) sched.executors = []lib.Executor{pausableExecutor{err: nil}} @@ -1340,12 +1321,8 @@ func TestSetPaused(t *testing.T) { t.Run("unpause at the start is an error", func(t *testing.T) { t.Parallel() - runner := &minirunner.MiniRunner{} - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - sched, err := NewExecutionScheduler(runner, builtinMetrics, logger) + testRunState := getTestRunState(t, getTestPreInitState(t), lib.Options{}, &minirunner.MiniRunner{}) + sched, err := NewExecutionScheduler(testRunState) require.NoError(t, err) sched.executors = []lib.Executor{pausableExecutor{err: nil}} err = sched.SetPaused(false) @@ -1355,12 +1332,8 @@ func TestSetPaused(t *testing.T) { t.Run("second unpause is an error", func(t *testing.T) { t.Parallel() - runner := &minirunner.MiniRunner{} - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - sched, err := NewExecutionScheduler(runner, builtinMetrics, logger) + testRunState := getTestRunState(t, getTestPreInitState(t), lib.Options{}, &minirunner.MiniRunner{}) + sched, err := NewExecutionScheduler(testRunState) require.NoError(t, err) sched.executors = []lib.Executor{pausableExecutor{err: nil}} require.NoError(t, sched.SetPaused(true)) @@ -1372,12 +1345,8 @@ func TestSetPaused(t *testing.T) { t.Run("an error on pausing is propagated", func(t *testing.T) { t.Parallel() - runner := &minirunner.MiniRunner{} - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - sched, err := NewExecutionScheduler(runner, builtinMetrics, logger) + testRunState := getTestRunState(t, getTestPreInitState(t), lib.Options{}, &minirunner.MiniRunner{}) + sched, err := NewExecutionScheduler(testRunState) require.NoError(t, err) expectedErr := errors.New("testing pausable executor error") sched.executors = []lib.Executor{pausableExecutor{err: expectedErr}} @@ -1394,13 +1363,9 @@ func TestSetPaused(t *testing.T) { VUs: null.IntFrom(1), }.Apply(runner.GetOptions()), nil) require.NoError(t, err) - require.NoError(t, runner.SetOptions(options)) - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - sched, err := NewExecutionScheduler(runner, builtinMetrics, logger) + testRunState := getTestRunState(t, getTestPreInitState(t), options, runner) + 
sched, err := NewExecutionScheduler(testRunState) require.NoError(t, err) err = sched.SetPaused(true) require.Error(t, err) @@ -1445,24 +1410,17 @@ func TestNewExecutionSchedulerHasWork(t *testing.T) { logger := logrus.New() logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, - &loader.SourceData{ - URL: &url.URL{Path: "/script.js"}, - Data: script, - }, - nil, - ) + piState := &lib.TestPreInitState{ + Logger: logger, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + } + runner, err := js.New(piState, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + testRunState := getTestRunState(t, piState, runner.GetOptions(), runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) assert.Len(t, execScheduler.executors, 2) diff --git a/js/bundle.go b/js/bundle.go index 025061cd8c7..41d18dd6ffc 100644 --- a/js/bundle.go +++ b/js/bundle.go @@ -74,21 +74,20 @@ type BundleInstance struct { // NewBundle creates a new bundle from a source file and a filesystem. func NewBundle( - logger logrus.FieldLogger, src *loader.SourceData, filesystems map[string]afero.Fs, rtOpts lib.RuntimeOptions, - registry *metrics.Registry, + piState *lib.TestPreInitState, src *loader.SourceData, filesystems map[string]afero.Fs, ) (*Bundle, error) { - compatMode, err := lib.ValidateCompatibilityMode(rtOpts.CompatibilityMode.String) + compatMode, err := lib.ValidateCompatibilityMode(piState.RuntimeOptions.CompatibilityMode.String) if err != nil { return nil, err } // Compile sources, both ES5 and ES6 are supported. code := string(src.Data) - c := compiler.New(logger) + c := compiler.New(piState.Logger) c.Options = compiler.Options{ CompatibilityMode: compatMode, Strict: true, - SourceMapLoader: generateSourceMapLoader(logger, filesystems), + SourceMapLoader: generateSourceMapLoader(piState.Logger, filesystems), } pgm, _, err := c.Compile(code, src.URL.String(), false) if err != nil { @@ -100,17 +99,17 @@ func NewBundle( Filename: src.URL, Source: code, Program: pgm, - BaseInitContext: NewInitContext(logger, rt, c, compatMode, filesystems, loader.Dir(src.URL)), - RuntimeOptions: rtOpts, + BaseInitContext: NewInitContext(piState.Logger, rt, c, compatMode, filesystems, loader.Dir(src.URL)), + RuntimeOptions: piState.RuntimeOptions, CompatibilityMode: compatMode, exports: make(map[string]goja.Callable), - registry: registry, + registry: piState.Registry, } - if err = bundle.instantiate(logger, rt, bundle.BaseInitContext, 0); err != nil { + if err = bundle.instantiate(piState.Logger, rt, bundle.BaseInitContext, 0); err != nil { return nil, err } - err = bundle.getExports(logger, rt, true) + err = bundle.getExports(piState.Logger, rt, true) if err != nil { return nil, err } @@ -119,13 +118,12 @@ func NewBundle( } // NewBundleFromArchive creates a new bundle from an lib.Archive. 
-func NewBundleFromArchive( - logger logrus.FieldLogger, arc *lib.Archive, rtOpts lib.RuntimeOptions, registry *metrics.Registry, -) (*Bundle, error) { +func NewBundleFromArchive(piState *lib.TestPreInitState, arc *lib.Archive) (*Bundle, error) { if arc.Type != "js" { return nil, fmt.Errorf("expected bundle type 'js', got '%s'", arc.Type) } + rtOpts := piState.RuntimeOptions // copy the struct from the TestPreInitState if !rtOpts.CompatibilityMode.Valid { // `k6 run --compatibility-mode=whatever archive.tar` should override // whatever value is in the archive @@ -136,18 +134,18 @@ func NewBundleFromArchive( return nil, err } - c := compiler.New(logger) + c := compiler.New(piState.Logger) c.Options = compiler.Options{ Strict: true, CompatibilityMode: compatMode, - SourceMapLoader: generateSourceMapLoader(logger, arc.Filesystems), + SourceMapLoader: generateSourceMapLoader(piState.Logger, arc.Filesystems), } pgm, _, err := c.Compile(string(arc.Data), arc.FilenameURL.String(), false) if err != nil { return nil, err } rt := goja.New() - initctx := NewInitContext(logger, rt, c, compatMode, arc.Filesystems, arc.PwdURL) + initctx := NewInitContext(piState.Logger, rt, c, compatMode, arc.Filesystems, arc.PwdURL) env := arc.Env if env == nil { @@ -168,16 +166,16 @@ func NewBundleFromArchive( RuntimeOptions: rtOpts, CompatibilityMode: compatMode, exports: make(map[string]goja.Callable), - registry: registry, + registry: piState.Registry, } - if err = bundle.instantiate(logger, rt, bundle.BaseInitContext, 0); err != nil { + if err = bundle.instantiate(piState.Logger, rt, bundle.BaseInitContext, 0); err != nil { return nil, err } // Grab exported objects, but avoid overwriting options, which would // be initialized from the metadata.json at this point. - err = bundle.getExports(logger, rt, false) + err = bundle.getExports(piState.Logger, rt, false) if err != nil { return nil, err } diff --git a/js/bundle_test.go b/js/bundle_test.go index 4d478562263..645b338f5cf 100644 --- a/js/bundle_test.go +++ b/js/bundle_test.go @@ -50,31 +50,46 @@ import ( const isWindows = runtime.GOOS == "windows" +func getTestPreInitState(tb testing.TB, logger *logrus.Logger, rtOpts *lib.RuntimeOptions) *lib.TestPreInitState { + if logger == nil { + logger = testutils.NewLogger(tb) + } + if rtOpts == nil { + rtOpts = &lib.RuntimeOptions{} + } + reg := metrics.NewRegistry() + return &lib.TestPreInitState{ + Logger: logger, + RuntimeOptions: *rtOpts, + Registry: reg, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(reg), + } +} + func getSimpleBundle(tb testing.TB, filename, data string, opts ...interface{}) (*Bundle, error) { - var ( - fs = afero.NewMemMapFs() - rtOpts = lib.RuntimeOptions{} - logger logrus.FieldLogger = testutils.NewLogger(tb) - ) + fs := afero.NewMemMapFs() + var rtOpts *lib.RuntimeOptions + var logger *logrus.Logger for _, o := range opts { switch opt := o.(type) { case afero.Fs: fs = opt case lib.RuntimeOptions: - rtOpts = opt - case logrus.FieldLogger: + rtOpts = &opt + case *logrus.Logger: logger = opt + default: + tb.Fatalf("unknown test option %q", opt) } } + return NewBundle( - logger, + getTestPreInitState(tb, logger, rtOpts), &loader.SourceData{ URL: &url.URL{Path: filename, Scheme: "file"}, Data: []byte(data), }, map[string]afero.Fs{"file": fs, "https": afero.NewMemMapFs()}, - rtOpts, - metrics.NewRegistry(), ) } @@ -489,7 +504,7 @@ func TestNewBundleFromArchive(t *testing.T) { } checkArchive := func(t *testing.T, arc *lib.Archive, rtOpts lib.RuntimeOptions, expError string) { - b, err := 
NewBundleFromArchive(logger, arc, rtOpts, metrics.NewRegistry()) + b, err := NewBundleFromArchive(getTestPreInitState(t, logger, &rtOpts), arc) if expError != "" { require.Error(t, err) require.Contains(t, err.Error(), expError) @@ -572,7 +587,7 @@ func TestNewBundleFromArchive(t *testing.T) { PwdURL: &url.URL{Scheme: "file", Path: "/"}, Filesystems: nil, } - b, err := NewBundleFromArchive(logger, arc, lib.RuntimeOptions{}, metrics.NewRegistry()) + b, err := NewBundleFromArchive(getTestPreInitState(t, logger, nil), arc) require.NoError(t, err) bi, err := b.Instantiate(logger, 0) require.NoError(t, err) @@ -711,7 +726,7 @@ func TestOpen(t *testing.T) { } require.NoError(t, err) - arcBundle, err := NewBundleFromArchive(logger, sourceBundle.makeArchive(), lib.RuntimeOptions{}, metrics.NewRegistry()) + arcBundle, err := NewBundleFromArchive(getTestPreInitState(t, logger, nil), sourceBundle.makeArchive()) require.NoError(t, err) @@ -811,7 +826,7 @@ func TestBundleEnv(t *testing.T) { require.NoError(t, err) logger := testutils.NewLogger(t) - b2, err := NewBundleFromArchive(logger, b1.makeArchive(), lib.RuntimeOptions{}, metrics.NewRegistry()) + b2, err := NewBundleFromArchive(getTestPreInitState(t, logger, nil), b1.makeArchive()) require.NoError(t, err) bundles := map[string]*Bundle{"Source": b1, "Archive": b2} @@ -848,7 +863,7 @@ func TestBundleNotSharable(t *testing.T) { require.NoError(t, err) logger := testutils.NewLogger(t) - b2, err := NewBundleFromArchive(logger, b1.makeArchive(), lib.RuntimeOptions{}, metrics.NewRegistry()) + b2, err := NewBundleFromArchive(getTestPreInitState(t, logger, nil), b1.makeArchive()) require.NoError(t, err) bundles := map[string]*Bundle{"Source": b1, "Archive": b2} diff --git a/js/console_test.go b/js/console_test.go index 14917cca408..cdb5760a491 100644 --- a/js/console_test.go +++ b/js/console_test.go @@ -58,12 +58,14 @@ func getSimpleRunner(tb testing.TB, filename, data string, opts ...interface{}) rtOpts = opt case *logrus.Logger: logger = opt + default: + tb.Fatalf("unknown test option %q", opt) } } registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) return New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, RuntimeOptions: rtOpts, BuiltinMetrics: builtinMetrics, @@ -83,8 +85,9 @@ func extractLogger(fl logrus.FieldLogger) *logrus.Logger { return e.Logger case *logrus.Logger: return e + default: + panic(fmt.Sprintf("unknown logrus.FieldLogger option %q", fl)) } - return nil } func TestConsoleLogWithGojaNativeObject(t *testing.T) { diff --git a/js/init_and_modules_test.go b/js/init_and_modules_test.go index cf74a7b0115..73014711fe5 100644 --- a/js/init_and_modules_test.go +++ b/js/init_and_modules_test.go @@ -78,7 +78,7 @@ func TestNewJSRunnerWithCustomModule(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, @@ -115,7 +115,7 @@ func TestNewJSRunnerWithCustomModule(t *testing.T) { assert.Equal(t, checkModule.vuCtxCalled, 2) runnerFromArc, err := js.NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, diff --git a/js/module_loading_test.go b/js/module_loading_test.go index fdd722f49b5..b32d8d33a94 100644 --- a/js/module_loading_test.go +++ b/js/module_loading_test.go @@ -109,7 +109,7 @@ func TestLoadOnceGlobalVars(t *testing.T) { arc := 
r1.MakeArchive() registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - r2, err := NewFromArchive(&lib.RuntimeState{ + r2, err := NewFromArchive(&lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -167,7 +167,7 @@ func TestLoadExportsIsUsableInModule(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -222,7 +222,7 @@ func TestLoadDoesntBreakHTTPGet(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -275,7 +275,7 @@ func TestLoadGlobalVarsAreNotSharedBetweenVUs(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -347,7 +347,7 @@ func TestLoadCycle(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -417,7 +417,7 @@ func TestLoadCycleBinding(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -489,7 +489,7 @@ func TestBrowserified(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -540,7 +540,7 @@ func TestLoadingUnexistingModuleDoesntPanic(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -582,7 +582,7 @@ func TestLoadingSourceMapsDoesntErrorOut(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -642,7 +642,7 @@ func TestOptionsAreGloballyReadable(t *testing.T) { arc := r1.MakeArchive() registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - r2, err := NewFromArchive(&lib.RuntimeState{ + r2, err := NewFromArchive(&lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -701,7 +701,7 @@ func TestOptionsAreNotGloballyWritable(t *testing.T) { arc := r1.MakeArchive() registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - r2, err := NewFromArchive(&lib.RuntimeState{ + r2, err := NewFromArchive(&lib.TestPreInitState{ Logger: testutils.NewLogger(t), 
BuiltinMetrics: builtinMetrics, Registry: registry, diff --git a/js/modules/k6/marshalling_test.go b/js/modules/k6/marshalling_test.go index da47142f58f..62be5f04429 100644 --- a/js/modules/k6/marshalling_test.go +++ b/js/modules/k6/marshalling_test.go @@ -119,7 +119,7 @@ func TestSetupDataMarshalling(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, diff --git a/js/runner.go b/js/runner.go index d88d3e4bb23..5154abe4792 100644 --- a/js/runner.go +++ b/js/runner.go @@ -38,7 +38,6 @@ import ( "github.com/dop251/goja" "github.com/oxtoacart/bpool" - "github.com/sirupsen/logrus" "github.com/spf13/afero" "golang.org/x/net/http2" "golang.org/x/time/rate" @@ -65,11 +64,9 @@ var _ lib.Runner = &Runner{} var nameToCertWarning sync.Once type Runner struct { - Bundle *Bundle - Logger *logrus.Logger - defaultGroup *lib.Group - builtinMetrics *metrics.BuiltinMetrics - registry *metrics.Registry + Bundle *Bundle + preInitState *lib.TestPreInitState + defaultGroup *lib.Group BaseDialer net.Dialer Resolver netext.Resolver @@ -79,34 +76,30 @@ type Runner struct { console *console setupData []byte - - keylogger io.Writer } -// New returns a new Runner for the provide source -func New( - rs *lib.RuntimeState, src *loader.SourceData, filesystems map[string]afero.Fs, -) (*Runner, error) { - bundle, err := NewBundle(rs.Logger, src, filesystems, rs.RuntimeOptions, rs.Registry) +// New returns a new Runner for the provided source +func New(piState *lib.TestPreInitState, src *loader.SourceData, filesystems map[string]afero.Fs) (*Runner, error) { + bundle, err := NewBundle(piState, src, filesystems) if err != nil { return nil, err } - return NewFromBundle(rs, bundle) + return NewFromBundle(piState, bundle) } // NewFromArchive returns a new Runner from the source in the provided archive -func NewFromArchive(rs *lib.RuntimeState, arc *lib.Archive) (*Runner, error) { - bundle, err := NewBundleFromArchive(rs.Logger, arc, rs.RuntimeOptions, rs.Registry) +func NewFromArchive(piState *lib.TestPreInitState, arc *lib.Archive) (*Runner, error) { + bundle, err := NewBundleFromArchive(piState, arc) if err != nil { return nil, err } - return NewFromBundle(rs, bundle) + return NewFromBundle(piState, bundle) } // NewFromBundle returns a new Runner from the provided Bundle -func NewFromBundle(rs *lib.RuntimeState, b *Bundle) (*Runner, error) { +func NewFromBundle(piState *lib.TestPreInitState, b *Bundle) (*Runner, error) { defaultGroup, err := lib.NewGroup("", nil) if err != nil { return nil, err @@ -115,20 +108,17 @@ func NewFromBundle(rs *lib.RuntimeState, b *Bundle) (*Runner, error) { defDNS := types.DefaultDNSConfig() r := &Runner{ Bundle: b, - Logger: rs.Logger, + preInitState: piState, defaultGroup: defaultGroup, BaseDialer: net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, DualStack: true, }, - console: newConsole(rs.Logger), + console: newConsole(piState.Logger), Resolver: netext.NewResolver( net.LookupIP, 0, defDNS.Select.DNSSelect, defDNS.Policy.DNSPolicy), ActualResolver: net.LookupIP, - builtinMetrics: rs.BuiltinMetrics, - registry: rs.Registry, - keylogger: rs.KeyLogger, } err = r.SetOptions(r.Bundle.Options) @@ -152,7 +142,7 @@ func (r *Runner) NewVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.Sampl //nolint:funlen func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- 
metrics.SampleContainer) (*VU, error) { // Instantiate a new bundle, make a VU out of it. - bi, err := r.Bundle.Instantiate(r.Logger, idLocal) + bi, err := r.Bundle.Instantiate(r.preInitState.Logger, idLocal) if err != nil { return nil, err } @@ -203,15 +193,17 @@ func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.Sampl MaxVersion: uint16(tlsVersions.Max), Certificates: certs, Renegotiation: tls.RenegotiateFreelyAsClient, - KeyLogWriter: r.keylogger, + KeyLogWriter: r.preInitState.KeyLogger, } // Follow NameToCertificate in https://pkg.go.dev/crypto/tls@go1.17.6#Config, leave this field nil // when it is empty if len(nameToCert) > 0 { nameToCertWarning.Do(func() { - r.Logger.Warn("tlsAuth.domains option could be removed in the next releases, it's recommended to leave it empty " + - "and let k6 automatically detect from the provided certificate. It follows the Go's NameToCertificate " + - "deprecation - https://pkg.go.dev/crypto/tls@go1.17#Config.") + r.preInitState.Logger.Warn( + "tlsAuth.domains option could be removed in the next releases, it's recommended to leave it empty " + + "and let k6 automatically detect from the provided certificate. It follows the Go's NameToCertificate " + + "deprecation - https://pkg.go.dev/crypto/tls@go1.17#Config.", + ) }) //nolint:staticcheck // ignore SA1019 we can deprecate it but we have to continue to support the previous code. tlsConfig.NameToCertificate = nameToCert @@ -254,7 +246,7 @@ func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.Sampl } vu.state = &lib.State{ - Logger: vu.Runner.Logger, + Logger: vu.Runner.preInitState.Logger, Options: vu.Runner.Bundle.Options, Transport: vu.Transport, Dialer: vu.Dialer, @@ -267,7 +259,7 @@ func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.Sampl Samples: vu.Samples, Tags: lib.NewTagMap(vu.Runner.Bundle.Options.RunTags.CloneTags()), Group: r.defaultGroup, - BuiltinMetrics: r.builtinMetrics, + BuiltinMetrics: r.preInitState.BuiltinMetrics, } vu.moduleVUImpl.state = vu.state _ = vu.Runtime.Set("console", vu.Console) @@ -442,7 +434,7 @@ func (r *Runner) SetOptions(opts lib.Options) error { // TODO: validate that all exec values are either nil or valid exported methods (or HTTP requests in the future) if opts.ConsoleOutput.Valid { - c, err := newFileConsole(opts.ConsoleOutput.String, r.Logger.Formatter) + c, err := newFileConsole(opts.ConsoleOutput.String, r.preInitState.Logger.Formatter) if err != nil { return err } @@ -819,7 +811,7 @@ func (u *VU) runFn( sampleTags := metrics.NewSampleTags(u.state.CloneTags()) u.state.Samples <- u.Dialer.GetTrail( - startTime, endTime, isFullIteration, isDefault, sampleTags, u.Runner.builtinMetrics) + startTime, endTime, isFullIteration, isDefault, sampleTags, u.Runner.preInitState.BuiltinMetrics) return v, isFullIteration, endTime.Sub(startTime), err } diff --git a/js/runner_test.go b/js/runner_test.go index 66913ec2b00..821af0c65b6 100644 --- a/js/runner_test.go +++ b/js/runner_test.go @@ -110,7 +110,7 @@ func TestRunnerGetDefaultGroup(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -127,7 +127,7 @@ func TestRunnerOptions(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + 
&lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -217,7 +217,7 @@ func TestOptionsPropagationToScript(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -310,15 +310,17 @@ func TestSetupDataIsolation(t *testing.T) { options := runner.GetOptions() require.Empty(t, options.Validate()) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, testutils.NewLogger(t)) + testRunState := &lib.TestRunState{ + TestPreInitState: runner.preInitState, + Options: options, + Runner: runner, + } + + execScheduler, err := local.NewExecutionScheduler(testRunState) require.NoError(t, err) mockOutput := mockoutput.New() - engine, err := core.NewEngine( - execScheduler, options, lib.RuntimeOptions{}, []output.Output{mockOutput}, testutils.NewLogger(t), registry, - ) + engine, err := core.NewEngine(testRunState, execScheduler, []output.Output{mockOutput}) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) defer engine.OutputManager.StopOutputs() @@ -511,7 +513,7 @@ func TestRunnerIntegrationImports(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -548,7 +550,7 @@ func TestVURunContext(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -599,7 +601,7 @@ func TestVURunInterrupt(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -641,7 +643,7 @@ func TestVURunInterruptDoesntPanic(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -706,7 +708,7 @@ func TestVUIntegrationGroups(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -766,7 +768,7 @@ func TestVUIntegrationMetrics(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -849,7 +851,7 @@ func TestVUIntegrationInsecureRequests(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: 
builtinMetrics, Registry: registry, @@ -860,7 +862,7 @@ func TestVUIntegrationInsecureRequests(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - r.Logger, _ = logtest.NewNullLogger() + r.preInitState.Logger, _ = logtest.NewNullLogger() initVU, err := r.NewVU(1, 1, make(chan metrics.SampleContainer, 100)) require.NoError(t, err) @@ -900,7 +902,7 @@ func TestVUIntegrationBlacklistOption(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -941,7 +943,7 @@ func TestVUIntegrationBlacklistScript(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -984,7 +986,7 @@ func TestVUIntegrationBlockHostnamesOption(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1027,7 +1029,7 @@ func TestVUIntegrationBlockHostnamesScript(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1081,7 +1083,7 @@ func TestVUIntegrationHosts(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1157,7 +1159,7 @@ func TestVUIntegrationTLSConfig(t *testing.T) { require.NoError(t, r1.SetOptions(lib.Options{Throw: null.BoolFrom(true)}.Apply(data.opts))) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1169,7 +1171,7 @@ func TestVUIntegrationTLSConfig(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - r.Logger, _ = logtest.NewNullLogger() + r.preInitState.Logger, _ = logtest.NewNullLogger() initVU, err := r.NewVU(1, 1, make(chan metrics.SampleContainer, 100)) require.NoError(t, err) @@ -1328,7 +1330,7 @@ func TestVUIntegrationCookiesReset(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1388,7 +1390,7 @@ func TestVUIntegrationCookiesNoReset(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1428,7 +1430,7 @@ func TestVUIntegrationVUID(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, 
Registry: registry, @@ -1584,7 +1586,7 @@ func TestVUIntegrationClientCerts(t *testing.T) { } require.NoError(t, r1.SetOptions(opt)) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1596,7 +1598,7 @@ func TestVUIntegrationClientCerts(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - r.Logger, _ = logtest.NewNullLogger() + r.preInitState.Logger, _ = logtest.NewNullLogger() initVU, err := r.NewVU(1, 1, make(chan metrics.SampleContainer, 100)) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1749,7 +1751,7 @@ func TestArchiveRunningIntegrity(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1794,7 +1796,7 @@ func TestArchiveNotPanicking(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -2007,7 +2009,7 @@ func TestVUPanic(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -2075,7 +2077,7 @@ func runMultiFileTestCase(t *testing.T, tc multiFileTestCase, tb *httpmultibin.H registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, @@ -2117,7 +2119,7 @@ func runMultiFileTestCase(t *testing.T, tc multiFileTestCase, tb *httpmultibin.H arc := runner.MakeArchive() runnerFromArc, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, @@ -2342,7 +2344,7 @@ func TestForceHTTP1Feature(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -2438,9 +2440,13 @@ func TestExecutionInfo(t *testing.T) { initVU, err := r.NewVU(1, 10, samples) require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(r, builtinMetrics, testutils.NewLogger(t)) + testRunState := &lib.TestRunState{ + TestPreInitState: r.preInitState, + Options: r.GetOptions(), + Runner: r, + } + + execScheduler, err := local.NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) diff --git a/js/share_test.go b/js/share_test.go index be5bae83326..a5efaf6d904 100644 --- a/js/share_test.go +++ b/js/share_test.go @@ -85,7 +85,7 @@ exports.default = function() { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, diff --git 
a/lib/execution.go b/lib/execution.go
index dd8959b5828..6dc109312aa 100644
--- a/lib/execution.go
+++ b/lib/execution.go
@@ -141,19 +141,18 @@ const (
 // around pausing, and uninitializedUnplannedVUs for restricting the number of
 // unplanned VUs being initialized.
 type ExecutionState struct {
-    // A copy of the options, so the different executors have access to them.
-    // They will need to access things like the current execution segment, the
-    // per-run metrics tags, etc.
+    // A portal to the broader test run state, so the different executors have
+    // access to the test options, built-in metrics, etc. They will need to
+    // access things like the current execution segment, the per-run metrics
+    // tags, different metrics to emit, etc.
     //
-    // Obviously, they are not meant to be changed... They should be a constant
-    // during the execution of a single test, but we can't easily enforce that
-    // via the Go type system...
-    Options Options
+    // Obviously, things here are not meant to be changed... They should be a
+    // constant during the execution of a single test, but we can't easily
+    // enforce that via the Go type system...
+    Test *TestRunState

     ExecutionTuple *ExecutionTuple // TODO Rename, possibly move
-    BuiltinMetrics *metrics.BuiltinMetrics
-
     // vus is the shared channel buffer that contains all of the VUs that have
     // been initialized and aren't currently being used by a executor.
     //
@@ -276,8 +275,7 @@ type ExecutionState struct {
 // with zeros. It also makes sure that the initial state is unpaused, by
 // setting resumeNotify to an already closed channel.
 func NewExecutionState(
-    options Options, et *ExecutionTuple, builtinMetrics *metrics.BuiltinMetrics,
-    maxPlannedVUs, maxPossibleVUs uint64,
+    testRunState *TestRunState, et *ExecutionTuple, maxPlannedVUs, maxPossibleVUs uint64,
 ) *ExecutionState {
     resumeNotify := make(chan struct{})
     close(resumeNotify) // By default the ExecutionState starts unpaused
@@ -286,9 +284,8 @@ func NewExecutionState(
     segIdx := NewSegmentedIndex(et)

     return &ExecutionState{
-        Options:        options,
+        Test:           testRunState,
         ExecutionTuple: et,
-        BuiltinMetrics: builtinMetrics,

         vus: make(chan InitializedVU, maxPossibleVUs),
diff --git a/lib/executor/base_executor.go b/lib/executor/base_executor.go
index 830abf234f7..e8e57cddf0e 100644
--- a/lib/executor/base_executor.go
+++ b/lib/executor/base_executor.go
@@ -93,11 +93,11 @@ func (bs *BaseExecutor) GetProgress() *pb.ProgressBar {
 // getMetricTags returns a tag set that can be used to emit metrics by the
 // executor. The VU ID is optional.
 func (bs *BaseExecutor) getMetricTags(vuID *uint64) *metrics.SampleTags {
-    tags := bs.executionState.Options.RunTags.CloneTags()
-    if bs.executionState.Options.SystemTags.Has(metrics.TagScenario) {
+    tags := bs.executionState.Test.Options.RunTags.CloneTags()
+    if bs.executionState.Test.Options.SystemTags.Has(metrics.TagScenario) {
         tags["scenario"] = bs.config.GetName()
     }
-    if vuID != nil && bs.executionState.Options.SystemTags.Has(metrics.TagVU) {
+    if vuID != nil && bs.executionState.Test.Options.SystemTags.Has(metrics.TagVU) {
         tags["vu"] = strconv.FormatUint(*vuID, 10)
     }
     return metrics.IntoSampleTags(&tags)
 }
diff --git a/lib/executor/common_test.go b/lib/executor/common_test.go
index b092b1204f4..a1cc917c343 100644
--- a/lib/executor/common_test.go
+++ b/lib/executor/common_test.go
@@ -42,7 +42,25 @@ func simpleRunner(vuFn func(context.Context, *lib.State) error) lib.Runner {
     }
 }

-func setupExecutor(t testing.TB, config lib.ExecutorConfig, es *lib.ExecutionState, runner lib.Runner) (
+func getTestRunState(tb testing.TB, options lib.Options, runner lib.Runner) *lib.TestRunState {
+    reg := metrics.NewRegistry()
+    piState := &lib.TestPreInitState{
+        Logger:         testutils.NewLogger(tb),
+        RuntimeOptions: lib.RuntimeOptions{},
+        Registry:       reg,
+        BuiltinMetrics: metrics.RegisterBuiltinMetrics(reg),
+    }
+
+    require.NoError(tb, runner.SetOptions(options))
+
+    return &lib.TestRunState{
+        TestPreInitState: piState,
+        Options:          options,
+        Runner:           runner,
+    }
+}
+
+func setupExecutor(t testing.TB, config lib.ExecutorConfig, es *lib.ExecutionState) (
     context.Context, context.CancelFunc, lib.Executor, *testutils.SimpleLogrusHook,
 ) {
     ctx, cancel := context.WithCancel(context.Background())
@@ -56,7 +74,7 @@ func setupExecutor(t testing.TB, config lib.ExecutorConfig, es *lib.ExecutionSta
     initVUFunc := func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) {
         idl, idg := es.GetUniqueVUIdentifiers()
-        return runner.NewVU(idl, idg, engineOut)
+        return es.Test.Runner.NewVU(idl, idg, engineOut)
     }
     es.SetInitVUFunc(initVUFunc)
@@ -83,3 +101,54 @@ func initializeVUs(
         es.AddInitializedVU(vu)
     }
 }
+
+type executorTest struct {
+    options lib.Options
+    state   *lib.ExecutionState
+
+    ctx      context.Context //nolint
+    cancel   context.CancelFunc
+    executor lib.Executor
+    logHook  *testutils.SimpleLogrusHook
+}
+
+func setupExecutorTest(
+    t testing.TB, segmentStr, sequenceStr string, extraOptions lib.Options,
+    runner lib.Runner, config lib.ExecutorConfig,
+) *executorTest {
+    var err error
+    var segment *lib.ExecutionSegment
+    if segmentStr != "" {
+        segment, err = lib.NewExecutionSegmentFromString(segmentStr)
+        require.NoError(t, err)
+    }
+
+    var sequence lib.ExecutionSegmentSequence
+    if sequenceStr != "" {
+        sequence, err = lib.NewExecutionSegmentSequenceFromString(sequenceStr)
+        require.NoError(t, err)
+    }
+
+    et, err := lib.NewExecutionTuple(segment, &sequence)
+    require.NoError(t, err)
+
+    options := lib.Options{
+        ExecutionSegment:         segment,
+        ExecutionSegmentSequence: &sequence,
+    }.Apply(runner.GetOptions()).Apply(extraOptions)
+
+    testRunState := getTestRunState(t, options, runner)
+
+    execReqs := config.GetExecutionRequirements(et)
+    es := lib.NewExecutionState(testRunState, et, lib.GetMaxPlannedVUs(execReqs), lib.GetMaxPossibleVUs(execReqs))
+    ctx, cancel, executor, logHook := setupExecutor(t, config, es)
+
+    return &executorTest{
+        options:  options,
+        state:    es,
+        ctx:      ctx,
+        cancel:   cancel,
+        executor: executor,
+        logHook:  logHook,
+    }
+}
diff --git a/lib/executor/constant_arrival_rate.go
b/lib/executor/constant_arrival_rate.go index c97cdfcf985..070b9916b37 100644 --- a/lib/executor/constant_arrival_rate.go +++ b/lib/executor/constant_arrival_rate.go @@ -337,7 +337,7 @@ func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- metrics int64(car.config.TimeUnit.TimeDuration()), )).TimeDuration() - droppedIterationMetric := car.executionState.BuiltinMetrics.DroppedIterations + droppedIterationMetric := car.executionState.Test.BuiltinMetrics.DroppedIterations shownWarning := false metricTags := car.getMetricTags(nil) for li, gi := 0, start; ; li, gi = li+1, gi+offsets[li%len(offsets)] { diff --git a/lib/executor/constant_arrival_rate_test.go b/lib/executor/constant_arrival_rate_test.go index 7bca39fa9d8..f1315efb02f 100644 --- a/lib/executor/constant_arrival_rate_test.go +++ b/lib/executor/constant_arrival_rate_test.go @@ -34,7 +34,6 @@ import ( "gopkg.in/guregu/null.v3" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/types" "go.k6.io/k6/metrics" ) @@ -68,24 +67,18 @@ func getTestConstantArrivalRateConfig() *ConstantArrivalRateConfig { func TestConstantArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, getTestConstantArrivalRateConfig(), es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - time.Sleep(time.Second) - return nil - }), - ) - defer cancel() + + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + time.Sleep(time.Second) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestConstantArrivalRateConfig()) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) - entries := logHook.Drain() + require.NoError(t, test.executor.Run(test.ctx, engineOut)) + entries := test.logHook.Drain() require.NotEmpty(t, entries) for _, entry := range entries { require.Equal(t, @@ -97,20 +90,16 @@ func TestConstantArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) { func TestConstantArrivalRateRunCorrectRate(t *testing.T) { t.Parallel() + var count int64 - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, getTestConstantArrivalRateConfig(), es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - atomic.AddInt64(&count, 1) - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + atomic.AddInt64(&count, 1) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestConstantArrivalRateConfig()) + defer test.cancel() + var wg sync.WaitGroup wg.Add(1) go func() { @@ -125,114 +114,108 @@ func TestConstantArrivalRateRunCorrectRate(t *testing.T) { } }() engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) wg.Wait() - require.NoError(t, err) - require.Empty(t, logHook.Drain()) + require.Empty(t, test.logHook.Drain()) } //nolint:tparallel,paralleltest 
// this is flaky if ran with other tests func TestConstantArrivalRateRunCorrectTiming(t *testing.T) { // t.Parallel() tests := []struct { - segment *lib.ExecutionSegment - sequence *lib.ExecutionSegmentSequence + segment string + sequence string start time.Duration steps []int64 }{ { - segment: newExecutionSegmentFromString("0:1/3"), + segment: "0:1/3", start: time.Millisecond * 20, steps: []int64{40, 60, 60, 60, 60, 60, 60}, }, { - segment: newExecutionSegmentFromString("1/3:2/3"), + segment: "1/3:2/3", start: time.Millisecond * 20, steps: []int64{60, 60, 60, 60, 60, 60, 40}, }, { - segment: newExecutionSegmentFromString("2/3:1"), + segment: "2/3:1", start: time.Millisecond * 20, steps: []int64{40, 60, 60, 60, 60, 60, 60}, }, { - segment: newExecutionSegmentFromString("1/6:3/6"), + segment: "1/6:3/6", start: time.Millisecond * 20, steps: []int64{40, 80, 40, 80, 40, 80, 40}, }, { - segment: newExecutionSegmentFromString("1/6:3/6"), - sequence: newExecutionSegmentSequenceFromString("1/6,3/6"), + segment: "1/6:3/6", + sequence: "1/6,3/6", start: time.Millisecond * 20, steps: []int64{40, 80, 40, 80, 40, 80, 40}, }, // sequences { - segment: newExecutionSegmentFromString("0:1/3"), - sequence: newExecutionSegmentSequenceFromString("0,1/3,2/3,1"), + segment: "0:1/3", + sequence: "0,1/3,2/3,1", start: time.Millisecond * 0, steps: []int64{60, 60, 60, 60, 60, 60, 40}, }, { - segment: newExecutionSegmentFromString("1/3:2/3"), - sequence: newExecutionSegmentSequenceFromString("0,1/3,2/3,1"), + segment: "1/3:2/3", + sequence: "0,1/3,2/3,1", start: time.Millisecond * 20, steps: []int64{60, 60, 60, 60, 60, 60, 40}, }, { - segment: newExecutionSegmentFromString("2/3:1"), - sequence: newExecutionSegmentSequenceFromString("0,1/3,2/3,1"), + segment: "2/3:1", + sequence: "0,1/3,2/3,1", start: time.Millisecond * 40, steps: []int64{60, 60, 60, 60, 60, 100}, }, } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) for _, test := range tests { test := test t.Run(fmt.Sprintf("segment %s sequence %s", test.segment, test.sequence), func(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(test.segment, test.sequence) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{ - ExecutionSegment: test.segment, - ExecutionSegmentSequence: test.sequence, - }, et, builtinMetrics, 10, 50) + var count int64 - seconds := 2 + startTime := time.Now() + expectedTimeInt64 := int64(test.start) + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + current := atomic.AddInt64(&count, 1) + + expectedTime := test.start + if current != 1 { + expectedTime = time.Duration(atomic.AddInt64(&expectedTimeInt64, + int64(time.Millisecond)*test.steps[(current-2)%int64(len(test.steps))])) + } + + // FIXME: replace this check with a unit test asserting that the scheduling is correct, + // without depending on the execution time itself + assert.WithinDuration(t, + startTime.Add(expectedTime), + time.Now(), + time.Millisecond*24, + "%d expectedTime %s", current, expectedTime, + ) + + return nil + }) + config := getTestConstantArrivalRateConfig() + seconds := 2 config.Duration.Duration = types.Duration(time.Second * time.Duration(seconds)) - newET, err := es.ExecutionTuple.GetNewExecutionTupleFromValue(config.MaxVUs.Int64) + execTest := setupExecutorTest( + t, test.segment, test.sequence, lib.Options{}, runner, config, + ) + defer execTest.cancel() + + newET, err := execTest.state.ExecutionTuple.GetNewExecutionTupleFromValue(config.MaxVUs.Int64) 
require.NoError(t, err) rateScaled := newET.ScaleInt64(config.Rate.Int64) - startTime := time.Now() - expectedTimeInt64 := int64(test.start) - ctx, cancel, executor, logHook := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - current := atomic.AddInt64(&count, 1) - - expectedTime := test.start - if current != 1 { - expectedTime = time.Duration(atomic.AddInt64(&expectedTimeInt64, - int64(time.Millisecond)*test.steps[(current-2)%int64(len(test.steps))])) - } - - // FIXME: replace this check with a unit test asserting that the scheduling is correct, - // without depending on the execution time itself - assert.WithinDuration(t, - startTime.Add(expectedTime), - time.Now(), - time.Millisecond*24, - "%d expectedTime %s", current, expectedTime, - ) - - return nil - }), - ) - defer cancel() var wg sync.WaitGroup wg.Add(1) go func() { @@ -248,10 +231,10 @@ func TestConstantArrivalRateRunCorrectTiming(t *testing.T) { }() startTime = time.Now() engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) + err = execTest.executor.Run(execTest.ctx, engineOut) wg.Wait() require.NoError(t, err) - require.Empty(t, logHook.Drain()) + require.Empty(t, execTest.logHook.Drain()) }) } } @@ -263,8 +246,6 @@ func TestArrivalRateCancel(t *testing.T) { "constant": getTestConstantArrivalRateConfig(), "ramping": getTestRampingArrivalRateConfig(), } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) for name, config := range testCases { config := config t.Run(name, func(t *testing.T) { @@ -272,32 +253,31 @@ func TestArrivalRateCancel(t *testing.T) { ch := make(chan struct{}) errCh := make(chan error, 1) weAreDoneCh := make(chan struct{}) - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, config, es, simpleRunner(func(ctx context.Context, _ *lib.State) error { - select { - case <-ch: - <-ch - default: - } - return nil - })) - defer cancel() + + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + select { + case <-ch: + <-ch + default: + } + return nil + }) + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() engineOut := make(chan metrics.SampleContainer, 1000) - errCh <- executor.Run(ctx, engineOut) + errCh <- test.executor.Run(test.ctx, engineOut) close(weAreDoneCh) }() time.Sleep(time.Second) ch <- struct{}{} - cancel() + test.cancel() time.Sleep(time.Second) select { case <-weAreDoneCh: @@ -308,7 +288,7 @@ func TestArrivalRateCancel(t *testing.T) { <-weAreDoneCh wg.Wait() require.NoError(t, <-errCh) - require.Empty(t, logHook.Drain()) + require.Empty(t, test.logHook.Drain()) }) } } @@ -316,8 +296,6 @@ func TestArrivalRateCancel(t *testing.T) { func TestConstantArrivalRateDroppedIterations(t *testing.T) { t.Parallel() var count int64 - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) config := &ConstantArrivalRateConfig{ BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(0 * time.Second)}, @@ -328,22 +306,17 @@ func TestConstantArrivalRateDroppedIterations(t *testing.T) { MaxVUs: null.IntFrom(5), } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook 
:= setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - atomic.AddInt64(&count, 1) - <-ctx.Done() - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + atomic.AddInt64(&count, 1) + <-ctx.Done() + return nil + }) + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) - logs := logHook.Drain() + require.NoError(t, test.executor.Run(test.ctx, engineOut)) + logs := test.logHook.Drain() require.Len(t, logs, 1) assert.Contains(t, logs[0].Message, "cannot initialize more") assert.Equal(t, int64(5), count) @@ -371,36 +344,24 @@ func TestConstantArrivalRateGlobalIters(t *testing.T) { {"0,1/4,3/4,1", "3/4:1", []uint64{3, 8, 13, 18}}, } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) for _, tc := range testCases { tc := tc t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { t.Parallel() - ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) - require.NoError(t, err) - seg, err := lib.NewExecutionSegmentFromString(tc.seg) - require.NoError(t, err) - et, err := lib.NewExecutionTuple(seg, &ess) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 5, 5) - - runner := &minirunner.MiniRunner{} - ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) - defer cancel() gotIters := []uint64{} var mx sync.Mutex - runner.Fn = func(ctx context.Context, state *lib.State, _ chan<- metrics.SampleContainer) error { + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { mx.Lock() gotIters = append(gotIters, state.GetScenarioGlobalVUIter()) mx.Unlock() return nil - } + }) + test := setupExecutorTest(t, tc.seg, tc.seq, lib.Options{}, runner, config) + defer test.cancel() engineOut := make(chan metrics.SampleContainer, 100) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) assert.Equal(t, tc.expIters, gotIters) }) } diff --git a/lib/executor/constant_vus_test.go b/lib/executor/constant_vus_test.go index 4eb8306a171..f0c4fb8b56e 100644 --- a/lib/executor/constant_vus_test.go +++ b/lib/executor/constant_vus_test.go @@ -45,26 +45,23 @@ func getTestConstantVUsConfig() ConstantVUsConfig { func TestConstantVUsRun(t *testing.T) { t.Parallel() var result sync.Map - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, getTestConstantVUsConfig(), es, - simpleRunner(func(ctx context.Context, state *lib.State) error { - select { - case <-ctx.Done(): - return nil - default: - } - currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) - result.Store(state.VUID, currIter.(uint64)+1) - time.Sleep(210 * time.Millisecond) + + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { + select { + case <-ctx.Done(): return nil - }), - ) - defer cancel() - err = executor.Run(ctx, nil) - require.NoError(t, err) + default: + } + currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) + result.Store(state.VUID, currIter.(uint64)+1) //nolint:forcetypeassert + time.Sleep(210 * time.Millisecond) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestConstantVUsConfig()) + defer test.cancel() + + 
require.NoError(t, test.executor.Run(test.ctx, nil)) var totalIters uint64 result.Range(func(key, value interface{}) bool { diff --git a/lib/executor/execution_test.go b/lib/executor/execution_test.go index 44724531211..f3bb312c6c2 100644 --- a/lib/executor/execution_test.go +++ b/lib/executor/execution_test.go @@ -61,7 +61,7 @@ func TestExecutionStateVUIDs(t *testing.T) { require.NoError(t, err) start, offsets, _ := et.GetStripedOffsets() - es := lib.NewExecutionState(lib.Options{}, et, nil, 0, 0) + es := lib.NewExecutionState(nil, et, 0, 0) idl, idg := es.GetUniqueVUIdentifiers() assert.Equal(t, uint64(1), idl) @@ -102,7 +102,7 @@ func TestExecutionStateGettingVUsWhenNonAreAvailable(t *testing.T) { t.Parallel() et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 0, 0) + es := lib.NewExecutionState(nil, et, 0, 0) logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.WarnLevel}} testLog := logrus.New() testLog.AddHook(logHook) @@ -128,7 +128,7 @@ func TestExecutionStateGettingVUs(t *testing.T) { et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 20) + es := lib.NewExecutionState(nil, et, 10, 20) es.SetInitVUFunc(func(_ context.Context, _ *logrus.Entry) (lib.InitializedVU, error) { return &minirunner.VU{}, nil }) @@ -193,7 +193,7 @@ func TestMarkStartedPanicsOnSecondRun(t *testing.T) { t.Parallel() et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 0, 0) + es := lib.NewExecutionState(nil, et, 0, 0) require.False(t, es.HasStarted()) es.MarkStarted() require.True(t, es.HasStarted()) @@ -204,7 +204,7 @@ func TestMarkEnded(t *testing.T) { t.Parallel() et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 0, 0) + es := lib.NewExecutionState(nil, et, 0, 0) require.False(t, es.HasEnded()) es.MarkEnded() require.True(t, es.HasEnded()) diff --git a/lib/executor/externally_controlled_test.go b/lib/executor/externally_controlled_test.go index 28b33b40497..20d93588deb 100644 --- a/lib/executor/externally_controlled_test.go +++ b/lib/executor/externally_controlled_test.go @@ -48,20 +48,15 @@ func getTestExternallyControlledConfig() ExternallyControlledConfig { func TestExternallyControlledRun(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - doneIters := new(uint64) - ctx, cancel, executor, _ := setupExecutor( - t, getTestExternallyControlledConfig(), es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - time.Sleep(200 * time.Millisecond) - atomic.AddUint64(doneIters, 1) - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + time.Sleep(200 * time.Millisecond) + atomic.AddUint64(doneIters, 1) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestExternallyControlledConfig()) + defer test.cancel() var ( wg sync.WaitGroup @@ -71,9 +66,9 @@ func TestExternallyControlledRun(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - es.MarkStarted() - errCh <- executor.Run(ctx, nil) - es.MarkEnded() + test.state.MarkStarted() + errCh <- test.executor.Run(test.ctx, nil) + test.state.MarkEnded() close(doneCh) }() @@ -83,7 +78,7 @@ func TestExternallyControlledRun(t *testing.T) { MaxVUs: 
null.IntFrom(maxVUs), Duration: types.NullDurationFrom(2 * time.Second), } - err := executor.(*ExternallyControlled).UpdateConfig(ctx, newConfig) + err := test.executor.(*ExternallyControlled).UpdateConfig(test.ctx, newConfig) //nolint:forcetypeassert if errMsg != "" { assert.EqualError(t, err, errMsg) } else { @@ -94,7 +89,7 @@ func TestExternallyControlledRun(t *testing.T) { var resultVUCount [][]int64 snapshot := func() { resultVUCount = append(resultVUCount, - []int64{es.GetCurrentlyActiveVUsCount(), es.GetInitializedVUsCount()}) + []int64{test.state.GetCurrentlyActiveVUsCount(), test.state.GetInitializedVUsCount()}) } wg.Add(1) diff --git a/lib/executor/per_vu_iterations.go b/lib/executor/per_vu_iterations.go index d609fcbbfdf..d90db1e2a03 100644 --- a/lib/executor/per_vu_iterations.go +++ b/lib/executor/per_vu_iterations.go @@ -218,7 +218,7 @@ func (pvi PerVUIterations) Run(parentCtx context.Context, out chan<- metrics.Sam activeVUs.Done() } - droppedIterationMetric := pvi.executionState.BuiltinMetrics.DroppedIterations + droppedIterationMetric := pvi.executionState.Test.BuiltinMetrics.DroppedIterations handleVU := func(initVU lib.InitializedVU) { defer handleVUsWG.Done() ctx, cancel := context.WithCancel(maxDurationCtx) diff --git a/lib/executor/per_vu_iterations_test.go b/lib/executor/per_vu_iterations_test.go index 517b76a49ea..4ee367d0c1e 100644 --- a/lib/executor/per_vu_iterations_test.go +++ b/lib/executor/per_vu_iterations_test.go @@ -49,23 +49,18 @@ func getTestPerVUIterationsConfig() PerVUIterationsConfig { func TestPerVUIterationsRun(t *testing.T) { t.Parallel() var result sync.Map - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, getTestPerVUIterationsConfig(), es, - simpleRunner(func(ctx context.Context, state *lib.State) error { - currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) - result.Store(state.VUID, currIter.(uint64)+1) - return nil - }), - ) - defer cancel() + + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { + currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) + result.Store(state.VUID, currIter.(uint64)+1) //nolint:forcetypeassert + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestPerVUIterationsConfig()) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) var totalIters uint64 result.Range(func(key, value interface{}) bool { @@ -85,26 +80,21 @@ func TestPerVUIterationsRunVariableVU(t *testing.T) { result sync.Map slowVUID = uint64(1) ) - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, getTestPerVUIterationsConfig(), es, - simpleRunner(func(ctx context.Context, state *lib.State) error { - if state.VUID == slowVUID { - time.Sleep(200 * time.Millisecond) - } - currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) - result.Store(state.VUID, currIter.(uint64)+1) - return nil - }), - ) - defer cancel() + + runner := simpleRunner(func(ctx context.Context, state 
*lib.State) error { + if state.VUID == slowVUID { + time.Sleep(200 * time.Millisecond) + } + currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) + result.Store(state.VUID, currIter.(uint64)+1) //nolint:forcetypeassert + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestPerVUIterationsConfig()) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) val, ok := result.Load(slowVUID) assert.True(t, ok) @@ -128,8 +118,6 @@ func TestPerVUIterationsRunVariableVU(t *testing.T) { func TestPerVuIterationsEmitDroppedIterations(t *testing.T) { t.Parallel() var count int64 - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) config := PerVUIterationsConfig{ VUs: null.IntFrom(5), @@ -137,22 +125,18 @@ func TestPerVuIterationsEmitDroppedIterations(t *testing.T) { MaxDuration: types.NullDurationFrom(1 * time.Second), } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - atomic.AddInt64(&count, 1) - <-ctx.Done() - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + atomic.AddInt64(&count, 1) + <-ctx.Done() + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) - assert.Empty(t, logHook.Drain()) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) + assert.Empty(t, test.logHook.Drain()) assert.Equal(t, int64(5), count) assert.Equal(t, float64(95), sumMetricValues(engineOut, metrics.DroppedIterationsName)) } diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index 07f611e42a9..9cf473a64f4 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ -457,7 +457,7 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- metrics shownWarning := false metricTags := varr.getMetricTags(nil) go varr.config.cal(varr.et, ch) - droppedIterationMetric := varr.executionState.BuiltinMetrics.DroppedIterations + droppedIterationMetric := varr.executionState.Test.BuiltinMetrics.DroppedIterations for nextTime := range ch { select { case <-regDurationDone: diff --git a/lib/executor/ramping_arrival_rate_test.go b/lib/executor/ramping_arrival_rate_test.go index 8d542ba709d..3ef99121614 100644 --- a/lib/executor/ramping_arrival_rate_test.go +++ b/lib/executor/ramping_arrival_rate_test.go @@ -35,7 +35,6 @@ import ( "gopkg.in/guregu/null.v3" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/types" "go.k6.io/k6/metrics" ) @@ -66,23 +65,18 @@ func getTestRampingArrivalRateConfig() *RampingArrivalRateConfig { func TestRampingArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, getTestRampingArrivalRateConfig(), es, - 
simpleRunner(func(ctx context.Context, _ *lib.State) error { - time.Sleep(time.Second) - return nil - }), - ) - defer cancel() + + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + time.Sleep(time.Second) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestRampingArrivalRateConfig()) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) - entries := logHook.Drain() + require.NoError(t, test.executor.Run(test.ctx, engineOut)) + entries := test.logHook.Drain() require.NotEmpty(t, entries) for _, entry := range entries { require.Equal(t, @@ -95,19 +89,14 @@ func TestRampingArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) { func TestRampingArrivalRateRunCorrectRate(t *testing.T) { t.Parallel() var count int64 - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, getTestRampingArrivalRateConfig(), es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - atomic.AddInt64(&count, 1) - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + atomic.AddInt64(&count, 1) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestRampingArrivalRateConfig()) + defer test.cancel() + var wg sync.WaitGroup wg.Add(1) go func() { @@ -128,19 +117,28 @@ func TestRampingArrivalRateRunCorrectRate(t *testing.T) { assert.InDelta(t, 50, currentCount, 3) }() engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) wg.Wait() - require.NoError(t, err) - require.Empty(t, logHook.Drain()) + require.Empty(t, test.logHook.Drain()) } func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 1, 3) + + config := &RampingArrivalRateConfig{ + TimeUnit: types.NullDurationFrom(time.Second), + Stages: []Stage{ + { + // the minus one makes it so only 9 iterations will be started instead of 10 + // as the 10th happens to be just at the end and sometimes doesn't get executed :( + Duration: types.NullDurationFrom(time.Second*2 - 1), + Target: null.IntFrom(10), + }, + }, + PreAllocatedVUs: null.IntFrom(1), + MaxVUs: null.IntFrom(3), + } + var count int64 ch := make(chan struct{}) // closed when new unplannedVU is started and signal to get to next iterations ch2 := make(chan struct{}) // closed when a second iteration was started on an old VU in order to test it won't start a second unplanned VU in parallel or at all @@ -154,24 +152,12 @@ func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) { return nil }) - ctx, cancel, executor, logHook := setupExecutor( - t, &RampingArrivalRateConfig{ - TimeUnit: types.NullDurationFrom(time.Second), - Stages: []Stage{ - { - // the minus one makes it so only 9 iterations will be started instead of 10 - // as the 10th happens to be just at the end and sometimes doesn't get executed :( - Duration: types.NullDurationFrom(time.Second*2 - 1), - Target: null.IntFrom(10), - }, - }, - 
PreAllocatedVUs: null.IntFrom(1), - MaxVUs: null.IntFrom(3), - }, - es, runner) - defer cancel() + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - es.SetInitVUFunc(func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { + test.state.SetInitVUFunc(func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { cur := atomic.LoadInt64(&count) require.Equal(t, cur, int64(1)) time.Sleep(time.Second / 2) @@ -190,13 +176,12 @@ func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) { time.Sleep(time.Millisecond * 200) cur = atomic.LoadInt64(&count) require.NotEqual(t, cur, int64(2)) - idl, idg := es.GetUniqueVUIdentifiers() + idl, idg := test.state.GetUniqueVUIdentifiers() return runner.NewVU(idl, idg, engineOut) }) - err = executor.Run(ctx, engineOut) - assert.NoError(t, err) - assert.Empty(t, logHook.Drain()) + assert.NoError(t, test.executor.Run(test.ctx, engineOut)) + assert.Empty(t, test.logHook.Drain()) droppedIters := sumMetricValues(engineOut, metrics.DroppedIterationsName) assert.Equal(t, count+int64(droppedIters), int64(9)) @@ -204,11 +189,19 @@ func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) { func TestRampingArrivalRateRunCorrectRateWithSlowRate(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 1, 3) + + config := &RampingArrivalRateConfig{ + TimeUnit: types.NullDurationFrom(time.Second), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(time.Second * 2), + Target: null.IntFrom(10), + }, + }, + PreAllocatedVUs: null.IntFrom(1), + MaxVUs: null.IntFrom(3), + } + var count int64 ch := make(chan struct{}) // closed when new unplannedVU is started and signal to get to next iterations runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { @@ -219,22 +212,12 @@ func TestRampingArrivalRateRunCorrectRateWithSlowRate(t *testing.T) { return nil }) - ctx, cancel, executor, logHook := setupExecutor( - t, &RampingArrivalRateConfig{ - TimeUnit: types.NullDurationFrom(time.Second), - Stages: []Stage{ - { - Duration: types.NullDurationFrom(time.Second * 2), - Target: null.IntFrom(10), - }, - }, - PreAllocatedVUs: null.IntFrom(1), - MaxVUs: null.IntFrom(3), - }, - es, runner) - defer cancel() + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - es.SetInitVUFunc(func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { + test.state.SetInitVUFunc(func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { t.Log("init") cur := atomic.LoadInt64(&count) require.Equal(t, cur, int64(1)) @@ -244,56 +227,50 @@ func TestRampingArrivalRateRunCorrectRateWithSlowRate(t *testing.T) { cur = atomic.LoadInt64(&count) require.NotEqual(t, cur, int64(1)) - idl, idg := es.GetUniqueVUIdentifiers() + idl, idg := test.state.GetUniqueVUIdentifiers() return runner.NewVU(idl, idg, engineOut) }) - err = executor.Run(ctx, engineOut) - assert.NoError(t, err) - assert.Empty(t, logHook.Drain()) - assert.Equal(t, int64(0), es.GetCurrentlyActiveVUsCount()) - assert.Equal(t, int64(2), es.GetInitializedVUsCount()) + assert.NoError(t, test.executor.Run(test.ctx, engineOut)) + assert.Empty(t, test.logHook.Drain()) + assert.Equal(t, 
int64(0), test.state.GetCurrentlyActiveVUsCount()) + assert.Equal(t, int64(2), test.state.GetInitializedVUsCount()) } func TestRampingArrivalRateRunGracefulStop(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 10) + + config := &RampingArrivalRateConfig{ + TimeUnit: types.NullDurationFrom(1 * time.Second), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(2 * time.Second), + Target: null.IntFrom(10), + }, + }, + StartRate: null.IntFrom(10), + PreAllocatedVUs: null.IntFrom(10), + MaxVUs: null.IntFrom(10), + BaseConfig: BaseConfig{ + GracefulStop: types.NullDurationFrom(5 * time.Second), + }, + } runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { time.Sleep(5 * time.Second) return nil }) - ctx, cancel, executor, _ := setupExecutor( - t, &RampingArrivalRateConfig{ - TimeUnit: types.NullDurationFrom(1 * time.Second), - Stages: []Stage{ - { - Duration: types.NullDurationFrom(2 * time.Second), - Target: null.IntFrom(10), - }, - }, - StartRate: null.IntFrom(10), - PreAllocatedVUs: null.IntFrom(10), - MaxVUs: null.IntFrom(10), - BaseConfig: BaseConfig{ - GracefulStop: types.NullDurationFrom(5 * time.Second), - }, - }, - es, runner) - defer cancel() + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() engineOut := make(chan metrics.SampleContainer, 1000) defer close(engineOut) - err = executor.Run(ctx, engineOut) - assert.NoError(t, err) - assert.Equal(t, int64(0), es.GetCurrentlyActiveVUsCount()) - assert.Equal(t, int64(10), es.GetInitializedVUsCount()) - assert.Equal(t, uint64(10), es.GetFullIterationCount()) + assert.NoError(t, test.executor.Run(test.ctx, engineOut)) + assert.Equal(t, int64(0), test.state.GetCurrentlyActiveVUsCount()) + assert.Equal(t, int64(10), test.state.GetInitializedVUsCount()) + assert.Equal(t, uint64(10), test.state.GetFullIterationCount()) } func BenchmarkRampingArrivalRateRun(b *testing.B) { @@ -316,19 +293,18 @@ func BenchmarkRampingArrivalRateRun(b *testing.B) { } }() - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState( - lib.Options{}, mustNewExecutionTuple(nil, nil), builtinMetrics, - uint64(tc.prealloc.Int64), uint64(tc.prealloc.Int64), - ) - var count int64 runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { atomic.AddInt64(&count, 1) return nil }) + testRunState := getTestRunState(b, lib.Options{}, runner) + es := lib.NewExecutionState( + testRunState, mustNewExecutionTuple(nil, nil), + uint64(tc.prealloc.Int64), uint64(tc.prealloc.Int64), + ) + // an high target to get the highest rate target := int64(1e9) @@ -347,8 +323,7 @@ func BenchmarkRampingArrivalRateRun(b *testing.B) { }, PreAllocatedVUs: tc.prealloc, MaxVUs: tc.prealloc, - }, - es, runner) + }, es) defer cancel() b.ResetTimer() @@ -740,32 +715,21 @@ func TestRampingArrivalRateGlobalIters(t *testing.T) { tc := tc t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { t.Parallel() - ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) - require.NoError(t, err) - seg, err := lib.NewExecutionSegmentFromString(tc.seg) - require.NoError(t, err) - et, err := lib.NewExecutionTuple(seg, &ess) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := 
lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 5, 5) - - runner := &minirunner.MiniRunner{} - ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) - defer cancel() gotIters := []uint64{} var mx sync.Mutex - runner.Fn = func(ctx context.Context, state *lib.State, _ chan<- metrics.SampleContainer) error { + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { mx.Lock() gotIters = append(gotIters, state.GetScenarioGlobalVUIter()) mx.Unlock() return nil - } + }) + + test := setupExecutorTest(t, tc.seg, tc.seq, lib.Options{}, runner, config) + defer test.cancel() engineOut := make(chan metrics.SampleContainer, 100) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) assert.Equal(t, tc.expIters, gotIters) }) } @@ -787,13 +751,6 @@ func TestRampingArrivalRateCornerCase(t *testing.T) { et, err := lib.NewExecutionTuple(newExecutionSegmentFromString("1/5:2/5"), newExecutionSegmentSequenceFromString("0,1/5,2/5,1")) require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - - executor, err := config.NewExecutor(es, nil) - require.NoError(t, err) - require.False(t, executor.GetConfig().HasWork(et)) + require.False(t, config.HasWork(et)) } diff --git a/lib/executor/ramping_vus_test.go b/lib/executor/ramping_vus_test.go index bd38bafe965..fbf30495a94 100644 --- a/lib/executor/ramping_vus_test.go +++ b/lib/executor/ramping_vus_test.go @@ -82,21 +82,18 @@ func TestRampingVUsRun(t *testing.T) { } var iterCount int64 - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - // Sleeping for a weird duration somewhat offset from the - // executor ticks to hopefully keep race conditions out of - // our control from failing the test. - time.Sleep(300 * time.Millisecond) - atomic.AddInt64(&iterCount, 1) - return nil - }), - ) - defer cancel() + + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + // Sleeping for a weird duration somewhat offset from the + // executor ticks to hopefully keep race conditions out of + // our control from failing the test. 
+ time.Sleep(300 * time.Millisecond) + atomic.AddInt64(&iterCount, 1) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() sampleTimes := []time.Duration{ 500 * time.Millisecond, @@ -105,12 +102,12 @@ func TestRampingVUsRun(t *testing.T) { } errCh := make(chan error) - go func() { errCh <- executor.Run(ctx, nil) }() + go func() { errCh <- test.executor.Run(test.ctx, nil) }() result := make([]int64, len(sampleTimes)) for i, d := range sampleTimes { time.Sleep(d) - result[i] = es.GetCurrentlyActiveVUsCount() + result[i] = test.state.GetCurrentlyActiveVUsCount() } require.NoError(t, <-errCh) @@ -139,25 +136,22 @@ func TestRampingVUsGracefulStopWaits(t *testing.T) { stop = make(chan struct{}) // the itearation should stop ) - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - close(started) - defer close(stopped) - select { - case <-ctx.Done(): - t.Fatal("The iterations should've ended before the context") - case <-stop: - } - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + close(started) + defer close(stopped) + select { + case <-ctx.Done(): + t.Fatal("The iterations should've ended before the context") + case <-stop: + } + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + errCh := make(chan error) - go func() { errCh <- executor.Run(ctx, nil) }() + go func() { errCh <- test.executor.Run(test.ctx, nil) }() <-started // 500 milliseconds more then the duration and 500 less then the gracefulStop @@ -188,25 +182,22 @@ func TestRampingVUsGracefulStopStops(t *testing.T) { stop = make(chan struct{}) // the itearation should stop ) - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - close(started) - defer close(stopped) - select { - case <-ctx.Done(): - case <-stop: - t.Fatal("The iterations shouldn't have ended before the context") - } - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + close(started) + defer close(stopped) + select { + case <-ctx.Done(): + case <-stop: + t.Fatal("The iterations shouldn't have ended before the context") + } + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + errCh := make(chan error) - go func() { errCh <- executor.Run(ctx, nil) }() + go func() { errCh <- test.executor.Run(test.ctx, nil) }() <-started // 500 milliseconds more then the gracefulStop + duration @@ -242,29 +233,26 @@ func TestRampingVUsGracefulRampDown(t *testing.T) { stop = make(chan struct{}) // the itearation should stop ) - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, state *lib.State) error { - if state.VUID == 1 { // the first VU will wait here to do stuff - close(started) - defer close(stopped) - select { - case <-ctx.Done(): - t.Fatal("The iterations can't have ended before the context") - case 
<-stop: - } - } else { // all other (1) VUs will just sleep long enough - time.Sleep(2500 * time.Millisecond) + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { + if state.VUID == 1 { // the first VU will wait here to do stuff + close(started) + defer close(stopped) + select { + case <-ctx.Done(): + t.Fatal("The iterations can't have ended before the context") + case <-stop: } - return nil - }), - ) - defer cancel() + } else { // all other (1) VUs will just sleep long enough + time.Sleep(2500 * time.Millisecond) + } + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + errCh := make(chan error) - go func() { errCh <- executor.Run(ctx, nil) }() + go func() { errCh <- test.executor.Run(test.ctx, nil) }() <-started // 500 milliseconds more then the gracefulRampDown + duration @@ -333,7 +321,7 @@ func TestRampingVUsHandleRemainingVUs(t *testing.T) { gotVuInterrupted uint32 gotVuFinished uint32 ) - iteration := func(ctx context.Context, _ *lib.State) error { + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { select { case <-time.After(vuSleepDuration): atomic.AddUint32(&gotVuFinished, 1) @@ -341,19 +329,14 @@ func TestRampingVUsHandleRemainingVUs(t *testing.T) { atomic.AddUint32(&gotVuInterrupted, 1) } return nil - } + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, cfg) + defer test.cancel() // run the executor: this should finish in ~70ms // sum(stages) + GracefulRampDown - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - ctx, cancel, executor, _ := setupExecutor( - t, cfg, - lib.NewExecutionState(lib.Options{}, et, nil, maxVus, maxVus), - simpleRunner(iteration), - ) - defer cancel() - require.NoError(t, executor.Run(ctx, nil)) + require.NoError(t, test.executor.Run(test.ctx, nil)) assert.Equal(t, wantVuInterrupted, atomic.LoadUint32(&gotVuInterrupted)) assert.Equal(t, wantVuFinished, atomic.LoadUint32(&gotVuFinished)) @@ -380,17 +363,13 @@ func TestRampingVUsRampDownNoWobble(t *testing.T) { }, } - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - time.Sleep(500 * time.Millisecond) - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + time.Sleep(500 * time.Millisecond) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() sampleTimes := []time.Duration{ 100 * time.Millisecond, @@ -400,18 +379,18 @@ func TestRampingVUsRampDownNoWobble(t *testing.T) { rampDownSamples := int((config.Stages[len(config.Stages)-1].Duration.TimeDuration() + config.GracefulRampDown.TimeDuration()) / rampDownSampleTime) errCh := make(chan error) - go func() { errCh <- executor.Run(ctx, nil) }() + go func() { errCh <- test.executor.Run(test.ctx, nil) }() result := make([]int64, len(sampleTimes)+rampDownSamples) for i, d := range sampleTimes { time.Sleep(d) - result[i] = es.GetCurrentlyActiveVUsCount() + result[i] = test.state.GetCurrentlyActiveVUsCount() } // Sample ramp-down at a higher rate for i := len(sampleTimes); i < rampDownSamples; i++ { time.Sleep(rampDownSampleTime) - result[i] = es.GetCurrentlyActiveVUsCount() + result[i] = test.state.GetCurrentlyActiveVUsCount() } require.NoError(t, <-errCh) diff --git a/lib/executor/shared_iterations.go 
b/lib/executor/shared_iterations.go index cb419e2f3d2..d6d7986c5fa 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -238,7 +238,7 @@ func (si SharedIterations) Run(parentCtx context.Context, out chan<- metrics.Sam if attemptedIters < totalIters { metrics.PushIfNotDone(parentCtx, out, metrics.Sample{ Value: float64(totalIters - attemptedIters), - Metric: si.executionState.BuiltinMetrics.DroppedIterations, + Metric: si.executionState.Test.BuiltinMetrics.DroppedIterations, Tags: si.getMetricTags(nil), Time: time.Now(), }) } diff --git a/lib/executor/shared_iterations_test.go b/lib/executor/shared_iterations_test.go index f4e65f6aa6a..d3c5d6b3402 100644 --- a/lib/executor/shared_iterations_test.go +++ b/lib/executor/shared_iterations_test.go @@ -34,7 +34,6 @@ import ( "gopkg.in/guregu/null.v3" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/types" "go.k6.io/k6/metrics" ) @@ -51,21 +50,16 @@ func getTestSharedIterationsConfig() SharedIterationsConfig { func TestSharedIterationsRun(t *testing.T) { t.Parallel() var doneIters uint64 - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, getTestSharedIterationsConfig(), es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - atomic.AddUint64(&doneIters, 1) - return nil - }), - ) - defer cancel() - err = executor.Run(ctx, nil) - require.NoError(t, err) + + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + atomic.AddUint64(&doneIters, 1) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestSharedIterationsConfig()) + defer test.cancel() + + require.NoError(t, test.executor.Run(test.ctx, nil)) assert.Equal(t, uint64(100), doneIters) } @@ -77,31 +71,26 @@ func TestSharedIterationsRunVariableVU(t *testing.T) { result sync.Map slowVUID uint64 ) - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, getTestSharedIterationsConfig(), es, - simpleRunner(func(ctx context.Context, state *lib.State) error { - time.Sleep(10 * time.Millisecond) // small wait to stabilize the test - // Pick one VU randomly and always slow it down. - sid := atomic.LoadUint64(&slowVUID) - if sid == uint64(0) { - atomic.StoreUint64(&slowVUID, state.VUID) - } - if sid == state.VUID { - time.Sleep(200 * time.Millisecond) - } - currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) - result.Store(state.VUID, currIter.(uint64)+1) - return nil - }), - ) - defer cancel() - err = executor.Run(ctx, nil) - require.NoError(t, err) + + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { + time.Sleep(10 * time.Millisecond) // small wait to stabilize the test + // Pick one VU randomly and always slow it down. 
+ sid := atomic.LoadUint64(&slowVUID) + if sid == uint64(0) { + atomic.StoreUint64(&slowVUID, state.VUID) + } + if sid == state.VUID { + time.Sleep(200 * time.Millisecond) + } + currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) + result.Store(state.VUID, currIter.(uint64)+1) //nolint:forcetypeassert + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestSharedIterationsConfig()) + defer test.cancel() + + require.NoError(t, test.executor.Run(test.ctx, nil)) var totalIters uint64 result.Range(func(key, value interface{}) bool { @@ -120,8 +109,12 @@ func TestSharedIterationsRunVariableVU(t *testing.T) { func TestSharedIterationsEmitDroppedIterations(t *testing.T) { t.Parallel() var count int64 - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) + + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + atomic.AddInt64(&count, 1) + <-ctx.Done() + return nil + }) config := &SharedIterationsConfig{ VUs: null.IntFrom(5), @@ -129,22 +122,12 @@ func TestSharedIterationsEmitDroppedIterations(t *testing.T) { MaxDuration: types.NullDurationFrom(1 * time.Second), } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - atomic.AddInt64(&count, 1) - <-ctx.Done() - return nil - }), - ) - defer cancel() + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) - assert.Empty(t, logHook.Drain()) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) + assert.Empty(t, test.logHook.Drain()) assert.Equal(t, int64(5), count) assert.Equal(t, float64(95), sumMetricValues(engineOut, metrics.DroppedIterationsName)) } @@ -171,32 +154,21 @@ func TestSharedIterationsGlobalIters(t *testing.T) { tc := tc t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { t.Parallel() - ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) - require.NoError(t, err) - seg, err := lib.NewExecutionSegmentFromString(tc.seg) - require.NoError(t, err) - et, err := lib.NewExecutionTuple(seg, &ess) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 5, 5) - - runner := &minirunner.MiniRunner{} - ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) - defer cancel() gotIters := []uint64{} var mx sync.Mutex - runner.Fn = func(ctx context.Context, state *lib.State, _ chan<- metrics.SampleContainer) error { + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { mx.Lock() gotIters = append(gotIters, state.GetScenarioGlobalVUIter()) mx.Unlock() return nil - } + }) + + test := setupExecutorTest(t, tc.seg, tc.seq, lib.Options{}, runner, config) + defer test.cancel() engineOut := make(chan metrics.SampleContainer, 100) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) sort.Slice(gotIters, func(i, j int) bool { return gotIters[i] < gotIters[j] }) assert.Equal(t, tc.expIters, gotIters) }) diff --git a/lib/runtime_state.go b/lib/runtime_state.go deleted file mode 100644 index d32e6c8e2ee..00000000000 --- 
a/lib/runtime_state.go +++ /dev/null @@ -1,18 +0,0 @@ -package lib - -import ( - "io" - - "github.com/sirupsen/logrus" - "go.k6.io/k6/metrics" -) - -// RuntimeState represents what is mostly needed during the running of a test -type RuntimeState struct { - RuntimeOptions RuntimeOptions - // TODO maybe have a struct `Metrics` with `Registry` and `Builtin` ? - Registry *metrics.Registry - BuiltinMetrics *metrics.BuiltinMetrics - KeyLogger io.Writer - Logger *logrus.Logger -} diff --git a/lib/test_state.go b/lib/test_state.go new file mode 100644 index 00000000000..94fc56382f5 --- /dev/null +++ b/lib/test_state.go @@ -0,0 +1,34 @@ +package lib + +import ( + "io" + + "github.com/sirupsen/logrus" + "go.k6.io/k6/metrics" +) + +// TestPreInitState contains all of the state that can be gathered and built +// before the test run is initialized. +type TestPreInitState struct { + RuntimeOptions RuntimeOptions + Registry *metrics.Registry + BuiltinMetrics *metrics.BuiltinMetrics + KeyLogger io.Writer + + // TODO: replace with logrus.FieldLogger when all of the tests can be fixed + Logger *logrus.Logger +} + +// TestRunState contains the pre-init state as well as all of the state and +// options that are necessary for actually running the test. +type TestRunState struct { + *TestPreInitState + + Options Options + Runner Runner // TODO: rename to something better, see type comment + + // TODO: add atlas root node + + // TODO: add other properties that are computed or derived after init, e.g. + // thresholds? +} diff --git a/lib/state.go b/lib/vu_state.go similarity index 89% rename from lib/state.go rename to lib/vu_state.go index 89be0af3b17..8b8cca79273 100644 --- a/lib/state.go +++ b/lib/vu_state.go @@ -42,11 +42,17 @@ type DialContexter interface { // State provides the volatile state for a VU. type State struct { - // Global options. - Options Options + // Global options and built-in metrics. + // + // TODO: remove them from here, the built-in metrics and the script options + // are not part of a VU's unique "state", they are global and the same for + // all VUs. Figure out how to thread them some other way, e.g. through the + // TestPreInitState. The Samples channel might also benefit from that... + Options Options + BuiltinMetrics *metrics.BuiltinMetrics // Logger. Avoid using the global logger. - // TODO change to logrus.FieldLogger when there is time to fix all the tests + // TODO: change to logrus.FieldLogger when there is time to fix all the tests Logger *logrus.Logger // Current group; all emitted metrics are tagged with this. @@ -85,8 +91,6 @@ type State struct { // unique globally across k6 instances (taking into account execution // segments). GetScenarioGlobalVUIter func() uint64 - - BuiltinMetrics *metrics.BuiltinMetrics } // CloneTags makes a copy of the tags map and returns it. diff --git a/metrics/engine/engine.go b/metrics/engine/engine.go index 080070c4d37..f7ac6d7403f 100644 --- a/metrics/engine/engine.go +++ b/metrics/engine/engine.go @@ -18,11 +18,8 @@ import ( // aggregated metric sample values. They are used to generate the end-of-test // summary and to evaluate the test thresholds. 
type MetricsEngine struct { - registry *metrics.Registry - executionState *lib.ExecutionState - options lib.Options - runtimeOptions lib.RuntimeOptions - logger logrus.FieldLogger + es *lib.ExecutionState + logger logrus.FieldLogger // These can be both top-level metrics or sub-metrics metricsWithThresholds []*metrics.Metric @@ -37,21 +34,15 @@ type MetricsEngine struct { } // NewMetricsEngine creates a new metrics Engine with the given parameters. -func NewMetricsEngine( - registry *metrics.Registry, executionState *lib.ExecutionState, - opts lib.Options, rtOpts lib.RuntimeOptions, logger logrus.FieldLogger, -) (*MetricsEngine, error) { +func NewMetricsEngine(es *lib.ExecutionState) (*MetricsEngine, error) { me := &MetricsEngine{ - registry: registry, - executionState: executionState, - options: opts, - runtimeOptions: rtOpts, - logger: logger.WithField("component", "metrics-engine"), + es: es, + logger: es.Test.Logger.WithField("component", "metrics-engine"), ObservedMetrics: make(map[string]*metrics.Metric), } - if !(me.runtimeOptions.NoSummary.Bool && me.runtimeOptions.NoThresholds.Bool) { + if !(me.es.Test.RuntimeOptions.NoSummary.Bool && me.es.Test.RuntimeOptions.NoThresholds.Bool) { err := me.initSubMetricsAndThresholds() if err != nil { return nil, err @@ -74,7 +65,7 @@ func (me *MetricsEngine) getThresholdMetricOrSubmetric(name string) (*metrics.Me // TODO: replace with strings.Cut after Go 1.18 nameParts := strings.SplitN(name, "{", 2) - metric := me.registry.Get(nameParts[0]) + metric := me.es.Test.Registry.Get(nameParts[0]) if metric == nil { return nil, fmt.Errorf("metric '%s' does not exist in the script", nameParts[0]) } @@ -136,10 +127,10 @@ func (me *MetricsEngine) markObserved(metric *metrics.Metric) { } func (me *MetricsEngine) initSubMetricsAndThresholds() error { - for metricName, thresholds := range me.options.Thresholds { + for metricName, thresholds := range me.es.Test.Options.Thresholds { metric, err := me.getThresholdMetricOrSubmetric(metricName) - if me.runtimeOptions.NoThresholds.Bool { + if me.es.Test.RuntimeOptions.NoThresholds.Bool { if err != nil { me.logger.WithError(err).Warnf("Invalid metric '%s' in threshold definitions", metricName) } @@ -164,7 +155,7 @@ func (me *MetricsEngine) initSubMetricsAndThresholds() error { // TODO: refactor out of here when https://github.com/grafana/k6/issues/1321 // lands and there is a better way to enable a metric with tag - if me.options.SystemTags.Has(metrics.TagExpectedResponse) { + if me.es.Test.Options.SystemTags.Has(metrics.TagExpectedResponse) { _, err := me.getThresholdMetricOrSubmetric("http_req_duration{expected_response:true}") if err != nil { return err // shouldn't happen, but ¯\_(ツ)_/¯ @@ -181,7 +172,7 @@ func (me *MetricsEngine) EvaluateThresholds(ignoreEmptySinks bool) (thresholdsTa me.MetricsLock.Lock() defer me.MetricsLock.Unlock() - t := me.executionState.GetCurrentTestRunDuration() + t := me.es.GetCurrentTestRunDuration() for _, m := range me.metricsWithThresholds { // If either the metric has no thresholds defined, or its sinks
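Note: all of the executor tests in the hunks above drop their hand-rolled ExecutionTuple, Registry and ExecutionState setup in favour of a shared setupExecutorTest helper, whose definition is not part of this diff (it presumably lives in the package's common test file). The sketch below only reconstructs, from the call sites visible here, the plumbing such a helper would wrap; the exact signature, hooked log levels and fixed VU counts are assumptions, not the actual implementation.

// Sketch of the shared test helper assumed by the hunks above; the real
// definition is not included in this diff, so everything below is a
// reconstruction from the visible call sites, not the actual implementation.
package executor

import (
	"context"
	"testing"

	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/require"

	"go.k6.io/k6/lib"
	"go.k6.io/k6/lib/testutils"
	"go.k6.io/k6/metrics"
)

// executorTest bundles what the tests above access as test.ctx, test.cancel,
// test.state, test.executor and test.logHook.
type executorTest struct {
	ctx      context.Context
	cancel   context.CancelFunc
	state    *lib.ExecutionState
	executor lib.Executor
	logHook  *testutils.SimpleLogrusHook
}

func setupExecutorTest(
	tb testing.TB, segmentStr, sequenceStr string, options lib.Options,
	runner lib.Runner, config lib.ExecutorConfig,
) *executorTest {
	var (
		segment  *lib.ExecutionSegment
		sequence *lib.ExecutionSegmentSequence
		err      error
	)
	if segmentStr != "" {
		segment, err = lib.NewExecutionSegmentFromString(segmentStr)
		require.NoError(tb, err)
	}
	if sequenceStr != "" {
		seq, seqErr := lib.NewExecutionSegmentSequenceFromString(sequenceStr)
		require.NoError(tb, seqErr)
		sequence = &seq
	}
	et, err := lib.NewExecutionTuple(segment, sequence)
	require.NoError(tb, err)

	// Presumably the real helper also threads the segment into the options;
	// done here so the executor sees the same view as the removed code did.
	options.ExecutionSegment = segment
	options.ExecutionSegmentSequence = sequence

	// Capture warnings so tests can inspect them via test.logHook.Drain();
	// the real helper may hook more levels.
	logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.WarnLevel}}
	logger := logrus.New()
	logger.AddHook(logHook)

	reg := metrics.NewRegistry()
	testRunState := &lib.TestRunState{
		TestPreInitState: &lib.TestPreInitState{
			Logger:         logger,
			Registry:       reg,
			BuiltinMetrics: metrics.RegisterBuiltinMetrics(reg),
		},
		Options: options,
		Runner:  runner,
	}

	// Most of the removed setup used 10 pre-allocated / 50 max VUs; the real
	// helper may derive these values from the executor config instead.
	es := lib.NewExecutionState(testRunState, et, 10, 50)

	exec, err := config.NewExecutor(es, logrus.NewEntry(logger))
	require.NoError(tb, err)

	ctx, cancel := context.WithCancel(context.Background())
	// Any VU pre-initialization performed by the real helper is omitted here.
	return &executorTest{ctx: ctx, cancel: cancel, state: es, executor: exec, logHook: logHook}
}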
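The MetricsEngine change follows the same pattern: NewMetricsEngine now takes only the ExecutionState, because es.Test (the new lib.TestRunState) already carries the registry, options, runtime options and logger that used to be passed in separately. Below is a minimal sketch of the new call shape, built only from the constructors and types shown in this diff; the package name engine, the test name and the wiring are assumptions, not code from this change.

// Hypothetical example file; the test name and wiring are illustrative
// assumptions built only from the constructors and types shown in this diff.
package engine_test

import (
	"testing"

	"github.com/stretchr/testify/require"
	"gopkg.in/guregu/null.v3"

	"go.k6.io/k6/lib"
	"go.k6.io/k6/lib/testutils"
	"go.k6.io/k6/metrics"
	"go.k6.io/k6/metrics/engine"
)

func TestNewMetricsEngineSignature(t *testing.T) {
	reg := metrics.NewRegistry()
	runState := &lib.TestRunState{
		TestPreInitState: &lib.TestPreInitState{
			Logger:         testutils.NewLogger(t),
			Registry:       reg,
			BuiltinMetrics: metrics.RegisterBuiltinMetrics(reg),
			// Disabling summary and thresholds keeps this sketch independent
			// of threshold parsing.
			RuntimeOptions: lib.RuntimeOptions{
				NoSummary:    null.BoolFrom(true),
				NoThresholds: null.BoolFrom(true),
			},
		},
		Options: lib.Options{},
	}

	et, err := lib.NewExecutionTuple(nil, nil)
	require.NoError(t, err)
	es := lib.NewExecutionState(runState, et, 0, 0)

	// Registry, Options, RuntimeOptions and the logger are all reachable
	// through es.Test, so the constructor takes a single argument now.
	me, err := engine.NewMetricsEngine(es)
	require.NoError(t, err)
	require.NotNil(t, me)
}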