From 3d491bfb76de890dd6d7724fd68af68ce42e8cf8 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Sun, 31 Jul 2022 11:45:46 +0300 Subject: [PATCH 01/12] Propagate lib.RuntimeState instead of passing its constituent parts --- js/bundle.go | 38 ++++++++++++++++-------------------- js/bundle_test.go | 43 ++++++++++++++++++++++++++--------------- js/runner.go | 46 ++++++++++++++++++-------------------------- js/runner_test.go | 6 +++--- lib/runtime_state.go | 4 +++- 5 files changed, 70 insertions(+), 67 deletions(-) diff --git a/js/bundle.go b/js/bundle.go index 025061cd8c7..6e653ccf0a5 100644 --- a/js/bundle.go +++ b/js/bundle.go @@ -73,22 +73,19 @@ type BundleInstance struct { } // NewBundle creates a new bundle from a source file and a filesystem. -func NewBundle( - logger logrus.FieldLogger, src *loader.SourceData, filesystems map[string]afero.Fs, rtOpts lib.RuntimeOptions, - registry *metrics.Registry, -) (*Bundle, error) { - compatMode, err := lib.ValidateCompatibilityMode(rtOpts.CompatibilityMode.String) +func NewBundle(rs *lib.RuntimeState, src *loader.SourceData, filesystems map[string]afero.Fs) (*Bundle, error) { + compatMode, err := lib.ValidateCompatibilityMode(rs.RuntimeOptions.CompatibilityMode.String) if err != nil { return nil, err } // Compile sources, both ES5 and ES6 are supported. 
code := string(src.Data) - c := compiler.New(logger) + c := compiler.New(rs.Logger) c.Options = compiler.Options{ CompatibilityMode: compatMode, Strict: true, - SourceMapLoader: generateSourceMapLoader(logger, filesystems), + SourceMapLoader: generateSourceMapLoader(rs.Logger, filesystems), } pgm, _, err := c.Compile(code, src.URL.String(), false) if err != nil { @@ -100,17 +97,17 @@ func NewBundle( Filename: src.URL, Source: code, Program: pgm, - BaseInitContext: NewInitContext(logger, rt, c, compatMode, filesystems, loader.Dir(src.URL)), - RuntimeOptions: rtOpts, + BaseInitContext: NewInitContext(rs.Logger, rt, c, compatMode, filesystems, loader.Dir(src.URL)), + RuntimeOptions: rs.RuntimeOptions, CompatibilityMode: compatMode, exports: make(map[string]goja.Callable), - registry: registry, + registry: rs.Registry, } - if err = bundle.instantiate(logger, rt, bundle.BaseInitContext, 0); err != nil { + if err = bundle.instantiate(rs.Logger, rt, bundle.BaseInitContext, 0); err != nil { return nil, err } - err = bundle.getExports(logger, rt, true) + err = bundle.getExports(rs.Logger, rt, true) if err != nil { return nil, err } @@ -119,13 +116,12 @@ func NewBundle( } // NewBundleFromArchive creates a new bundle from an lib.Archive. 
-func NewBundleFromArchive( - logger logrus.FieldLogger, arc *lib.Archive, rtOpts lib.RuntimeOptions, registry *metrics.Registry, -) (*Bundle, error) { +func NewBundleFromArchive(rs *lib.RuntimeState, arc *lib.Archive) (*Bundle, error) { if arc.Type != "js" { return nil, fmt.Errorf("expected bundle type 'js', got '%s'", arc.Type) } + rtOpts := rs.RuntimeOptions // copy the struct from the RuntimeState if !rtOpts.CompatibilityMode.Valid { // `k6 run --compatibility-mode=whatever archive.tar` should override // whatever value is in the archive @@ -136,18 +132,18 @@ func NewBundleFromArchive( return nil, err } - c := compiler.New(logger) + c := compiler.New(rs.Logger) c.Options = compiler.Options{ Strict: true, CompatibilityMode: compatMode, - SourceMapLoader: generateSourceMapLoader(logger, arc.Filesystems), + SourceMapLoader: generateSourceMapLoader(rs.Logger, arc.Filesystems), } pgm, _, err := c.Compile(string(arc.Data), arc.FilenameURL.String(), false) if err != nil { return nil, err } rt := goja.New() - initctx := NewInitContext(logger, rt, c, compatMode, arc.Filesystems, arc.PwdURL) + initctx := NewInitContext(rs.Logger, rt, c, compatMode, arc.Filesystems, arc.PwdURL) env := arc.Env if env == nil { @@ -168,16 +164,16 @@ func NewBundleFromArchive( RuntimeOptions: rtOpts, CompatibilityMode: compatMode, exports: make(map[string]goja.Callable), - registry: registry, + registry: rs.Registry, } - if err = bundle.instantiate(logger, rt, bundle.BaseInitContext, 0); err != nil { + if err = bundle.instantiate(rs.Logger, rt, bundle.BaseInitContext, 0); err != nil { return nil, err } // Grab exported objects, but avoid overwriting options, which would // be initialized from the metadata.json at this point. 
- err = bundle.getExports(logger, rt, false) + err = bundle.getExports(rs.Logger, rt, false) if err != nil { return nil, err } diff --git a/js/bundle_test.go b/js/bundle_test.go index 4d478562263..0f4f588f240 100644 --- a/js/bundle_test.go +++ b/js/bundle_test.go @@ -50,31 +50,44 @@ import ( const isWindows = runtime.GOOS == "windows" +func getRuntimeState(tb testing.TB, logger *logrus.Logger, rtOpts *lib.RuntimeOptions) *lib.RuntimeState { + if logger == nil { + logger = testutils.NewLogger(tb) + } + if rtOpts == nil { + rtOpts = &lib.RuntimeOptions{} + } + reg := metrics.NewRegistry() + return &lib.RuntimeState{ + Logger: logger, + RuntimeOptions: *rtOpts, + Registry: reg, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(reg), + } +} + func getSimpleBundle(tb testing.TB, filename, data string, opts ...interface{}) (*Bundle, error) { - var ( - fs = afero.NewMemMapFs() - rtOpts = lib.RuntimeOptions{} - logger logrus.FieldLogger = testutils.NewLogger(tb) - ) + fs := afero.NewMemMapFs() + var rtOpts *lib.RuntimeOptions + var logger *logrus.Logger for _, o := range opts { switch opt := o.(type) { case afero.Fs: fs = opt case lib.RuntimeOptions: - rtOpts = opt - case logrus.FieldLogger: + rtOpts = &opt + case *logrus.Logger: logger = opt } } + return NewBundle( - logger, + getRuntimeState(tb, logger, rtOpts), &loader.SourceData{ URL: &url.URL{Path: filename, Scheme: "file"}, Data: []byte(data), }, map[string]afero.Fs{"file": fs, "https": afero.NewMemMapFs()}, - rtOpts, - metrics.NewRegistry(), ) } @@ -489,7 +502,7 @@ func TestNewBundleFromArchive(t *testing.T) { } checkArchive := func(t *testing.T, arc *lib.Archive, rtOpts lib.RuntimeOptions, expError string) { - b, err := NewBundleFromArchive(logger, arc, rtOpts, metrics.NewRegistry()) + b, err := NewBundleFromArchive(getRuntimeState(t, logger, &rtOpts), arc) if expError != "" { require.Error(t, err) require.Contains(t, err.Error(), expError) @@ -572,7 +585,7 @@ func TestNewBundleFromArchive(t *testing.T) { PwdURL: 
&url.URL{Scheme: "file", Path: "/"}, Filesystems: nil, } - b, err := NewBundleFromArchive(logger, arc, lib.RuntimeOptions{}, metrics.NewRegistry()) + b, err := NewBundleFromArchive(getRuntimeState(t, logger, nil), arc) require.NoError(t, err) bi, err := b.Instantiate(logger, 0) require.NoError(t, err) @@ -711,7 +724,7 @@ func TestOpen(t *testing.T) { } require.NoError(t, err) - arcBundle, err := NewBundleFromArchive(logger, sourceBundle.makeArchive(), lib.RuntimeOptions{}, metrics.NewRegistry()) + arcBundle, err := NewBundleFromArchive(getRuntimeState(t, logger, nil), sourceBundle.makeArchive()) require.NoError(t, err) @@ -811,7 +824,7 @@ func TestBundleEnv(t *testing.T) { require.NoError(t, err) logger := testutils.NewLogger(t) - b2, err := NewBundleFromArchive(logger, b1.makeArchive(), lib.RuntimeOptions{}, metrics.NewRegistry()) + b2, err := NewBundleFromArchive(getRuntimeState(t, logger, nil), b1.makeArchive()) require.NoError(t, err) bundles := map[string]*Bundle{"Source": b1, "Archive": b2} @@ -848,7 +861,7 @@ func TestBundleNotSharable(t *testing.T) { require.NoError(t, err) logger := testutils.NewLogger(t) - b2, err := NewBundleFromArchive(logger, b1.makeArchive(), lib.RuntimeOptions{}, metrics.NewRegistry()) + b2, err := NewBundleFromArchive(getRuntimeState(t, logger, nil), b1.makeArchive()) require.NoError(t, err) bundles := map[string]*Bundle{"Source": b1, "Archive": b2} diff --git a/js/runner.go b/js/runner.go index d88d3e4bb23..db280d43d7b 100644 --- a/js/runner.go +++ b/js/runner.go @@ -38,7 +38,6 @@ import ( "github.com/dop251/goja" "github.com/oxtoacart/bpool" - "github.com/sirupsen/logrus" "github.com/spf13/afero" "golang.org/x/net/http2" "golang.org/x/time/rate" @@ -65,11 +64,9 @@ var _ lib.Runner = &Runner{} var nameToCertWarning sync.Once type Runner struct { - Bundle *Bundle - Logger *logrus.Logger - defaultGroup *lib.Group - builtinMetrics *metrics.BuiltinMetrics - registry *metrics.Registry + Bundle *Bundle + runtimeState *lib.RuntimeState + 
defaultGroup *lib.Group BaseDialer net.Dialer Resolver netext.Resolver @@ -79,15 +76,11 @@ type Runner struct { console *console setupData []byte - - keylogger io.Writer } -// New returns a new Runner for the provide source -func New( - rs *lib.RuntimeState, src *loader.SourceData, filesystems map[string]afero.Fs, -) (*Runner, error) { - bundle, err := NewBundle(rs.Logger, src, filesystems, rs.RuntimeOptions, rs.Registry) +// New returns a new Runner for the provided source +func New(rs *lib.RuntimeState, src *loader.SourceData, filesystems map[string]afero.Fs) (*Runner, error) { + bundle, err := NewBundle(rs, src, filesystems) if err != nil { return nil, err } @@ -97,7 +90,7 @@ func New( // NewFromArchive returns a new Runner from the source in the provided archive func NewFromArchive(rs *lib.RuntimeState, arc *lib.Archive) (*Runner, error) { - bundle, err := NewBundleFromArchive(rs.Logger, arc, rs.RuntimeOptions, rs.Registry) + bundle, err := NewBundleFromArchive(rs, arc) if err != nil { return nil, err } @@ -115,7 +108,7 @@ func NewFromBundle(rs *lib.RuntimeState, b *Bundle) (*Runner, error) { defDNS := types.DefaultDNSConfig() r := &Runner{ Bundle: b, - Logger: rs.Logger, + runtimeState: rs, defaultGroup: defaultGroup, BaseDialer: net.Dialer{ Timeout: 30 * time.Second, @@ -126,9 +119,6 @@ func NewFromBundle(rs *lib.RuntimeState, b *Bundle) (*Runner, error) { Resolver: netext.NewResolver( net.LookupIP, 0, defDNS.Select.DNSSelect, defDNS.Policy.DNSPolicy), ActualResolver: net.LookupIP, - builtinMetrics: rs.BuiltinMetrics, - registry: rs.Registry, - keylogger: rs.KeyLogger, } err = r.SetOptions(r.Bundle.Options) @@ -152,7 +142,7 @@ func (r *Runner) NewVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.Sampl //nolint:funlen func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.SampleContainer) (*VU, error) { // Instantiate a new bundle, make a VU out of it. 
- bi, err := r.Bundle.Instantiate(r.Logger, idLocal) + bi, err := r.Bundle.Instantiate(r.runtimeState.Logger, idLocal) if err != nil { return nil, err } @@ -203,15 +193,17 @@ func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.Sampl MaxVersion: uint16(tlsVersions.Max), Certificates: certs, Renegotiation: tls.RenegotiateFreelyAsClient, - KeyLogWriter: r.keylogger, + KeyLogWriter: r.runtimeState.KeyLogger, } // Follow NameToCertificate in https://pkg.go.dev/crypto/tls@go1.17.6#Config, leave this field nil // when it is empty if len(nameToCert) > 0 { nameToCertWarning.Do(func() { - r.Logger.Warn("tlsAuth.domains option could be removed in the next releases, it's recommended to leave it empty " + - "and let k6 automatically detect from the provided certificate. It follows the Go's NameToCertificate " + - "deprecation - https://pkg.go.dev/crypto/tls@go1.17#Config.") + r.runtimeState.Logger.Warn( + "tlsAuth.domains option could be removed in the next releases, it's recommended to leave it empty " + + "and let k6 automatically detect from the provided certificate. It follows the Go's NameToCertificate " + + "deprecation - https://pkg.go.dev/crypto/tls@go1.17#Config.", + ) }) //nolint:staticcheck // ignore SA1019 we can deprecate it but we have to continue to support the previous code. 
tlsConfig.NameToCertificate = nameToCert @@ -254,7 +246,7 @@ func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.Sampl } vu.state = &lib.State{ - Logger: vu.Runner.Logger, + Logger: vu.Runner.runtimeState.Logger, Options: vu.Runner.Bundle.Options, Transport: vu.Transport, Dialer: vu.Dialer, @@ -267,7 +259,7 @@ func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.Sampl Samples: vu.Samples, Tags: lib.NewTagMap(vu.Runner.Bundle.Options.RunTags.CloneTags()), Group: r.defaultGroup, - BuiltinMetrics: r.builtinMetrics, + BuiltinMetrics: r.runtimeState.BuiltinMetrics, } vu.moduleVUImpl.state = vu.state _ = vu.Runtime.Set("console", vu.Console) @@ -442,7 +434,7 @@ func (r *Runner) SetOptions(opts lib.Options) error { // TODO: validate that all exec values are either nil or valid exported methods (or HTTP requests in the future) if opts.ConsoleOutput.Valid { - c, err := newFileConsole(opts.ConsoleOutput.String, r.Logger.Formatter) + c, err := newFileConsole(opts.ConsoleOutput.String, r.runtimeState.Logger.Formatter) if err != nil { return err } @@ -819,7 +811,7 @@ func (u *VU) runFn( sampleTags := metrics.NewSampleTags(u.state.CloneTags()) u.state.Samples <- u.Dialer.GetTrail( - startTime, endTime, isFullIteration, isDefault, sampleTags, u.Runner.builtinMetrics) + startTime, endTime, isFullIteration, isDefault, sampleTags, u.Runner.runtimeState.BuiltinMetrics) return v, isFullIteration, endTime.Sub(startTime), err } diff --git a/js/runner_test.go b/js/runner_test.go index 66913ec2b00..f3aa93bbf8a 100644 --- a/js/runner_test.go +++ b/js/runner_test.go @@ -860,7 +860,7 @@ func TestVUIntegrationInsecureRequests(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - r.Logger, _ = logtest.NewNullLogger() + r.runtimeState.Logger, _ = logtest.NewNullLogger() initVU, err := r.NewVU(1, 1, make(chan metrics.SampleContainer, 100)) require.NoError(t, err) @@ -1169,7 +1169,7 @@ func TestVUIntegrationTLSConfig(t *testing.T) { r 
:= r t.Run(name, func(t *testing.T) { t.Parallel() - r.Logger, _ = logtest.NewNullLogger() + r.runtimeState.Logger, _ = logtest.NewNullLogger() initVU, err := r.NewVU(1, 1, make(chan metrics.SampleContainer, 100)) require.NoError(t, err) @@ -1596,7 +1596,7 @@ func TestVUIntegrationClientCerts(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - r.Logger, _ = logtest.NewNullLogger() + r.runtimeState.Logger, _ = logtest.NewNullLogger() initVU, err := r.NewVU(1, 1, make(chan metrics.SampleContainer, 100)) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) diff --git a/lib/runtime_state.go b/lib/runtime_state.go index d32e6c8e2ee..b9bb625ef58 100644 --- a/lib/runtime_state.go +++ b/lib/runtime_state.go @@ -14,5 +14,7 @@ type RuntimeState struct { Registry *metrics.Registry BuiltinMetrics *metrics.BuiltinMetrics KeyLogger io.Writer - Logger *logrus.Logger + + // TODO: replace with logrus.FieldLogger when all of the tests can be fixed + Logger *logrus.Logger } From 9b4cce491ff7d7d7926f08700d67854535c599e9 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Sun, 31 Jul 2022 13:26:56 +0300 Subject: [PATCH 02/12] Use lib.RuntimeState in cmd.loadedTest instead of its constituents --- cmd/outputs.go | 4 +- cmd/run.go | 14 ++++--- cmd/runtime_options_test.go | 30 +++++++++------ cmd/test_load.go | 75 +++++++++++++++++++------------------ lib/runtime_state.go | 2 +- 5 files changed, 67 insertions(+), 58 deletions(-) diff --git a/cmd/outputs.go b/cmd/outputs.go index 08893380f4f..b1913ebdc03 100644 --- a/cmd/outputs.go +++ b/cmd/outputs.go @@ -90,7 +90,7 @@ func createOutputs(gs *globalState, test *loadedTest, executionPlan []lib.Execut StdErr: gs.stdErr, FS: gs.fs, ScriptOptions: test.derivedConfig.Options, - RuntimeOptions: test.runtimeOptions, + RuntimeOptions: test.runtimeState.RuntimeOptions, ExecutionPlan: executionPlan, } result := make([]output.Output, 0, len(test.derivedConfig.Out)) @@ -120,7 +120,7 @@ func 
createOutputs(gs *globalState, test *loadedTest, executionPlan []lib.Execut } if builtinMetricOut, ok := out.(output.WithBuiltinMetrics); ok { - builtinMetricOut.SetBuiltinMetrics(test.builtInMetrics) + builtinMetricOut.SetBuiltinMetrics(test.runtimeState.BuiltinMetrics) } result = append(result, out) diff --git a/cmd/run.go b/cmd/run.go index 652de3717e4..b25728f50e7 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -89,7 +89,9 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { logger := c.gs.logger // Create a local execution scheduler wrapping the runner. logger.Debug("Initializing the execution scheduler...") - execScheduler, err := local.NewExecutionScheduler(test.initRunner, test.builtInMetrics, logger) + execScheduler, err := local.NewExecutionScheduler( + test.initRunner, test.runtimeState.BuiltinMetrics, logger, + ) if err != nil { return err } @@ -126,8 +128,8 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { // Create the engine. initBar.Modify(pb.WithConstProgress(0, "Init engine")) engine, err := core.NewEngine( - execScheduler, conf.Options, test.runtimeOptions, - outputs, logger, test.metricsRegistry, + execScheduler, conf.Options, test.runtimeState.RuntimeOptions, + outputs, logger, test.runtimeState.Registry, ) if err != nil { return err @@ -230,7 +232,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { } // Handle the end-of-test summary. 
- if !test.runtimeOptions.NoSummary.Bool { + if !test.runtimeState.RuntimeOptions.NoSummary.Bool { engine.MetricsEngine.MetricsLock.Lock() // TODO: refactor so this is not needed summaryResult, err := test.initRunner.HandleSummary(globalCtx, &lib.Summary{ Metrics: engine.MetricsEngine.ObservedMetrics, @@ -268,8 +270,8 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { logger.Debug("Waiting for engine processes to finish...") engineWait() logger.Debug("Everything has finished, exiting k6!") - if test.keywriter != nil { - if err := test.keywriter.Close(); err != nil { + if test.runtimeState.KeyLogger != nil { + if err := test.runtimeState.KeyLogger.Close(); err != nil { logger.WithError(err).Warn("Error while closing the SSLKEYLOGFILE") } } diff --git a/cmd/runtime_options_test.go b/cmd/runtime_options_test.go index a5d642fecbf..dfc55501895 100644 --- a/cmd/runtime_options_test.go +++ b/cmd/runtime_options_test.go @@ -81,12 +81,15 @@ func testRuntimeOptionsCase(t *testing.T, tc runtimeOptionsTestCase) { ts := newGlobalTestState(t) // TODO: move upwards, make this into an almost full integration test registry := metrics.NewRegistry() test := &loadedTest{ - sourceRootPath: "script.js", - source: &loader.SourceData{Data: jsCode.Bytes(), URL: &url.URL{Path: "/script.js", Scheme: "file"}}, - fileSystems: map[string]afero.Fs{"file": fs}, - runtimeOptions: rtOpts, - metricsRegistry: registry, - builtInMetrics: metrics.RegisterBuiltinMetrics(registry), + sourceRootPath: "script.js", + source: &loader.SourceData{Data: jsCode.Bytes(), URL: &url.URL{Path: "/script.js", Scheme: "file"}}, + fileSystems: map[string]afero.Fs{"file": fs}, + runtimeState: &lib.RuntimeState{ + Logger: ts.logger, + RuntimeOptions: rtOpts, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + }, } require.NoError(t, test.initializeFirstRunner(ts.globalState)) @@ -97,12 +100,15 @@ func testRuntimeOptionsCase(t *testing.T, tc runtimeOptionsTestCase) { 
getRunnerErr := func(rtOpts lib.RuntimeOptions) *loadedTest { return &loadedTest{ - sourceRootPath: "script.tar", - source: &loader.SourceData{Data: archiveBuf.Bytes(), URL: &url.URL{Path: "/script.tar", Scheme: "file"}}, - fileSystems: map[string]afero.Fs{"file": fs}, - runtimeOptions: rtOpts, - metricsRegistry: registry, - builtInMetrics: metrics.RegisterBuiltinMetrics(registry), + sourceRootPath: "script.tar", + source: &loader.SourceData{Data: archiveBuf.Bytes(), URL: &url.URL{Path: "/script.tar", Scheme: "file"}}, + fileSystems: map[string]afero.Fs{"file": fs}, + runtimeState: &lib.RuntimeState{ + Logger: ts.logger, + RuntimeOptions: rtOpts, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + }, } } diff --git a/cmd/test_load.go b/cmd/test_load.go index 6bc39d10239..ff1445ceb3a 100644 --- a/cmd/test_load.go +++ b/cmd/test_load.go @@ -28,16 +28,13 @@ const ( // loadedTest contains all of data, details and dependencies of a fully-loaded // and configured k6 test. type loadedTest struct { - sourceRootPath string // contains the raw string the user supplied - pwd string - source *loader.SourceData - fs afero.Fs - fileSystems map[string]afero.Fs - runtimeOptions lib.RuntimeOptions - metricsRegistry *metrics.Registry - builtInMetrics *metrics.BuiltinMetrics - initRunner lib.Runner // TODO: rename to something more appropriate - keywriter io.Closer + sourceRootPath string // contains the raw string the user supplied + pwd string + source *loader.SourceData + fs afero.Fs + fileSystems map[string]afero.Fs + runtimeState *lib.RuntimeState + initRunner lib.Runner // TODO: rename to something more appropriate // Only set if cliConfigGetter is supplied to loadAndConfigureTest() or if // consolidateDeriveAndValidateConfig() is manually called. 
@@ -73,15 +70,20 @@ func loadAndConfigureTest( } registry := metrics.NewRegistry() + state := &lib.RuntimeState{ + Logger: gs.logger, + RuntimeOptions: runtimeOptions, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + } + test := &loadedTest{ - pwd: pwd, - sourceRootPath: sourceRootPath, - source: src, - fs: gs.fs, - fileSystems: fileSystems, - runtimeOptions: runtimeOptions, - metricsRegistry: registry, - builtInMetrics: metrics.RegisterBuiltinMetrics(registry), + pwd: pwd, + sourceRootPath: sourceRootPath, + source: src, + fs: gs.fs, + fileSystems: fileSystems, + runtimeState: state, } gs.logger.Debugf("Initializing k6 runner for '%s' (%s)...", sourceRootPath, resolvedPath) @@ -103,22 +105,16 @@ func (lt *loadedTest) initializeFirstRunner(gs *globalState) error { testPath := lt.source.URL.String() logger := gs.logger.WithField("test_path", testPath) - testType := lt.runtimeOptions.TestType.String + testType := lt.runtimeState.RuntimeOptions.TestType.String if testType == "" { logger.Debug("Detecting test type for...") testType = detectTestType(lt.source.Data) } - state := &lib.RuntimeState{ - Logger: gs.logger, - RuntimeOptions: lt.runtimeOptions, - BuiltinMetrics: lt.builtInMetrics, - Registry: lt.metricsRegistry, - } - if lt.runtimeOptions.KeyWriter.Valid { + if lt.runtimeState.RuntimeOptions.KeyWriter.Valid { logger.Warnf("SSLKEYLOGFILE was specified, logging TLS connection keys to '%s'...", - lt.runtimeOptions.KeyWriter.String) - keylogFilename := lt.runtimeOptions.KeyWriter.String + lt.runtimeState.RuntimeOptions.KeyWriter.String) + keylogFilename := lt.runtimeState.RuntimeOptions.KeyWriter.String // if path is absolute - no point doing anything if !filepath.IsAbs(keylogFilename) { // filepath.Abs could be used but it will get the pwd from `os` package instead of what is in lt.pwd @@ -129,13 +125,12 @@ func (lt *loadedTest) initializeFirstRunner(gs *globalState) error { if err != nil { return fmt.Errorf("couldn't get 
absolute path for keylog file: %w", err) } - lt.keywriter = f - state.KeyLogger = &syncWriter{w: f} + lt.runtimeState.KeyLogger = &syncWriteCloser{w: f} } switch testType { case testTypeJS: logger.Debug("Trying to load as a JS test...") - runner, err := js.New(state, lt.source, lt.fileSystems) + runner, err := js.New(lt.runtimeState, lt.source, lt.fileSystems) // TODO: should we use common.UnwrapGojaInterruptedError() here? if err != nil { return fmt.Errorf("could not load JS test '%s': %w", testPath, err) @@ -156,7 +151,7 @@ func (lt *loadedTest) initializeFirstRunner(gs *globalState) error { switch arc.Type { case testTypeJS: logger.Debug("Evaluating JS from archive bundle...") - lt.initRunner, err = js.NewFromArchive(state, arc) + lt.initRunner, err = js.NewFromArchive(lt.runtimeState, arc) if err != nil { return fmt.Errorf("could not load JS from test archive bundle '%s': %w", testPath, err) } @@ -213,14 +208,14 @@ func (lt *loadedTest) consolidateDeriveAndValidateConfig( // Parse the thresholds, only if the --no-threshold flag is not set. // If parsing the threshold expressions failed, consider it as an // invalid configuration error. 
- if !lt.runtimeOptions.NoThresholds.Bool { + if !lt.runtimeState.RuntimeOptions.NoThresholds.Bool { for metricName, thresholdsDefinition := range consolidatedConfig.Options.Thresholds { err = thresholdsDefinition.Parse() if err != nil { return errext.WithExitCodeIfNone(err, exitcodes.InvalidConfig) } - err = thresholdsDefinition.Validate(metricName, lt.metricsRegistry) + err = thresholdsDefinition.Validate(metricName, lt.runtimeState.Registry) if err != nil { return errext.WithExitCodeIfNone(err, exitcodes.InvalidConfig) } @@ -238,13 +233,19 @@ func (lt *loadedTest) consolidateDeriveAndValidateConfig( return nil } -type syncWriter struct { - w io.Writer +type syncWriteCloser struct { + w io.WriteCloser m sync.Mutex } -func (cw *syncWriter) Write(b []byte) (int, error) { +func (cw *syncWriteCloser) Write(b []byte) (int, error) { cw.m.Lock() defer cw.m.Unlock() return cw.w.Write(b) } + +func (cw *syncWriteCloser) Close() error { + cw.m.Lock() + defer cw.m.Unlock() + return cw.w.Close() +} diff --git a/lib/runtime_state.go b/lib/runtime_state.go index b9bb625ef58..777ce718d35 100644 --- a/lib/runtime_state.go +++ b/lib/runtime_state.go @@ -13,7 +13,7 @@ type RuntimeState struct { // TODO maybe have a struct `Metrics` with `Registry` and `Builtin` ? 
Registry *metrics.Registry BuiltinMetrics *metrics.BuiltinMetrics - KeyLogger io.Writer + KeyLogger io.WriteCloser // TODO: replace with logrus.FieldLogger when all of the tests can be fixed Logger *logrus.Logger From 769136c018686a79ed698ca87a6a5fb691195519 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Sun, 31 Jul 2022 15:05:21 +0300 Subject: [PATCH 03/12] Pass lib.RuntimeState to the ExecutionScheduler directly --- api/server_test.go | 8 +- api/v1/group_routes_test.go | 21 +- api/v1/metric_routes_test.go | 24 +-- api/v1/setup_teardown_routes_test.go | 23 +-- api/v1/status_routes_test.go | 21 +- .../eventloop/eventloop_test.go | 93 ++++----- cmd/run.go | 6 +- core/engine_test.go | 169 +++++++--------- core/local/local.go | 38 ++-- core/local/local_test.go | 186 ++++++------------ js/runner_test.go | 11 +- lib/runtime_state.go | 5 +- lib/state.go | 14 +- 13 files changed, 232 insertions(+), 387 deletions(-) diff --git a/api/server_test.go b/api/server_test.go index 2e7df53baee..93b85ef403d 100644 --- a/api/server_test.go +++ b/api/server_test.go @@ -81,8 +81,12 @@ func TestWithEngine(t *testing.T) { logger := logrus.New() logger.SetOutput(testutils.NewTestOutput(t)) registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, builtinMetrics, logger) + rs := &lib.RuntimeState{ + Logger: logger, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + } + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, rs) require.NoError(t, err) engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) require.NoError(t, err) diff --git a/api/v1/group_routes_test.go b/api/v1/group_routes_test.go index f5765bbaa5c..de08b36fad4 100644 --- a/api/v1/group_routes_test.go +++ b/api/v1/group_routes_test.go @@ -26,7 +26,6 @@ import ( "net/http/httptest" "testing" - 
"github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -38,6 +37,16 @@ import ( "go.k6.io/k6/metrics" ) +func getRuntimeState(tb testing.TB) *lib.RuntimeState { + reg := metrics.NewRegistry() + return &lib.RuntimeState{ + Logger: testutils.NewLogger(tb), + RuntimeOptions: lib.RuntimeOptions{}, + Registry: reg, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(reg), + } +} + func TestGetGroups(t *testing.T) { g0, err := lib.NewGroup("", nil) assert.NoError(t, err) @@ -46,14 +55,10 @@ func TestGetGroups(t *testing.T) { g2, err := g1.Group("group 2") assert.NoError(t, err) - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Group: g0}, builtinMetrics, logger) + rs := getRuntimeState(t) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Group: g0}, rs) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) + engine, err := core.NewEngine(execScheduler, lib.Options{}, rs.RuntimeOptions, nil, rs.Logger, rs.Registry) require.NoError(t, err) t.Run("list", func(t *testing.T) { diff --git a/api/v1/metric_routes_test.go b/api/v1/metric_routes_test.go index 9e5aa2d728f..88e5b13fb1e 100644 --- a/api/v1/metric_routes_test.go +++ b/api/v1/metric_routes_test.go @@ -26,7 +26,6 @@ import ( "net/http/httptest" "testing" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v3" @@ -34,7 +33,6 @@ import ( "go.k6.io/k6/core" "go.k6.io/k6/core/local" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/metrics" ) @@ -42,15 +40,12 @@ import ( func TestGetMetrics(t *testing.T) { t.Parallel() - logger := logrus.New() - 
logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - testMetric, err := registry.NewMetric("my_metric", metrics.Trend, metrics.Time) + rs := getRuntimeState(t) + testMetric, err := rs.Registry.NewMetric("my_metric", metrics.Trend, metrics.Time) require.NoError(t, err) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, builtinMetrics, logger) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, rs) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) + engine, err := core.NewEngine(execScheduler, lib.Options{}, rs.RuntimeOptions, nil, rs.Logger, rs.Registry) require.NoError(t, err) engine.MetricsEngine.ObservedMetrics = map[string]*metrics.Metric{ @@ -104,15 +99,12 @@ func TestGetMetrics(t *testing.T) { func TestGetMetric(t *testing.T) { t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - testMetric, err := registry.NewMetric("my_metric", metrics.Trend, metrics.Time) + rs := getRuntimeState(t) + testMetric, err := rs.Registry.NewMetric("my_metric", metrics.Trend, metrics.Time) require.NoError(t, err) - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, builtinMetrics, logger) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, rs) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) + engine, err := core.NewEngine(execScheduler, lib.Options{}, rs.RuntimeOptions, nil, rs.Logger, rs.Registry) require.NoError(t, err) engine.MetricsEngine.ObservedMetrics = map[string]*metrics.Metric{ diff --git a/api/v1/setup_teardown_routes_test.go b/api/v1/setup_teardown_routes_test.go index 
a202125a762..7f0d5d88704 100644 --- a/api/v1/setup_teardown_routes_test.go +++ b/api/v1/setup_teardown_routes_test.go @@ -31,7 +31,6 @@ import ( "testing" "time" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v3" @@ -40,10 +39,8 @@ import ( "go.k6.io/k6/core/local" "go.k6.io/k6/js" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/types" "go.k6.io/k6/loader" - "go.k6.io/k6/metrics" ) func TestSetupData(t *testing.T) { @@ -140,19 +137,9 @@ func TestSetupData(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - + rs := getRuntimeState(t) runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, - &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: testCase.script}, - nil, + rs, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: testCase.script}, nil, ) require.NoError(t, err) runner.SetOptions(lib.Options{ @@ -163,9 +150,11 @@ func TestSetupData(t *testing.T) { SetupTimeout: types.NullDurationFrom(5 * time.Second), TeardownTimeout: types.NullDurationFrom(5 * time.Second), }) - execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger) + execScheduler, err := local.NewExecutionScheduler(runner, rs) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, runner.GetOptions(), lib.RuntimeOptions{}, nil, logger, registry) + engine, err := core.NewEngine( + execScheduler, runner.GetOptions(), rs.RuntimeOptions, nil, rs.Logger, rs.Registry, + ) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) diff --git a/api/v1/status_routes_test.go b/api/v1/status_routes_test.go index 70b530646c8..688b0d30544 100644 --- a/api/v1/status_routes_test.go +++ 
b/api/v1/status_routes_test.go @@ -29,7 +29,6 @@ import ( "testing" "time" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v3" @@ -37,21 +36,16 @@ import ( "go.k6.io/k6/core" "go.k6.io/k6/core/local" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/testutils/minirunner" - "go.k6.io/k6/metrics" ) func TestGetStatus(t *testing.T) { t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, builtinMetrics, logger) + rs := getRuntimeState(t) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, rs) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) + engine, err := core.NewEngine(execScheduler, lib.Options{}, rs.RuntimeOptions, nil, rs.Logger, rs.Registry) require.NoError(t, err) rw := httptest.NewRecorder() @@ -128,8 +122,6 @@ func TestPatchStatus(t *testing.T) { for name, testCase := range testData { t.Run(name, func(t *testing.T) { t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) scenarios := lib.ScenarioConfigs{} err := json.Unmarshal([]byte(` @@ -138,11 +130,10 @@ func TestPatchStatus(t *testing.T) { require.NoError(t, err) options := lib.Options{Scenarios: scenarios} - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Options: options}, builtinMetrics, logger) + rs := getRuntimeState(t) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Options: options}, rs) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, options, lib.RuntimeOptions{}, nil, logger, registry) + engine, 
err := core.NewEngine(execScheduler, options, rs.RuntimeOptions, nil, rs.Logger, rs.Registry) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) diff --git a/cmd/integration_tests/eventloop/eventloop_test.go b/cmd/integration_tests/eventloop/eventloop_test.go index 3fa440f0cb6..286dba272c0 100644 --- a/cmd/integration_tests/eventloop/eventloop_test.go +++ b/cmd/integration_tests/eventloop/eventloop_test.go @@ -16,7 +16,6 @@ import ( "go.k6.io/k6/lib" "go.k6.io/k6/lib/executor" "go.k6.io/k6/lib/testutils" - "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/types" "go.k6.io/k6/loader" "go.k6.io/k6/metrics" @@ -29,30 +28,43 @@ func eventLoopTest(t *testing.T, script []byte, testHandle func(context.Context, logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.InfoLevel, logrus.WarnLevel, logrus.ErrorLevel}} logger.AddHook(logHook) - script = []byte(`import {setTimeout} from "k6/x/events"; - ` + string(script)) registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, - &loader.SourceData{ - URL: &url.URL{Path: "/script.js"}, - Data: script, - }, - nil, - ) + rs := &lib.RuntimeState{ + Logger: logger, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + } + + script = []byte("import {setTimeout} from 'k6/x/events';\n" + string(script)) + runner, err := js.New(rs, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) require.NoError(t, err) - ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, logger, - lib.Options{ - TeardownTimeout: types.NullDurationFrom(time.Second), - SetupTimeout: types.NullDurationFrom(time.Second), - }, builtinMetrics) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() + newOpts, err := executor.DeriveScenariosFromShortcuts(lib.Options{ 
+ MetricSamplesBufferSize: null.NewInt(200, false), + TeardownTimeout: types.NullDurationFrom(time.Second), + SetupTimeout: types.NullDurationFrom(time.Second), + }.Apply(runner.GetOptions()), nil) + require.NoError(t, err) + require.Empty(t, newOpts.Validate()) + require.NoError(t, runner.SetOptions(newOpts)) + + execScheduler, err := local.NewExecutionScheduler(runner, rs) + require.NoError(t, err) + + samples := make(chan metrics.SampleContainer, newOpts.MetricSamplesBufferSize.Int64) + go func() { + for { + select { + case <-samples: + case <-ctx.Done(): + return + } + } + }() + + require.NoError(t, execScheduler.Init(ctx, samples)) errCh := make(chan error, 1) go func() { errCh <- execScheduler.Run(ctx, ctx, samples) }() @@ -198,42 +210,3 @@ export default function() { }, msgs) }) } - -func newTestExecutionScheduler( - t *testing.T, runner lib.Runner, logger *logrus.Logger, opts lib.Options, builtinMetrics *metrics.BuiltinMetrics, -) (ctx context.Context, cancel func(), execScheduler *local.ExecutionScheduler, samples chan metrics.SampleContainer) { - if runner == nil { - runner = &minirunner.MiniRunner{} - } - ctx, cancel = context.WithCancel(context.Background()) - newOpts, err := executor.DeriveScenariosFromShortcuts(lib.Options{ - MetricSamplesBufferSize: null.NewInt(200, false), - }.Apply(runner.GetOptions()).Apply(opts), nil) - require.NoError(t, err) - require.Empty(t, newOpts.Validate()) - - require.NoError(t, runner.SetOptions(newOpts)) - - if logger == nil { - logger = logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - } - - execScheduler, err = local.NewExecutionScheduler(runner, builtinMetrics, logger) - require.NoError(t, err) - - samples = make(chan metrics.SampleContainer, newOpts.MetricSamplesBufferSize.Int64) - go func() { - for { - select { - case <-samples: - case <-ctx.Done(): - return - } - } - }() - - require.NoError(t, execScheduler.Init(ctx, samples)) - - return ctx, cancel, execScheduler, samples -} diff --git a/cmd/run.go 
b/cmd/run.go index b25728f50e7..a444b39eb78 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -86,12 +86,10 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { runCtx, runCancel := context.WithCancel(lingerCtx) defer runCancel() - logger := c.gs.logger + logger := test.runtimeState.Logger // Create a local execution scheduler wrapping the runner. logger.Debug("Initializing the execution scheduler...") - execScheduler, err := local.NewExecutionScheduler( - test.initRunner, test.runtimeState.BuiltinMetrics, logger, - ) + execScheduler, err := local.NewExecutionScheduler(test.initRunner, test.runtimeState) if err != nil { return err } diff --git a/core/engine_test.go b/core/engine_test.go index a6485132fb7..58bf4dc5c7f 100644 --- a/core/engine_test.go +++ b/core/engine_test.go @@ -58,32 +58,40 @@ type testStruct struct { run func() error runCancel func() wait func() + rs *lib.RuntimeState +} + +func getRuntimeState(tb testing.TB) *lib.RuntimeState { + reg := metrics.NewRegistry() + return &lib.RuntimeState{ + Logger: testutils.NewLogger(tb), + RuntimeOptions: lib.RuntimeOptions{}, + Registry: reg, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(reg), + } } // Wrapper around NewEngine that applies a logger and manages the options. 
-func newTestEngineWithRegistry( //nolint:golint +func newTestEngineWithRuntimeState( //nolint:golint t *testing.T, runTimeout *time.Duration, runner lib.Runner, outputs []output.Output, opts lib.Options, - registry *metrics.Registry, + rs *lib.RuntimeState, ) *testStruct { if runner == nil { runner = &minirunner.MiniRunner{} } - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) newOpts, err := executor.DeriveScenariosFromShortcuts(lib.Options{ MetricSamplesBufferSize: null.NewInt(200, false), - }.Apply(runner.GetOptions()).Apply(opts), logger) + }.Apply(runner.GetOptions()).Apply(opts), rs.Logger) require.NoError(t, err) require.Empty(t, newOpts.Validate()) require.NoError(t, runner.SetOptions(newOpts)) - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger) + execScheduler, err := local.NewExecutionScheduler(runner, rs) require.NoError(t, err) - engine, err := NewEngine(execScheduler, opts, lib.RuntimeOptions{}, outputs, logger, registry) + engine, err := NewEngine(execScheduler, opts, rs.RuntimeOptions, outputs, rs.Logger, rs.Registry) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) @@ -116,7 +124,7 @@ func newTestEngineWithRegistry( //nolint:golint func newTestEngine( t *testing.T, runTimeout *time.Duration, runner lib.Runner, outputs []output.Output, opts lib.Options, ) *testStruct { - return newTestEngineWithRegistry(t, runTimeout, runner, outputs, opts, metrics.NewRegistry()) + return newTestEngineWithRuntimeState(t, runTimeout, runner, outputs, opts, getRuntimeState(t)) } func TestEngineRun(t *testing.T) { @@ -156,8 +164,8 @@ func TestEngineRun(t *testing.T) { t.Run("collects samples", func(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - testMetric, err := registry.NewMetric("test_metric", metrics.Trend) + rs := getRuntimeState(t) + testMetric, err := rs.Registry.NewMetric("test_metric", 
metrics.Trend) require.NoError(t, err) signalChan := make(chan interface{}) @@ -173,10 +181,10 @@ func TestEngineRun(t *testing.T) { } mockOutput := mockoutput.New() - test := newTestEngineWithRegistry(t, nil, runner, []output.Output{mockOutput}, lib.Options{ + test := newTestEngineWithRuntimeState(t, nil, runner, []output.Output{mockOutput}, lib.Options{ VUs: null.IntFrom(1), Iterations: null.IntFrom(1), - }, registry) + }, rs) errC := make(chan error) go func() { errC <- test.run() }() @@ -226,8 +234,8 @@ func TestEngineStopped(t *testing.T) { func TestEngineOutput(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - testMetric, err := registry.NewMetric("test_metric", metrics.Trend) + rs := getRuntimeState(t) + testMetric, err := rs.Registry.NewMetric("test_metric", metrics.Trend) require.NoError(t, err) runner := &minirunner.MiniRunner{ @@ -238,10 +246,10 @@ func TestEngineOutput(t *testing.T) { } mockOutput := mockoutput.New() - test := newTestEngineWithRegistry(t, nil, runner, []output.Output{mockOutput}, lib.Options{ + test := newTestEngineWithRuntimeState(t, nil, runner, []output.Output{mockOutput}, lib.Options{ VUs: null.IntFrom(1), Iterations: null.IntFrom(1), - }, registry) + }, rs) assert.NoError(t, test.run()) test.wait() @@ -269,8 +277,8 @@ func TestEngine_processSamples(t *testing.T) { t.Run("metric", func(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - metric, err := registry.NewMetric("my_metric", metrics.Gauge) + rs := getRuntimeState(t) + metric, err := rs.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) done := make(chan struct{}) @@ -281,7 +289,7 @@ func TestEngine_processSamples(t *testing.T) { return nil }, } - test := newTestEngineWithRegistry(t, nil, runner, nil, lib.Options{}, registry) + test := newTestEngineWithRuntimeState(t, nil, runner, nil, lib.Options{}, rs) go func() { assert.NoError(t, test.run()) @@ -301,8 +309,8 @@ func TestEngine_processSamples(t *testing.T) { 
t.Run("submetric", func(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - metric, err := registry.NewMetric("my_metric", metrics.Gauge) + rs := getRuntimeState(t) + metric, err := rs.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) ths := metrics.NewThresholds([]string{`value<2`}) @@ -317,11 +325,11 @@ func TestEngine_processSamples(t *testing.T) { return nil }, } - test := newTestEngineWithRegistry(t, nil, runner, nil, lib.Options{ + test := newTestEngineWithRuntimeState(t, nil, runner, nil, lib.Options{ Thresholds: map[string]metrics.Thresholds{ "my_metric{a:1}": ths, }, - }, registry) + }, rs) go func() { assert.NoError(t, test.run()) @@ -347,8 +355,8 @@ func TestEngine_processSamples(t *testing.T) { func TestEngineThresholdsWillAbort(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - metric, err := registry.NewMetric("my_metric", metrics.Gauge) + rs := getRuntimeState(t) + metric, err := rs.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) // The incoming samples for the metric set it to 1.25. Considering @@ -369,9 +377,7 @@ func TestEngineThresholdsWillAbort(t *testing.T) { return nil }, } - test := newTestEngineWithRegistry(t, nil, runner, nil, lib.Options{ - Thresholds: thresholds, - }, registry) + test := newTestEngineWithRuntimeState(t, nil, runner, nil, lib.Options{Thresholds: thresholds}, rs) go func() { assert.NoError(t, test.run()) @@ -390,8 +396,8 @@ func TestEngineThresholdsWillAbort(t *testing.T) { func TestEngineAbortedByThresholds(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - metric, err := registry.NewMetric("my_metric", metrics.Gauge) + rs := getRuntimeState(t) + metric, err := rs.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) // The MiniRunner sets the value of the metric to 1.25. 
Considering @@ -415,7 +421,7 @@ func TestEngineAbortedByThresholds(t *testing.T) { }, } - test := newTestEngineWithRegistry(t, nil, runner, nil, lib.Options{Thresholds: thresholds}, registry) + test := newTestEngineWithRuntimeState(t, nil, runner, nil, lib.Options{Thresholds: thresholds}, rs) defer test.wait() go func() { @@ -465,12 +471,12 @@ func TestEngine_processThresholds(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - gaugeMetric, err := registry.NewMetric("my_metric", metrics.Gauge) + rs := getRuntimeState(t) + gaugeMetric, err := rs.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) - counterMetric, err := registry.NewMetric("used_counter", metrics.Counter) + counterMetric, err := rs.Registry.NewMetric("used_counter", metrics.Counter) require.NoError(t, err) - _, err = registry.NewMetric("unused_counter", metrics.Counter) + _, err = rs.Registry.NewMetric("unused_counter", metrics.Counter) require.NoError(t, err) thresholds := make(map[string]metrics.Thresholds, len(data.ths)) @@ -482,8 +488,8 @@ func TestEngine_processThresholds(t *testing.T) { } runner := &minirunner.MiniRunner{} - test := newTestEngineWithRegistry( - t, nil, runner, nil, lib.Options{Thresholds: thresholds}, registry, + test := newTestEngineWithRuntimeState( + t, nil, runner, nil, lib.Options{Thresholds: thresholds}, rs, ) test.engine.OutputManager.AddMetricSamples( @@ -593,14 +599,8 @@ func TestSentReceivedMetrics(t *testing.T) { } runTest := func(t *testing.T, ts testScript, tc testCase, noConnReuse bool) (float64, float64) { - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + getRuntimeState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: []byte(ts.Code)}, nil, ) @@ -732,14 +732,8 @@ func TestRunTags(t *testing.T) { } `)) 
- registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + getRuntimeState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil, ) @@ -815,14 +809,8 @@ func TestSetupException(t *testing.T) { throw new Error("baz"); } `), 0x666)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + getRuntimeState(t), &loader.SourceData{URL: &url.URL{Scheme: "file", Path: "/script.js"}, Data: script}, map[string]afero.Fs{"file": memfs}, ) @@ -868,15 +856,9 @@ func TestVuInitException(t *testing.T) { } `) - logger := testutils.NewLogger(t) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + rs := getRuntimeState(t) runner, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + rs, &loader.SourceData{URL: &url.URL{Scheme: "file", Path: "/script.js"}, Data: script}, nil, ) @@ -887,9 +869,9 @@ func TestVuInitException(t *testing.T) { require.Empty(t, opts.Validate()) require.NoError(t, runner.SetOptions(opts)) - execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger) + execScheduler, err := local.NewExecutionScheduler(runner, rs) require.NoError(t, err) - engine, err := NewEngine(execScheduler, opts, lib.RuntimeOptions{}, nil, logger, registry) + engine, err := NewEngine(execScheduler, opts, rs.RuntimeOptions, nil, rs.Logger, rs.Registry) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -942,14 +924,8 @@ func TestEmittedMetricsWhenScalingDown(t *testing.T) { }; `)) - registry := metrics.NewRegistry() - builtinMetrics := 
metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + getRuntimeState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil, ) @@ -1031,14 +1007,8 @@ func TestMetricsEmission(t *testing.T) { if !isWindows { t.Parallel() } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + getRuntimeState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: []byte(fmt.Sprintf(` import { sleep } from "k6"; import { Counter } from "k6/metrics"; @@ -1143,14 +1113,8 @@ func TestMinIterationDurationInSetupTeardownStage(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ - Logger: testutils.NewLogger(t), - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, + getRuntimeState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script)}, nil, ) @@ -1175,8 +1139,8 @@ func TestMinIterationDurationInSetupTeardownStage(t *testing.T) { func TestEngineRunsTeardownEvenAfterTestRunIsAborted(t *testing.T) { t.Parallel() - registry := metrics.NewRegistry() - testMetric, err := registry.NewMetric("teardown_metric", metrics.Counter) + rs := getRuntimeState(t) + testMetric, err := rs.Registry.NewMetric("teardown_metric", metrics.Counter) require.NoError(t, err) var test *testStruct @@ -1192,9 +1156,9 @@ func TestEngineRunsTeardownEvenAfterTestRunIsAborted(t *testing.T) { } mockOutput := mockoutput.New() - test = newTestEngineWithRegistry(t, nil, runner, []output.Output{mockOutput}, lib.Options{ + test = newTestEngineWithRuntimeState(t, nil, runner, 
[]output.Output{mockOutput}, lib.Options{ VUs: null.IntFrom(1), Iterations: null.IntFrom(1), - }, registry) + }, rs) assert.NoError(t, test.run()) test.wait() @@ -1261,14 +1225,13 @@ func TestActiveVUsCount(t *testing.T) { rtOpts := lib.RuntimeOptions{CompatibilityMode: null.StringFrom("base")} registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - RuntimeOptions: rtOpts, - }, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) + rs := &lib.RuntimeState{ + Logger: logger, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + RuntimeOptions: rtOpts, + } + runner, err := js.New(rs, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) require.NoError(t, err) mockOutput := mockoutput.New() @@ -1281,7 +1244,7 @@ func TestActiveVUsCount(t *testing.T) { require.NoError(t, err) require.Empty(t, opts.Validate()) require.NoError(t, runner.SetOptions(opts)) - execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, logger) + execScheduler, err := local.NewExecutionScheduler(runner, rs) require.NoError(t, err) engine, err := NewEngine(execScheduler, opts, rtOpts, []output.Output{mockOutput}, logger, registry) require.NoError(t, err) diff --git a/core/local/local.go b/core/local/local.go index 926c297fe71..60fac7b0f21 100644 --- a/core/local/local.go +++ b/core/local/local.go @@ -38,9 +38,9 @@ import ( // ExecutionScheduler is the local implementation of lib.ExecutionScheduler type ExecutionScheduler struct { - runner lib.Runner - options lib.Options - logger logrus.FieldLogger + runner lib.Runner + options lib.Options + runtimeState *lib.RuntimeState initProgress *pb.ProgressBar executorConfigs []lib.ExecutorConfig // sorted by (startTime, ID) @@ -62,9 +62,7 @@ var _ lib.ExecutionScheduler = &ExecutionScheduler{} // instance, 
without initializing it beyond the bare minimum. Specifically, it // creates the needed executor instances and a lot of state placeholders, but it // doesn't initialize the executors and it doesn't initialize or run VUs. -func NewExecutionScheduler( - runner lib.Runner, builtinMetrics *metrics.BuiltinMetrics, logger logrus.FieldLogger, -) (*ExecutionScheduler, error) { +func NewExecutionScheduler(runner lib.Runner, rs *lib.RuntimeState) (*ExecutionScheduler, error) { options := runner.GetOptions() et, err := lib.NewExecutionTuple(options.ExecutionSegment, options.ExecutionSegmentSequence) if err != nil { @@ -74,7 +72,7 @@ func NewExecutionScheduler( maxPlannedVUs := lib.GetMaxPlannedVUs(executionPlan) maxPossibleVUs := lib.GetMaxPossibleVUs(executionPlan) - executionState := lib.NewExecutionState(options, et, builtinMetrics, maxPlannedVUs, maxPossibleVUs) + executionState := lib.NewExecutionState(options, et, rs.BuiltinMetrics, maxPlannedVUs, maxPossibleVUs) maxDuration, _ := lib.GetEndOffset(executionPlan) // we don't care if the end offset is final executorConfigs := options.Scenarios.GetSortedConfigs() @@ -82,13 +80,13 @@ func NewExecutionScheduler( // Only take executors which have work. 
for _, sc := range executorConfigs { if !sc.HasWork(et) { - logger.Warnf( + rs.Logger.Warnf( "Executor '%s' is disabled for segment %s due to lack of work!", sc.GetName(), options.ExecutionSegment, ) continue } - s, err := sc.NewExecutor(executionState, logger.WithFields(logrus.Fields{ + s, err := sc.NewExecutor(executionState, rs.Logger.WithFields(logrus.Fields{ "scenario": sc.GetName(), "executor": sc.GetType(), })) @@ -105,9 +103,9 @@ func NewExecutionScheduler( } return &ExecutionScheduler{ - runner: runner, - logger: logger, - options: options, + runner: runner, + runtimeState: rs, + options: options, initProgress: pb.New(pb.WithConstLeft("Init")), executors: executors, @@ -165,7 +163,7 @@ func (e *ExecutionScheduler) GetExecutionPlan() []lib.ExecutionStep { // in the Init() method, and also passed to executors so they can initialize // any unplanned VUs themselves. func (e *ExecutionScheduler) initVU( - samplesOut chan<- metrics.SampleContainer, logger *logrus.Entry, + samplesOut chan<- metrics.SampleContainer, logger logrus.FieldLogger, ) (lib.InitializedVU, error) { // Get the VU IDs here, so that the VUs are (mostly) ordered by their // number in the channel buffer @@ -201,7 +199,7 @@ func (e *ExecutionScheduler) getRunStats() string { func (e *ExecutionScheduler) initVUsConcurrently( ctx context.Context, samplesOut chan<- metrics.SampleContainer, count uint64, - concurrency int, logger *logrus.Entry, + concurrency int, logger logrus.FieldLogger, ) chan error { doneInits := make(chan error, count) // poor man's early-return waitgroup limiter := make(chan struct{}) @@ -233,7 +231,7 @@ func (e *ExecutionScheduler) initVUsConcurrently( } func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- metrics.SampleContainer) { - e.logger.Debug("Starting emission of VUs and VUsMax metrics...") + e.runtimeState.Logger.Debug("Starting emission of VUs and VUsMax metrics...") emitMetrics := func() { t := time.Now() @@ -261,7 +259,7 @@ func (e 
*ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- me go func() { defer func() { ticker.Stop() - e.logger.Debug("Metrics emission of VUs and VUsMax metrics stopped") + e.runtimeState.Logger.Debug("Metrics emission of VUs and VUsMax metrics stopped") close(e.vusEmissionStopped) }() @@ -283,7 +281,7 @@ func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- me func (e *ExecutionScheduler) Init(ctx context.Context, samplesOut chan<- metrics.SampleContainer) error { e.emitVUsAndVUsMax(ctx, samplesOut) - logger := e.logger.WithField("phase", "local-execution-scheduler-init") + logger := e.runtimeState.Logger.WithField("phase", "local-execution-scheduler-init") vusToInitialize := lib.GetMaxPlannedVUs(e.executionPlan) logger.WithFields(logrus.Fields{ "neededVUs": vusToInitialize, @@ -350,7 +348,7 @@ func (e *ExecutionScheduler) runExecutor( ) { executorConfig := executor.GetConfig() executorStartTime := executorConfig.GetStartTime() - executorLogger := e.logger.WithFields(logrus.Fields{ + executorLogger := e.runtimeState.Logger.WithFields(logrus.Fields{ "executor": executorConfig.GetName(), "type": executorConfig.GetType(), "startTime": executorStartTime, @@ -402,7 +400,7 @@ func (e *ExecutionScheduler) Run(globalCtx, runCtx context.Context, engineOut ch }() executorsCount := len(e.executors) - logger := e.logger.WithField("phase", "local-execution-scheduler-run") + logger := e.runtimeState.Logger.WithField("phase", "local-execution-scheduler-run") e.initProgress.Modify(pb.WithConstLeft("Run")) var interrupted bool defer func() { @@ -499,7 +497,7 @@ func (e *ExecutionScheduler) SetPaused(pause bool) error { if pause { return fmt.Errorf("execution is already paused") } - e.logger.Debug("Starting execution") + e.runtimeState.Logger.Debug("Starting execution") return e.state.Resume() } diff --git a/core/local/local_test.go b/core/local/local_test.go index c6273100fed..ce8305f3f33 100644 --- a/core/local/local_test.go +++ 
b/core/local/local_test.go @@ -53,6 +53,16 @@ import ( "go.k6.io/k6/metrics" ) +func getRuntimeState(tb testing.TB) *lib.RuntimeState { + reg := metrics.NewRegistry() + return &lib.RuntimeState{ + Logger: testutils.NewLogger(tb), + RuntimeOptions: lib.RuntimeOptions{}, + Registry: reg, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(reg), + } +} + func newTestExecutionScheduler( t *testing.T, runner lib.Runner, logger *logrus.Logger, opts lib.Options, ) (ctx context.Context, cancel func(), execScheduler *ExecutionScheduler, samples chan metrics.SampleContainer) { @@ -68,14 +78,12 @@ func newTestExecutionScheduler( require.NoError(t, runner.SetOptions(newOpts)) - if logger == nil { - logger = logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) + rs := getRuntimeState(t) + if logger != nil { + rs.Logger = logger } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err = NewExecutionScheduler(runner, builtinMetrics, logger) + execScheduler, err = NewExecutionScheduler(runner, rs) require.NoError(t, err) samples = make(chan metrics.SampleContainer, newOpts.MetricSamplesBufferSize.Int64) @@ -129,21 +137,14 @@ func TestExecutionSchedulerRunNonDefault(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + rs := getRuntimeState(t) runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, &loader.SourceData{ + rs, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script), }, nil) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + execScheduler, err := NewExecutionScheduler(runner, rs) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -244,23 
+245,17 @@ func TestExecutionSchedulerRunEnv(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + rs := getRuntimeState(t) + rs.RuntimeOptions = lib.RuntimeOptions{Env: map[string]string{"TESTVAR": "global"}} runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - RuntimeOptions: lib.RuntimeOptions{Env: map[string]string{"TESTVAR": "global"}}, - }, &loader.SourceData{ + rs, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script), - }, nil) + }, nil, + ) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + execScheduler, err := NewExecutionScheduler(runner, rs) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -316,16 +311,9 @@ func TestExecutionSchedulerSystemTags(t *testing.T) { http.get("HTTPBIN_IP_URL/"); }`) - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + rs := getRuntimeState(t) runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, &loader.SourceData{ + rs, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(script), }, nil) @@ -335,7 +323,7 @@ func TestExecutionSchedulerSystemTags(t *testing.T) { SystemTags: &metrics.DefaultSystemTagSet, }))) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + execScheduler, err := NewExecutionScheduler(runner, rs) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) @@ -460,25 +448,16 @@ func TestExecutionSchedulerRunCustomTags(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { 
t.Parallel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + rs := getRuntimeState(t) runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, - &loader.SourceData{ + rs, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script), - }, - nil) + }, nil, + ) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + execScheduler, err := NewExecutionScheduler(runner, rs) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) @@ -629,25 +608,19 @@ func TestExecutionSchedulerRunCustomConfigNoCrossover(t *testing.T) { }); } `) - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) + rs := getRuntimeState(t) + rs.RuntimeOptions.Env = map[string]string{"TESTGLOBALVAR": "global"} runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - RuntimeOptions: lib.RuntimeOptions{Env: map[string]string{"TESTGLOBALVAR": "global"}}, - }, &loader.SourceData{ + rs, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(script), }, - nil) + nil, + ) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + execScheduler, err := NewExecutionScheduler(runner, rs) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) @@ -975,12 +948,8 @@ func TestExecutionSchedulerEndIterations(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - - registry := metrics.NewRegistry() - builtinMetrics := 
metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + rs := getRuntimeState(t) + execScheduler, err := NewExecutionScheduler(runner, rs) require.NoError(t, err) samples := make(chan metrics.SampleContainer, 300) @@ -1170,17 +1139,8 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { counter.add(6, { place: "defaultAfterSleep" }); }`) - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) + rs := getRuntimeState(t) + runner, err := js.New(rs, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) require.NoError(t, err) options, err := executor.DeriveScenariosFromShortcuts(runner.GetOptions().Apply(lib.Options{ @@ -1193,7 +1153,7 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { require.NoError(t, err) require.NoError(t, runner.SetOptions(options)) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + execScheduler, err := NewExecutionScheduler(runner, rs) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1216,7 +1176,7 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { case sampleContainer := <-sampleContainers: gotVus := false for _, s := range sampleContainer.GetSamples() { - if s.Metric == builtinMetrics.VUs || s.Metric == builtinMetrics.VUsMax { + if s.Metric == rs.BuiltinMetrics.VUs || s.Metric == rs.BuiltinMetrics.VUsMax { gotVus = true break } @@ -1260,7 +1220,7 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { } return metrics.IntoSampleTags(&tags) } - testCounter, err := registry.NewMetric("test_counter", metrics.Counter) + testCounter, err := 
rs.Registry.NewMetric("test_counter", metrics.Counter) require.NoError(t, err) getSample := func(expValue float64, expMetric *metrics.Metric, expTags ...string) metrics.SampleContainer { return metrics.Sample{ @@ -1277,7 +1237,7 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { net.Dialer{}, netext.NewResolver(net.LookupIP, 0, types.DNSfirst, types.DNSpreferIPv4), ).GetTrail(time.Now(), time.Now(), - true, emitIterations, getTags(expTags...), builtinMetrics) + true, emitIterations, getTags(expTags...), rs.BuiltinMetrics) } // Initially give a long time (5s) for the execScheduler to start @@ -1323,12 +1283,7 @@ func TestSetPaused(t *testing.T) { t.Parallel() t.Run("second pause is an error", func(t *testing.T) { t.Parallel() - runner := &minirunner.MiniRunner{} - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - sched, err := NewExecutionScheduler(runner, builtinMetrics, logger) + sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getRuntimeState(t)) require.NoError(t, err) sched.executors = []lib.Executor{pausableExecutor{err: nil}} @@ -1340,12 +1295,7 @@ func TestSetPaused(t *testing.T) { t.Run("unpause at the start is an error", func(t *testing.T) { t.Parallel() - runner := &minirunner.MiniRunner{} - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - sched, err := NewExecutionScheduler(runner, builtinMetrics, logger) + sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getRuntimeState(t)) require.NoError(t, err) sched.executors = []lib.Executor{pausableExecutor{err: nil}} err = sched.SetPaused(false) @@ -1355,12 +1305,7 @@ func TestSetPaused(t *testing.T) { t.Run("second unpause is an error", func(t *testing.T) { t.Parallel() - runner := &minirunner.MiniRunner{} - logger := logrus.New() - 
logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - sched, err := NewExecutionScheduler(runner, builtinMetrics, logger) + sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getRuntimeState(t)) require.NoError(t, err) sched.executors = []lib.Executor{pausableExecutor{err: nil}} require.NoError(t, sched.SetPaused(true)) @@ -1372,12 +1317,7 @@ func TestSetPaused(t *testing.T) { t.Run("an error on pausing is propagated", func(t *testing.T) { t.Parallel() - runner := &minirunner.MiniRunner{} - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - sched, err := NewExecutionScheduler(runner, builtinMetrics, logger) + sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getRuntimeState(t)) require.NoError(t, err) expectedErr := errors.New("testing pausable executor error") sched.executors = []lib.Executor{pausableExecutor{err: expectedErr}} @@ -1396,11 +1336,7 @@ func TestSetPaused(t *testing.T) { require.NoError(t, err) require.NoError(t, runner.SetOptions(options)) - logger := logrus.New() - logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - sched, err := NewExecutionScheduler(runner, builtinMetrics, logger) + sched, err := NewExecutionScheduler(runner, getRuntimeState(t)) require.NoError(t, err) err = sched.SetPaused(true) require.Error(t, err) @@ -1445,24 +1381,16 @@ func TestNewExecutionSchedulerHasWork(t *testing.T) { logger := logrus.New() logger.SetOutput(testutils.NewTestOutput(t)) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - runner, err := js.New( - &lib.RuntimeState{ - Logger: logger, - BuiltinMetrics: builtinMetrics, - Registry: registry, - }, - &loader.SourceData{ - URL: &url.URL{Path: 
"/script.js"}, - Data: script, - }, - nil, - ) + rs := &lib.RuntimeState{ + Logger: logger, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + } + runner, err := js.New(rs, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger) + execScheduler, err := NewExecutionScheduler(runner, rs) require.NoError(t, err) assert.Len(t, execScheduler.executors, 2) diff --git a/js/runner_test.go b/js/runner_test.go index f3aa93bbf8a..57da1678d8b 100644 --- a/js/runner_test.go +++ b/js/runner_test.go @@ -310,14 +310,13 @@ func TestSetupDataIsolation(t *testing.T) { options := runner.GetOptions() require.Empty(t, options.Validate()) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(runner, builtinMetrics, testutils.NewLogger(t)) + rs := runner.runtimeState + execScheduler, err := local.NewExecutionScheduler(runner, rs) require.NoError(t, err) mockOutput := mockoutput.New() engine, err := core.NewEngine( - execScheduler, options, lib.RuntimeOptions{}, []output.Output{mockOutput}, testutils.NewLogger(t), registry, + execScheduler, options, rs.RuntimeOptions, []output.Output{mockOutput}, rs.Logger, rs.Registry, ) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) @@ -2438,9 +2437,7 @@ func TestExecutionInfo(t *testing.T) { initVU, err := r.NewVU(1, 10, samples) require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - execScheduler, err := local.NewExecutionScheduler(r, builtinMetrics, testutils.NewLogger(t)) + execScheduler, err := local.NewExecutionScheduler(r, r.runtimeState) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) diff --git a/lib/runtime_state.go b/lib/runtime_state.go index 777ce718d35..e56e53db1e5 
100644 --- a/lib/runtime_state.go +++ b/lib/runtime_state.go @@ -7,7 +7,10 @@ import ( "go.k6.io/k6/metrics" ) -// RuntimeState represents what is mostly needed during the running of a test +// RuntimeState represents what is mostly needed during the running of a test. +// +// TODO: since this has nothing to do with the goja JS "runtime", maybe we +// should rename it to something more appropriate? e.g. TestRunState? type RuntimeState struct { RuntimeOptions RuntimeOptions // TODO maybe have a struct `Metrics` with `Registry` and `Builtin` ? diff --git a/lib/state.go b/lib/state.go index 89be0af3b17..d709b4b6009 100644 --- a/lib/state.go +++ b/lib/state.go @@ -42,11 +42,17 @@ type DialContexter interface { // State provides the volatile state for a VU. type State struct { - // Global options. - Options Options + // Global options and built-in metrics. + // + // TODO: remove them from here, the built-in metrics and the script options + // are not part of a VU's unique "state", they are global and the same for + // all VUs. Figure out how to thread them some other way, e.g. through the + // RuntimeState. The Samples channel might also benefit from that... + Options Options + BuiltinMetrics *metrics.BuiltinMetrics // Logger. Avoid using the global logger. - // TODO change to logrus.FieldLogger when there is time to fix all the tests + // TODO: change to logrus.FieldLogger when there is time to fix all the tests Logger *logrus.Logger // Current group; all emitted metrics are tagged with this. @@ -85,8 +91,6 @@ type State struct { // unique globally across k6 instances (taking into account execution // segments). GetScenarioGlobalVUIter func() uint64 - - BuiltinMetrics *metrics.BuiltinMetrics } // CloneTags makes a copy of the tags map and returns it. 
From 142ea478a6537e98006e0284bc8acb1e0227e0ff Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Sun, 31 Jul 2022 16:53:41 +0300 Subject: [PATCH 04/12] Rename lib.RuntimeState to TestPreInitState --- api/server_test.go | 4 +- api/v1/group_routes_test.go | 10 +- api/v1/metric_routes_test.go | 16 +-- api/v1/setup_teardown_routes_test.go | 8 +- api/v1/status_routes_test.go | 12 +- .../eventloop/eventloop_test.go | 6 +- cmd/outputs.go | 4 +- cmd/run.go | 14 +-- cmd/runtime_options_test.go | 4 +- cmd/test_load.go | 24 ++-- converter/har/converter_test.go | 2 +- core/engine_test.go | 108 +++++++++--------- core/local/k6execution_test.go | 8 +- core/local/local.go | 24 ++-- core/local/local_test.go | 78 ++++++------- js/bundle.go | 36 +++--- js/bundle_test.go | 16 +-- js/console_test.go | 2 +- js/init_and_modules_test.go | 4 +- js/module_loading_test.go | 22 ++-- js/modules/k6/marshalling_test.go | 2 +- js/runner.go | 34 +++--- js/runner_test.go | 66 +++++------ js/share_test.go | 2 +- lib/runtime_state.go | 23 ---- lib/test_state.go | 20 ++++ lib/{state.go => vu_state.go} | 2 +- 27 files changed, 275 insertions(+), 276 deletions(-) delete mode 100644 lib/runtime_state.go create mode 100644 lib/test_state.go rename lib/{state.go => vu_state.go} (98%) diff --git a/api/server_test.go b/api/server_test.go index 93b85ef403d..8489ebbd383 100644 --- a/api/server_test.go +++ b/api/server_test.go @@ -81,12 +81,12 @@ func TestWithEngine(t *testing.T) { logger := logrus.New() logger.SetOutput(testutils.NewTestOutput(t)) registry := metrics.NewRegistry() - rs := &lib.RuntimeState{ + piState := &lib.TestPreInitState{ Logger: logger, Registry: registry, BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), } - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, rs) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, piState) require.NoError(t, err) engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, 
nil, logger, registry) require.NoError(t, err) diff --git a/api/v1/group_routes_test.go b/api/v1/group_routes_test.go index de08b36fad4..8fb46d85d4f 100644 --- a/api/v1/group_routes_test.go +++ b/api/v1/group_routes_test.go @@ -37,9 +37,9 @@ import ( "go.k6.io/k6/metrics" ) -func getRuntimeState(tb testing.TB) *lib.RuntimeState { +func getTestPreInitState(tb testing.TB) *lib.TestPreInitState { reg := metrics.NewRegistry() - return &lib.RuntimeState{ + return &lib.TestPreInitState{ Logger: testutils.NewLogger(tb), RuntimeOptions: lib.RuntimeOptions{}, Registry: reg, @@ -55,10 +55,10 @@ func TestGetGroups(t *testing.T) { g2, err := g1.Group("group 2") assert.NoError(t, err) - rs := getRuntimeState(t) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Group: g0}, rs) + piState := getTestPreInitState(t) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Group: g0}, piState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, rs.RuntimeOptions, nil, rs.Logger, rs.Registry) + engine, err := core.NewEngine(execScheduler, lib.Options{}, piState.RuntimeOptions, nil, piState.Logger, piState.Registry) require.NoError(t, err) t.Run("list", func(t *testing.T) { diff --git a/api/v1/metric_routes_test.go b/api/v1/metric_routes_test.go index 88e5b13fb1e..bf0255a35bb 100644 --- a/api/v1/metric_routes_test.go +++ b/api/v1/metric_routes_test.go @@ -40,12 +40,12 @@ import ( func TestGetMetrics(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) - testMetric, err := rs.Registry.NewMetric("my_metric", metrics.Trend, metrics.Time) + piState := getTestPreInitState(t) + testMetric, err := piState.Registry.NewMetric("my_metric", metrics.Trend, metrics.Time) require.NoError(t, err) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, rs) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, piState) require.NoError(t, err) - engine, err := 
core.NewEngine(execScheduler, lib.Options{}, rs.RuntimeOptions, nil, rs.Logger, rs.Registry) + engine, err := core.NewEngine(execScheduler, lib.Options{}, piState.RuntimeOptions, nil, piState.Logger, piState.Registry) require.NoError(t, err) engine.MetricsEngine.ObservedMetrics = map[string]*metrics.Metric{ @@ -99,12 +99,12 @@ func TestGetMetrics(t *testing.T) { func TestGetMetric(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) - testMetric, err := rs.Registry.NewMetric("my_metric", metrics.Trend, metrics.Time) + piState := getTestPreInitState(t) + testMetric, err := piState.Registry.NewMetric("my_metric", metrics.Trend, metrics.Time) require.NoError(t, err) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, rs) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, piState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, rs.RuntimeOptions, nil, rs.Logger, rs.Registry) + engine, err := core.NewEngine(execScheduler, lib.Options{}, piState.RuntimeOptions, nil, piState.Logger, piState.Registry) require.NoError(t, err) engine.MetricsEngine.ObservedMetrics = map[string]*metrics.Metric{ diff --git a/api/v1/setup_teardown_routes_test.go b/api/v1/setup_teardown_routes_test.go index 7f0d5d88704..193ae07c83a 100644 --- a/api/v1/setup_teardown_routes_test.go +++ b/api/v1/setup_teardown_routes_test.go @@ -137,9 +137,9 @@ func TestSetupData(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) + piState := getTestPreInitState(t) runner, err := js.New( - rs, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: testCase.script}, nil, + piState, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: testCase.script}, nil, ) require.NoError(t, err) runner.SetOptions(lib.Options{ @@ -150,10 +150,10 @@ func TestSetupData(t *testing.T) { SetupTimeout: types.NullDurationFrom(5 * time.Second), TeardownTimeout: types.NullDurationFrom(5 * 
time.Second), }) - execScheduler, err := local.NewExecutionScheduler(runner, rs) + execScheduler, err := local.NewExecutionScheduler(runner, piState) require.NoError(t, err) engine, err := core.NewEngine( - execScheduler, runner.GetOptions(), rs.RuntimeOptions, nil, rs.Logger, rs.Registry, + execScheduler, runner.GetOptions(), piState.RuntimeOptions, nil, piState.Logger, piState.Registry, ) require.NoError(t, err) diff --git a/api/v1/status_routes_test.go b/api/v1/status_routes_test.go index 688b0d30544..31c52da2f7b 100644 --- a/api/v1/status_routes_test.go +++ b/api/v1/status_routes_test.go @@ -42,10 +42,10 @@ import ( func TestGetStatus(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, rs) + piState := getTestPreInitState(t) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, piState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, rs.RuntimeOptions, nil, rs.Logger, rs.Registry) + engine, err := core.NewEngine(execScheduler, lib.Options{}, piState.RuntimeOptions, nil, piState.Logger, piState.Registry) require.NoError(t, err) rw := httptest.NewRecorder() @@ -130,10 +130,10 @@ func TestPatchStatus(t *testing.T) { require.NoError(t, err) options := lib.Options{Scenarios: scenarios} - rs := getRuntimeState(t) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Options: options}, rs) + piState := getTestPreInitState(t) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Options: options}, piState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, options, rs.RuntimeOptions, nil, rs.Logger, rs.Registry) + engine, err := core.NewEngine(execScheduler, options, piState.RuntimeOptions, nil, piState.Logger, piState.Registry) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) diff --git 
a/cmd/integration_tests/eventloop/eventloop_test.go b/cmd/integration_tests/eventloop/eventloop_test.go index 286dba272c0..d474651748f 100644 --- a/cmd/integration_tests/eventloop/eventloop_test.go +++ b/cmd/integration_tests/eventloop/eventloop_test.go @@ -29,14 +29,14 @@ func eventLoopTest(t *testing.T, script []byte, testHandle func(context.Context, logger.AddHook(logHook) registry := metrics.NewRegistry() - rs := &lib.RuntimeState{ + piState := &lib.TestPreInitState{ Logger: logger, Registry: registry, BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), } script = []byte("import {setTimeout} from 'k6/x/events';\n" + string(script)) - runner, err := js.New(rs, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) + runner, err := js.New(piState, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -50,7 +50,7 @@ func eventLoopTest(t *testing.T, script []byte, testHandle func(context.Context, require.Empty(t, newOpts.Validate()) require.NoError(t, runner.SetOptions(newOpts)) - execScheduler, err := local.NewExecutionScheduler(runner, rs) + execScheduler, err := local.NewExecutionScheduler(runner, piState) require.NoError(t, err) samples := make(chan metrics.SampleContainer, newOpts.MetricSamplesBufferSize.Int64) diff --git a/cmd/outputs.go b/cmd/outputs.go index b1913ebdc03..b494b3eef49 100644 --- a/cmd/outputs.go +++ b/cmd/outputs.go @@ -90,7 +90,7 @@ func createOutputs(gs *globalState, test *loadedTest, executionPlan []lib.Execut StdErr: gs.stdErr, FS: gs.fs, ScriptOptions: test.derivedConfig.Options, - RuntimeOptions: test.runtimeState.RuntimeOptions, + RuntimeOptions: test.preInitState.RuntimeOptions, ExecutionPlan: executionPlan, } result := make([]output.Output, 0, len(test.derivedConfig.Out)) @@ -120,7 +120,7 @@ func createOutputs(gs *globalState, test *loadedTest, executionPlan []lib.Execut } if builtinMetricOut, ok := 
out.(output.WithBuiltinMetrics); ok { - builtinMetricOut.SetBuiltinMetrics(test.runtimeState.BuiltinMetrics) + builtinMetricOut.SetBuiltinMetrics(test.preInitState.BuiltinMetrics) } result = append(result, out) diff --git a/cmd/run.go b/cmd/run.go index a444b39eb78..d3747753ed9 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -86,10 +86,10 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { runCtx, runCancel := context.WithCancel(lingerCtx) defer runCancel() - logger := test.runtimeState.Logger + logger := test.preInitState.Logger // Create a local execution scheduler wrapping the runner. logger.Debug("Initializing the execution scheduler...") - execScheduler, err := local.NewExecutionScheduler(test.initRunner, test.runtimeState) + execScheduler, err := local.NewExecutionScheduler(test.initRunner, test.preInitState) if err != nil { return err } @@ -126,8 +126,8 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { // Create the engine. initBar.Modify(pb.WithConstProgress(0, "Init engine")) engine, err := core.NewEngine( - execScheduler, conf.Options, test.runtimeState.RuntimeOptions, - outputs, logger, test.runtimeState.Registry, + execScheduler, conf.Options, test.preInitState.RuntimeOptions, + outputs, logger, test.preInitState.Registry, ) if err != nil { return err @@ -230,7 +230,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { } // Handle the end-of-test summary. 
- if !test.runtimeState.RuntimeOptions.NoSummary.Bool { + if !test.preInitState.RuntimeOptions.NoSummary.Bool { engine.MetricsEngine.MetricsLock.Lock() // TODO: refactor so this is not needed summaryResult, err := test.initRunner.HandleSummary(globalCtx, &lib.Summary{ Metrics: engine.MetricsEngine.ObservedMetrics, @@ -268,8 +268,8 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { logger.Debug("Waiting for engine processes to finish...") engineWait() logger.Debug("Everything has finished, exiting k6!") - if test.runtimeState.KeyLogger != nil { - if err := test.runtimeState.KeyLogger.Close(); err != nil { + if test.preInitState.KeyLogger != nil { + if err := test.preInitState.KeyLogger.Close(); err != nil { logger.WithError(err).Warn("Error while closing the SSLKEYLOGFILE") } } diff --git a/cmd/runtime_options_test.go b/cmd/runtime_options_test.go index dfc55501895..8130da41e43 100644 --- a/cmd/runtime_options_test.go +++ b/cmd/runtime_options_test.go @@ -84,7 +84,7 @@ func testRuntimeOptionsCase(t *testing.T, tc runtimeOptionsTestCase) { sourceRootPath: "script.js", source: &loader.SourceData{Data: jsCode.Bytes(), URL: &url.URL{Path: "/script.js", Scheme: "file"}}, fileSystems: map[string]afero.Fs{"file": fs}, - runtimeState: &lib.RuntimeState{ + preInitState: &lib.TestPreInitState{ Logger: ts.logger, RuntimeOptions: rtOpts, Registry: registry, @@ -103,7 +103,7 @@ func testRuntimeOptionsCase(t *testing.T, tc runtimeOptionsTestCase) { sourceRootPath: "script.tar", source: &loader.SourceData{Data: archiveBuf.Bytes(), URL: &url.URL{Path: "/script.tar", Scheme: "file"}}, fileSystems: map[string]afero.Fs{"file": fs}, - runtimeState: &lib.RuntimeState{ + preInitState: &lib.TestPreInitState{ Logger: ts.logger, RuntimeOptions: rtOpts, Registry: registry, diff --git a/cmd/test_load.go b/cmd/test_load.go index ff1445ceb3a..5e5bf125cb9 100644 --- a/cmd/test_load.go +++ b/cmd/test_load.go @@ -33,7 +33,7 @@ type loadedTest struct { source *loader.SourceData fs 
afero.Fs fileSystems map[string]afero.Fs - runtimeState *lib.RuntimeState + preInitState *lib.TestPreInitState initRunner lib.Runner // TODO: rename to something more appropriate // Only set if cliConfigGetter is supplied to loadAndConfigureTest() or if @@ -70,7 +70,7 @@ func loadAndConfigureTest( } registry := metrics.NewRegistry() - state := &lib.RuntimeState{ + state := &lib.TestPreInitState{ Logger: gs.logger, RuntimeOptions: runtimeOptions, Registry: registry, @@ -83,7 +83,7 @@ func loadAndConfigureTest( source: src, fs: gs.fs, fileSystems: fileSystems, - runtimeState: state, + preInitState: state, } gs.logger.Debugf("Initializing k6 runner for '%s' (%s)...", sourceRootPath, resolvedPath) @@ -105,16 +105,16 @@ func (lt *loadedTest) initializeFirstRunner(gs *globalState) error { testPath := lt.source.URL.String() logger := gs.logger.WithField("test_path", testPath) - testType := lt.runtimeState.RuntimeOptions.TestType.String + testType := lt.preInitState.RuntimeOptions.TestType.String if testType == "" { logger.Debug("Detecting test type for...") testType = detectTestType(lt.source.Data) } - if lt.runtimeState.RuntimeOptions.KeyWriter.Valid { + if lt.preInitState.RuntimeOptions.KeyWriter.Valid { logger.Warnf("SSLKEYLOGFILE was specified, logging TLS connection keys to '%s'...", - lt.runtimeState.RuntimeOptions.KeyWriter.String) - keylogFilename := lt.runtimeState.RuntimeOptions.KeyWriter.String + lt.preInitState.RuntimeOptions.KeyWriter.String) + keylogFilename := lt.preInitState.RuntimeOptions.KeyWriter.String // if path is absolute - no point doing anything if !filepath.IsAbs(keylogFilename) { // filepath.Abs could be used but it will get the pwd from `os` package instead of what is in lt.pwd @@ -125,12 +125,12 @@ func (lt *loadedTest) initializeFirstRunner(gs *globalState) error { if err != nil { return fmt.Errorf("couldn't get absolute path for keylog file: %w", err) } - lt.runtimeState.KeyLogger = &syncWriteCloser{w: f} + lt.preInitState.KeyLogger = 
&syncWriteCloser{w: f} } switch testType { case testTypeJS: logger.Debug("Trying to load as a JS test...") - runner, err := js.New(lt.runtimeState, lt.source, lt.fileSystems) + runner, err := js.New(lt.preInitState, lt.source, lt.fileSystems) // TODO: should we use common.UnwrapGojaInterruptedError() here? if err != nil { return fmt.Errorf("could not load JS test '%s': %w", testPath, err) @@ -151,7 +151,7 @@ func (lt *loadedTest) initializeFirstRunner(gs *globalState) error { switch arc.Type { case testTypeJS: logger.Debug("Evaluating JS from archive bundle...") - lt.initRunner, err = js.NewFromArchive(lt.runtimeState, arc) + lt.initRunner, err = js.NewFromArchive(lt.preInitState, arc) if err != nil { return fmt.Errorf("could not load JS from test archive bundle '%s': %w", testPath, err) } @@ -208,14 +208,14 @@ func (lt *loadedTest) consolidateDeriveAndValidateConfig( // Parse the thresholds, only if the --no-threshold flag is not set. // If parsing the threshold expressions failed, consider it as an // invalid configuration error. 
- if !lt.runtimeState.RuntimeOptions.NoThresholds.Bool { + if !lt.preInitState.RuntimeOptions.NoThresholds.Bool { for metricName, thresholdsDefinition := range consolidatedConfig.Options.Thresholds { err = thresholdsDefinition.Parse() if err != nil { return errext.WithExitCodeIfNone(err, exitcodes.InvalidConfig) } - err = thresholdsDefinition.Validate(metricName, lt.runtimeState.Registry) + err = thresholdsDefinition.Validate(metricName, lt.preInitState.Registry) if err != nil { return errext.WithExitCodeIfNone(err, exitcodes.InvalidConfig) } diff --git a/converter/har/converter_test.go b/converter/har/converter_test.go index f5d259a080a..aeacfc8324b 100644 --- a/converter/har/converter_test.go +++ b/converter/har/converter_test.go @@ -62,7 +62,7 @@ func TestBuildK6RequestObject(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) _, err = js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, diff --git a/core/engine_test.go b/core/engine_test.go index 58bf4dc5c7f..c6d6bb5d1a3 100644 --- a/core/engine_test.go +++ b/core/engine_test.go @@ -58,12 +58,12 @@ type testStruct struct { run func() error runCancel func() wait func() - rs *lib.RuntimeState + piState *lib.TestPreInitState } -func getRuntimeState(tb testing.TB) *lib.RuntimeState { +func getTestPreInitState(tb testing.TB) *lib.TestPreInitState { reg := metrics.NewRegistry() - return &lib.RuntimeState{ + return &lib.TestPreInitState{ Logger: testutils.NewLogger(tb), RuntimeOptions: lib.RuntimeOptions{}, Registry: reg, @@ -72,9 +72,8 @@ func getRuntimeState(tb testing.TB) *lib.RuntimeState { } // Wrapper around NewEngine that applies a logger and manages the options. 
-func newTestEngineWithRuntimeState( //nolint:golint - t *testing.T, runTimeout *time.Duration, runner lib.Runner, outputs []output.Output, opts lib.Options, - rs *lib.RuntimeState, +func newTestEngineWithTestPreInitState( //nolint:golint + t *testing.T, runTimeout *time.Duration, runner lib.Runner, outputs []output.Output, opts lib.Options, piState *lib.TestPreInitState, ) *testStruct { if runner == nil { runner = &minirunner.MiniRunner{} @@ -82,16 +81,16 @@ func newTestEngineWithRuntimeState( //nolint:golint newOpts, err := executor.DeriveScenariosFromShortcuts(lib.Options{ MetricSamplesBufferSize: null.NewInt(200, false), - }.Apply(runner.GetOptions()).Apply(opts), rs.Logger) + }.Apply(runner.GetOptions()).Apply(opts), piState.Logger) require.NoError(t, err) require.Empty(t, newOpts.Validate()) require.NoError(t, runner.SetOptions(newOpts)) - execScheduler, err := local.NewExecutionScheduler(runner, rs) + execScheduler, err := local.NewExecutionScheduler(runner, piState) require.NoError(t, err) - engine, err := NewEngine(execScheduler, opts, rs.RuntimeOptions, outputs, rs.Logger, rs.Registry) + engine, err := NewEngine(execScheduler, opts, piState.RuntimeOptions, outputs, piState.Logger, piState.Registry) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) @@ -117,6 +116,7 @@ func newTestEngineWithRuntimeState( //nolint:golint waitFn() engine.OutputManager.StopOutputs() }, + piState: piState, } return test } @@ -124,7 +124,7 @@ func newTestEngineWithRuntimeState( //nolint:golint func newTestEngine( t *testing.T, runTimeout *time.Duration, runner lib.Runner, outputs []output.Output, opts lib.Options, ) *testStruct { - return newTestEngineWithRuntimeState(t, runTimeout, runner, outputs, opts, getRuntimeState(t)) + return newTestEngineWithTestPreInitState(t, runTimeout, runner, outputs, opts, getTestPreInitState(t)) } func TestEngineRun(t *testing.T) { @@ -164,8 +164,8 @@ func TestEngineRun(t *testing.T) { t.Run("collects samples", 
func(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) - testMetric, err := rs.Registry.NewMetric("test_metric", metrics.Trend) + piState := getTestPreInitState(t) + testMetric, err := piState.Registry.NewMetric("test_metric", metrics.Trend) require.NoError(t, err) signalChan := make(chan interface{}) @@ -181,10 +181,10 @@ func TestEngineRun(t *testing.T) { } mockOutput := mockoutput.New() - test := newTestEngineWithRuntimeState(t, nil, runner, []output.Output{mockOutput}, lib.Options{ + test := newTestEngineWithTestPreInitState(t, nil, runner, []output.Output{mockOutput}, lib.Options{ VUs: null.IntFrom(1), Iterations: null.IntFrom(1), - }, rs) + }, piState) errC := make(chan error) go func() { errC <- test.run() }() @@ -234,8 +234,8 @@ func TestEngineStopped(t *testing.T) { func TestEngineOutput(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) - testMetric, err := rs.Registry.NewMetric("test_metric", metrics.Trend) + piState := getTestPreInitState(t) + testMetric, err := piState.Registry.NewMetric("test_metric", metrics.Trend) require.NoError(t, err) runner := &minirunner.MiniRunner{ @@ -246,10 +246,10 @@ func TestEngineOutput(t *testing.T) { } mockOutput := mockoutput.New() - test := newTestEngineWithRuntimeState(t, nil, runner, []output.Output{mockOutput}, lib.Options{ + test := newTestEngineWithTestPreInitState(t, nil, runner, []output.Output{mockOutput}, lib.Options{ VUs: null.IntFrom(1), Iterations: null.IntFrom(1), - }, rs) + }, piState) assert.NoError(t, test.run()) test.wait() @@ -277,8 +277,8 @@ func TestEngine_processSamples(t *testing.T) { t.Run("metric", func(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) - metric, err := rs.Registry.NewMetric("my_metric", metrics.Gauge) + piState := getTestPreInitState(t) + metric, err := piState.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) done := make(chan struct{}) @@ -289,7 +289,7 @@ func TestEngine_processSamples(t *testing.T) { return nil }, } - test := 
newTestEngineWithRuntimeState(t, nil, runner, nil, lib.Options{}, rs) + test := newTestEngineWithTestPreInitState(t, nil, runner, nil, lib.Options{}, piState) go func() { assert.NoError(t, test.run()) @@ -309,8 +309,8 @@ func TestEngine_processSamples(t *testing.T) { t.Run("submetric", func(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) - metric, err := rs.Registry.NewMetric("my_metric", metrics.Gauge) + piState := getTestPreInitState(t) + metric, err := piState.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) ths := metrics.NewThresholds([]string{`value<2`}) @@ -325,11 +325,11 @@ func TestEngine_processSamples(t *testing.T) { return nil }, } - test := newTestEngineWithRuntimeState(t, nil, runner, nil, lib.Options{ + test := newTestEngineWithTestPreInitState(t, nil, runner, nil, lib.Options{ Thresholds: map[string]metrics.Thresholds{ "my_metric{a:1}": ths, }, - }, rs) + }, piState) go func() { assert.NoError(t, test.run()) @@ -355,8 +355,8 @@ func TestEngine_processSamples(t *testing.T) { func TestEngineThresholdsWillAbort(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) - metric, err := rs.Registry.NewMetric("my_metric", metrics.Gauge) + piState := getTestPreInitState(t) + metric, err := piState.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) // The incoming samples for the metric set it to 1.25. 
Considering @@ -377,7 +377,7 @@ func TestEngineThresholdsWillAbort(t *testing.T) { return nil }, } - test := newTestEngineWithRuntimeState(t, nil, runner, nil, lib.Options{Thresholds: thresholds}, rs) + test := newTestEngineWithTestPreInitState(t, nil, runner, nil, lib.Options{Thresholds: thresholds}, piState) go func() { assert.NoError(t, test.run()) @@ -396,8 +396,8 @@ func TestEngineThresholdsWillAbort(t *testing.T) { func TestEngineAbortedByThresholds(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) - metric, err := rs.Registry.NewMetric("my_metric", metrics.Gauge) + piState := getTestPreInitState(t) + metric, err := piState.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) // The MiniRunner sets the value of the metric to 1.25. Considering @@ -421,7 +421,7 @@ func TestEngineAbortedByThresholds(t *testing.T) { }, } - test := newTestEngineWithRuntimeState(t, nil, runner, nil, lib.Options{Thresholds: thresholds}, rs) + test := newTestEngineWithTestPreInitState(t, nil, runner, nil, lib.Options{Thresholds: thresholds}, piState) defer test.wait() go func() { @@ -471,12 +471,12 @@ func TestEngine_processThresholds(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) - gaugeMetric, err := rs.Registry.NewMetric("my_metric", metrics.Gauge) + piState := getTestPreInitState(t) + gaugeMetric, err := piState.Registry.NewMetric("my_metric", metrics.Gauge) require.NoError(t, err) - counterMetric, err := rs.Registry.NewMetric("used_counter", metrics.Counter) + counterMetric, err := piState.Registry.NewMetric("used_counter", metrics.Counter) require.NoError(t, err) - _, err = rs.Registry.NewMetric("unused_counter", metrics.Counter) + _, err = piState.Registry.NewMetric("unused_counter", metrics.Counter) require.NoError(t, err) thresholds := make(map[string]metrics.Thresholds, len(data.ths)) @@ -488,8 +488,8 @@ func TestEngine_processThresholds(t *testing.T) { } runner := &minirunner.MiniRunner{} - test := 
newTestEngineWithRuntimeState( - t, nil, runner, nil, lib.Options{Thresholds: thresholds}, rs, + test := newTestEngineWithTestPreInitState( + t, nil, runner, nil, lib.Options{Thresholds: thresholds}, piState, ) test.engine.OutputManager.AddMetricSamples( @@ -600,7 +600,7 @@ func TestSentReceivedMetrics(t *testing.T) { runTest := func(t *testing.T, ts testScript, tc testCase, noConnReuse bool) (float64, float64) { r, err := js.New( - getRuntimeState(t), + getTestPreInitState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: []byte(ts.Code)}, nil, ) @@ -733,7 +733,7 @@ func TestRunTags(t *testing.T) { `)) r, err := js.New( - getRuntimeState(t), + getTestPreInitState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil, ) @@ -810,7 +810,7 @@ func TestSetupException(t *testing.T) { } `), 0x666)) runner, err := js.New( - getRuntimeState(t), + getTestPreInitState(t), &loader.SourceData{URL: &url.URL{Scheme: "file", Path: "/script.js"}, Data: script}, map[string]afero.Fs{"file": memfs}, ) @@ -856,9 +856,9 @@ func TestVuInitException(t *testing.T) { } `) - rs := getRuntimeState(t) + piState := getTestPreInitState(t) runner, err := js.New( - rs, + piState, &loader.SourceData{URL: &url.URL{Scheme: "file", Path: "/script.js"}, Data: script}, nil, ) @@ -869,9 +869,9 @@ func TestVuInitException(t *testing.T) { require.Empty(t, opts.Validate()) require.NoError(t, runner.SetOptions(opts)) - execScheduler, err := local.NewExecutionScheduler(runner, rs) + execScheduler, err := local.NewExecutionScheduler(runner, piState) require.NoError(t, err) - engine, err := NewEngine(execScheduler, opts, rs.RuntimeOptions, nil, rs.Logger, rs.Registry) + engine, err := NewEngine(execScheduler, opts, piState.RuntimeOptions, nil, piState.Logger, piState.Registry) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -925,7 +925,7 @@ func TestEmittedMetricsWhenScalingDown(t *testing.T) { `)) runner, err := js.New( - 
getRuntimeState(t), + getTestPreInitState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil, ) @@ -1008,7 +1008,7 @@ func TestMetricsEmission(t *testing.T) { t.Parallel() } runner, err := js.New( - getRuntimeState(t), + getTestPreInitState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: []byte(fmt.Sprintf(` import { sleep } from "k6"; import { Counter } from "k6/metrics"; @@ -1114,7 +1114,7 @@ func TestMinIterationDurationInSetupTeardownStage(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() runner, err := js.New( - getRuntimeState(t), + getTestPreInitState(t), &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script)}, nil, ) @@ -1139,8 +1139,8 @@ func TestMinIterationDurationInSetupTeardownStage(t *testing.T) { func TestEngineRunsTeardownEvenAfterTestRunIsAborted(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) - testMetric, err := rs.Registry.NewMetric("teardown_metric", metrics.Counter) + piState := getTestPreInitState(t) + testMetric, err := piState.Registry.NewMetric("teardown_metric", metrics.Counter) require.NoError(t, err) var test *testStruct @@ -1156,9 +1156,9 @@ func TestEngineRunsTeardownEvenAfterTestRunIsAborted(t *testing.T) { } mockOutput := mockoutput.New() - test = newTestEngineWithRuntimeState(t, nil, runner, []output.Output{mockOutput}, lib.Options{ + test = newTestEngineWithTestPreInitState(t, nil, runner, []output.Output{mockOutput}, lib.Options{ VUs: null.IntFrom(1), Iterations: null.IntFrom(1), - }, rs) + }, piState) assert.NoError(t, test.run()) test.wait() @@ -1225,13 +1225,13 @@ func TestActiveVUsCount(t *testing.T) { rtOpts := lib.RuntimeOptions{CompatibilityMode: null.StringFrom("base")} registry := metrics.NewRegistry() - rs := &lib.RuntimeState{ + piState := &lib.TestPreInitState{ Logger: logger, Registry: registry, BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), RuntimeOptions: rtOpts, } - runner, err := js.New(rs, &loader.SourceData{URL: 
&url.URL{Path: "/script.js"}, Data: script}, nil) + runner, err := js.New(piState, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) require.NoError(t, err) mockOutput := mockoutput.New() @@ -1244,7 +1244,7 @@ func TestActiveVUsCount(t *testing.T) { require.NoError(t, err) require.Empty(t, opts.Validate()) require.NoError(t, runner.SetOptions(opts)) - execScheduler, err := local.NewExecutionScheduler(runner, rs) + execScheduler, err := local.NewExecutionScheduler(runner, piState) require.NoError(t, err) engine, err := NewEngine(execScheduler, opts, rtOpts, []output.Output{mockOutput}, logger, registry) require.NoError(t, err) diff --git a/core/local/k6execution_test.go b/core/local/k6execution_test.go index ab39e5cf74d..fee68206cfe 100644 --- a/core/local/k6execution_test.go +++ b/core/local/k6execution_test.go @@ -88,7 +88,7 @@ func TestExecutionInfoVUSharing(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, @@ -201,7 +201,7 @@ func TestExecutionInfoScenarioIter(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, @@ -283,7 +283,7 @@ func TestSharedIterationsStable(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, @@ -418,7 +418,7 @@ func TestExecutionInfoAll(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: 
builtinMetrics, Registry: registry, diff --git a/core/local/local.go b/core/local/local.go index 60fac7b0f21..b3ecfa5c2d7 100644 --- a/core/local/local.go +++ b/core/local/local.go @@ -40,7 +40,7 @@ import ( type ExecutionScheduler struct { runner lib.Runner options lib.Options - runtimeState *lib.RuntimeState + preInitState *lib.TestPreInitState initProgress *pb.ProgressBar executorConfigs []lib.ExecutorConfig // sorted by (startTime, ID) @@ -62,7 +62,7 @@ var _ lib.ExecutionScheduler = &ExecutionScheduler{} // instance, without initializing it beyond the bare minimum. Specifically, it // creates the needed executor instances and a lot of state placeholders, but it // doesn't initialize the executors and it doesn't initialize or run VUs. -func NewExecutionScheduler(runner lib.Runner, rs *lib.RuntimeState) (*ExecutionScheduler, error) { +func NewExecutionScheduler(runner lib.Runner, piState *lib.TestPreInitState) (*ExecutionScheduler, error) { options := runner.GetOptions() et, err := lib.NewExecutionTuple(options.ExecutionSegment, options.ExecutionSegmentSequence) if err != nil { @@ -72,7 +72,7 @@ func NewExecutionScheduler(runner lib.Runner, rs *lib.RuntimeState) (*ExecutionS maxPlannedVUs := lib.GetMaxPlannedVUs(executionPlan) maxPossibleVUs := lib.GetMaxPossibleVUs(executionPlan) - executionState := lib.NewExecutionState(options, et, rs.BuiltinMetrics, maxPlannedVUs, maxPossibleVUs) + executionState := lib.NewExecutionState(options, et, piState.BuiltinMetrics, maxPlannedVUs, maxPossibleVUs) maxDuration, _ := lib.GetEndOffset(executionPlan) // we don't care if the end offset is final executorConfigs := options.Scenarios.GetSortedConfigs() @@ -80,13 +80,13 @@ func NewExecutionScheduler(runner lib.Runner, rs *lib.RuntimeState) (*ExecutionS // Only take executors which have work. 
for _, sc := range executorConfigs { if !sc.HasWork(et) { - rs.Logger.Warnf( + piState.Logger.Warnf( "Executor '%s' is disabled for segment %s due to lack of work!", sc.GetName(), options.ExecutionSegment, ) continue } - s, err := sc.NewExecutor(executionState, rs.Logger.WithFields(logrus.Fields{ + s, err := sc.NewExecutor(executionState, piState.Logger.WithFields(logrus.Fields{ "scenario": sc.GetName(), "executor": sc.GetType(), })) @@ -104,7 +104,7 @@ func NewExecutionScheduler(runner lib.Runner, rs *lib.RuntimeState) (*ExecutionS return &ExecutionScheduler{ runner: runner, - runtimeState: rs, + preInitState: piState, options: options, initProgress: pb.New(pb.WithConstLeft("Init")), @@ -231,7 +231,7 @@ func (e *ExecutionScheduler) initVUsConcurrently( } func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- metrics.SampleContainer) { - e.runtimeState.Logger.Debug("Starting emission of VUs and VUsMax metrics...") + e.preInitState.Logger.Debug("Starting emission of VUs and VUsMax metrics...") emitMetrics := func() { t := time.Now() @@ -259,7 +259,7 @@ func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- me go func() { defer func() { ticker.Stop() - e.runtimeState.Logger.Debug("Metrics emission of VUs and VUsMax metrics stopped") + e.preInitState.Logger.Debug("Metrics emission of VUs and VUsMax metrics stopped") close(e.vusEmissionStopped) }() @@ -281,7 +281,7 @@ func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- me func (e *ExecutionScheduler) Init(ctx context.Context, samplesOut chan<- metrics.SampleContainer) error { e.emitVUsAndVUsMax(ctx, samplesOut) - logger := e.runtimeState.Logger.WithField("phase", "local-execution-scheduler-init") + logger := e.preInitState.Logger.WithField("phase", "local-execution-scheduler-init") vusToInitialize := lib.GetMaxPlannedVUs(e.executionPlan) logger.WithFields(logrus.Fields{ "neededVUs": vusToInitialize, @@ -348,7 +348,7 @@ func (e *ExecutionScheduler) 
runExecutor( ) { executorConfig := executor.GetConfig() executorStartTime := executorConfig.GetStartTime() - executorLogger := e.runtimeState.Logger.WithFields(logrus.Fields{ + executorLogger := e.preInitState.Logger.WithFields(logrus.Fields{ "executor": executorConfig.GetName(), "type": executorConfig.GetType(), "startTime": executorStartTime, @@ -400,7 +400,7 @@ func (e *ExecutionScheduler) Run(globalCtx, runCtx context.Context, engineOut ch }() executorsCount := len(e.executors) - logger := e.runtimeState.Logger.WithField("phase", "local-execution-scheduler-run") + logger := e.preInitState.Logger.WithField("phase", "local-execution-scheduler-run") e.initProgress.Modify(pb.WithConstLeft("Run")) var interrupted bool defer func() { @@ -497,7 +497,7 @@ func (e *ExecutionScheduler) SetPaused(pause bool) error { if pause { return fmt.Errorf("execution is already paused") } - e.runtimeState.Logger.Debug("Starting execution") + e.preInitState.Logger.Debug("Starting execution") return e.state.Resume() } diff --git a/core/local/local_test.go b/core/local/local_test.go index ce8305f3f33..daf8107caba 100644 --- a/core/local/local_test.go +++ b/core/local/local_test.go @@ -53,9 +53,9 @@ import ( "go.k6.io/k6/metrics" ) -func getRuntimeState(tb testing.TB) *lib.RuntimeState { +func getTestPreInitState(tb testing.TB) *lib.TestPreInitState { reg := metrics.NewRegistry() - return &lib.RuntimeState{ + return &lib.TestPreInitState{ Logger: testutils.NewLogger(tb), RuntimeOptions: lib.RuntimeOptions{}, Registry: reg, @@ -78,12 +78,12 @@ func newTestExecutionScheduler( require.NoError(t, runner.SetOptions(newOpts)) - rs := getRuntimeState(t) + piState := getTestPreInitState(t) if logger != nil { - rs.Logger = logger + piState.Logger = logger } - execScheduler, err = NewExecutionScheduler(runner, rs) + execScheduler, err = NewExecutionScheduler(runner, piState) require.NoError(t, err) samples = make(chan metrics.SampleContainer, newOpts.MetricSamplesBufferSize.Int64) @@ -137,14 
+137,14 @@ func TestExecutionSchedulerRunNonDefault(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) + piState := getTestPreInitState(t) runner, err := js.New( - rs, &loader.SourceData{ + piState, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script), }, nil) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, rs) + execScheduler, err := NewExecutionScheduler(runner, piState) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -245,17 +245,17 @@ func TestExecutionSchedulerRunEnv(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) - rs.RuntimeOptions = lib.RuntimeOptions{Env: map[string]string{"TESTVAR": "global"}} + piState := getTestPreInitState(t) + piState.RuntimeOptions = lib.RuntimeOptions{Env: map[string]string{"TESTVAR": "global"}} runner, err := js.New( - rs, &loader.SourceData{ + piState, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script), }, nil, ) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, rs) + execScheduler, err := NewExecutionScheduler(runner, piState) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -311,9 +311,9 @@ func TestExecutionSchedulerSystemTags(t *testing.T) { http.get("HTTPBIN_IP_URL/"); }`) - rs := getRuntimeState(t) + piState := getTestPreInitState(t) runner, err := js.New( - rs, &loader.SourceData{ + piState, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(script), }, nil) @@ -323,7 +323,7 @@ func TestExecutionSchedulerSystemTags(t *testing.T) { SystemTags: &metrics.DefaultSystemTagSet, }))) - execScheduler, err := NewExecutionScheduler(runner, rs) + execScheduler, err := NewExecutionScheduler(runner, piState) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) @@ -448,16 +448,16 @@ func 
TestExecutionSchedulerRunCustomTags(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - rs := getRuntimeState(t) + piState := getTestPreInitState(t) runner, err := js.New( - rs, &loader.SourceData{ + piState, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script), }, nil, ) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, rs) + execScheduler, err := NewExecutionScheduler(runner, piState) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) @@ -609,10 +609,10 @@ func TestExecutionSchedulerRunCustomConfigNoCrossover(t *testing.T) { } `) - rs := getRuntimeState(t) - rs.RuntimeOptions.Env = map[string]string{"TESTGLOBALVAR": "global"} + piState := getTestPreInitState(t) + piState.RuntimeOptions.Env = map[string]string{"TESTGLOBALVAR": "global"} runner, err := js.New( - rs, &loader.SourceData{ + piState, &loader.SourceData{ URL: &url.URL{Path: "/script.js"}, Data: []byte(script), }, @@ -620,7 +620,7 @@ func TestExecutionSchedulerRunCustomConfigNoCrossover(t *testing.T) { ) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, rs) + execScheduler, err := NewExecutionScheduler(runner, piState) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) @@ -948,8 +948,8 @@ func TestExecutionSchedulerEndIterations(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - rs := getRuntimeState(t) - execScheduler, err := NewExecutionScheduler(runner, rs) + piState := getTestPreInitState(t) + execScheduler, err := NewExecutionScheduler(runner, piState) require.NoError(t, err) samples := make(chan metrics.SampleContainer, 300) @@ -1058,7 +1058,7 @@ func TestDNSResolver(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, 
BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1139,8 +1139,8 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { counter.add(6, { place: "defaultAfterSleep" }); }`) - rs := getRuntimeState(t) - runner, err := js.New(rs, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) + piState := getTestPreInitState(t) + runner, err := js.New(piState, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) require.NoError(t, err) options, err := executor.DeriveScenariosFromShortcuts(runner.GetOptions().Apply(lib.Options{ @@ -1153,7 +1153,7 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { require.NoError(t, err) require.NoError(t, runner.SetOptions(options)) - execScheduler, err := NewExecutionScheduler(runner, rs) + execScheduler, err := NewExecutionScheduler(runner, piState) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1176,7 +1176,7 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { case sampleContainer := <-sampleContainers: gotVus := false for _, s := range sampleContainer.GetSamples() { - if s.Metric == rs.BuiltinMetrics.VUs || s.Metric == rs.BuiltinMetrics.VUsMax { + if s.Metric == piState.BuiltinMetrics.VUs || s.Metric == piState.BuiltinMetrics.VUsMax { gotVus = true break } @@ -1220,7 +1220,7 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { } return metrics.IntoSampleTags(&tags) } - testCounter, err := rs.Registry.NewMetric("test_counter", metrics.Counter) + testCounter, err := piState.Registry.NewMetric("test_counter", metrics.Counter) require.NoError(t, err) getSample := func(expValue float64, expMetric *metrics.Metric, expTags ...string) metrics.SampleContainer { return metrics.Sample{ @@ -1237,7 +1237,7 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { net.Dialer{}, netext.NewResolver(net.LookupIP, 0, types.DNSfirst, types.DNSpreferIPv4), ).GetTrail(time.Now(), time.Now(), - true, emitIterations, getTags(expTags...), 
rs.BuiltinMetrics) + true, emitIterations, getTags(expTags...), piState.BuiltinMetrics) } // Initially give a long time (5s) for the execScheduler to start @@ -1283,7 +1283,7 @@ func TestSetPaused(t *testing.T) { t.Parallel() t.Run("second pause is an error", func(t *testing.T) { t.Parallel() - sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getRuntimeState(t)) + sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getTestPreInitState(t)) require.NoError(t, err) sched.executors = []lib.Executor{pausableExecutor{err: nil}} @@ -1295,7 +1295,7 @@ func TestSetPaused(t *testing.T) { t.Run("unpause at the start is an error", func(t *testing.T) { t.Parallel() - sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getRuntimeState(t)) + sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getTestPreInitState(t)) require.NoError(t, err) sched.executors = []lib.Executor{pausableExecutor{err: nil}} err = sched.SetPaused(false) @@ -1305,7 +1305,7 @@ func TestSetPaused(t *testing.T) { t.Run("second unpause is an error", func(t *testing.T) { t.Parallel() - sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getRuntimeState(t)) + sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getTestPreInitState(t)) require.NoError(t, err) sched.executors = []lib.Executor{pausableExecutor{err: nil}} require.NoError(t, sched.SetPaused(true)) @@ -1317,7 +1317,7 @@ func TestSetPaused(t *testing.T) { t.Run("an error on pausing is propagated", func(t *testing.T) { t.Parallel() - sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getRuntimeState(t)) + sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getTestPreInitState(t)) require.NoError(t, err) expectedErr := errors.New("testing pausable executor error") sched.executors = []lib.Executor{pausableExecutor{err: expectedErr}} @@ -1336,7 +1336,7 @@ func TestSetPaused(t *testing.T) { require.NoError(t, err) require.NoError(t, runner.SetOptions(options)) - sched, err := 
NewExecutionScheduler(runner, getRuntimeState(t)) + sched, err := NewExecutionScheduler(runner, getTestPreInitState(t)) require.NoError(t, err) err = sched.SetPaused(true) require.Error(t, err) @@ -1382,15 +1382,15 @@ func TestNewExecutionSchedulerHasWork(t *testing.T) { logger := logrus.New() logger.SetOutput(testutils.NewTestOutput(t)) registry := metrics.NewRegistry() - rs := &lib.RuntimeState{ + piState := &lib.TestPreInitState{ Logger: logger, Registry: registry, BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), } - runner, err := js.New(rs, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) + runner, err := js.New(piState, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, rs) + execScheduler, err := NewExecutionScheduler(runner, piState) require.NoError(t, err) assert.Len(t, execScheduler.executors, 2) diff --git a/js/bundle.go b/js/bundle.go index 6e653ccf0a5..41d18dd6ffc 100644 --- a/js/bundle.go +++ b/js/bundle.go @@ -73,19 +73,21 @@ type BundleInstance struct { } // NewBundle creates a new bundle from a source file and a filesystem. -func NewBundle(rs *lib.RuntimeState, src *loader.SourceData, filesystems map[string]afero.Fs) (*Bundle, error) { - compatMode, err := lib.ValidateCompatibilityMode(rs.RuntimeOptions.CompatibilityMode.String) +func NewBundle( + piState *lib.TestPreInitState, src *loader.SourceData, filesystems map[string]afero.Fs, +) (*Bundle, error) { + compatMode, err := lib.ValidateCompatibilityMode(piState.RuntimeOptions.CompatibilityMode.String) if err != nil { return nil, err } // Compile sources, both ES5 and ES6 are supported. 
code := string(src.Data) - c := compiler.New(rs.Logger) + c := compiler.New(piState.Logger) c.Options = compiler.Options{ CompatibilityMode: compatMode, Strict: true, - SourceMapLoader: generateSourceMapLoader(rs.Logger, filesystems), + SourceMapLoader: generateSourceMapLoader(piState.Logger, filesystems), } pgm, _, err := c.Compile(code, src.URL.String(), false) if err != nil { @@ -97,17 +99,17 @@ func NewBundle(rs *lib.RuntimeState, src *loader.SourceData, filesystems map[str Filename: src.URL, Source: code, Program: pgm, - BaseInitContext: NewInitContext(rs.Logger, rt, c, compatMode, filesystems, loader.Dir(src.URL)), - RuntimeOptions: rs.RuntimeOptions, + BaseInitContext: NewInitContext(piState.Logger, rt, c, compatMode, filesystems, loader.Dir(src.URL)), + RuntimeOptions: piState.RuntimeOptions, CompatibilityMode: compatMode, exports: make(map[string]goja.Callable), - registry: rs.Registry, + registry: piState.Registry, } - if err = bundle.instantiate(rs.Logger, rt, bundle.BaseInitContext, 0); err != nil { + if err = bundle.instantiate(piState.Logger, rt, bundle.BaseInitContext, 0); err != nil { return nil, err } - err = bundle.getExports(rs.Logger, rt, true) + err = bundle.getExports(piState.Logger, rt, true) if err != nil { return nil, err } @@ -116,12 +118,12 @@ func NewBundle(rs *lib.RuntimeState, src *loader.SourceData, filesystems map[str } // NewBundleFromArchive creates a new bundle from an lib.Archive. 
-func NewBundleFromArchive(rs *lib.RuntimeState, arc *lib.Archive) (*Bundle, error) { +func NewBundleFromArchive(piState *lib.TestPreInitState, arc *lib.Archive) (*Bundle, error) { if arc.Type != "js" { return nil, fmt.Errorf("expected bundle type 'js', got '%s'", arc.Type) } - rtOpts := rs.RuntimeOptions // copy the struct from the RuntimeState + rtOpts := piState.RuntimeOptions // copy the struct from the TestPreInitState if !rtOpts.CompatibilityMode.Valid { // `k6 run --compatibility-mode=whatever archive.tar` should override // whatever value is in the archive @@ -132,18 +134,18 @@ func NewBundleFromArchive(rs *lib.RuntimeState, arc *lib.Archive) (*Bundle, erro return nil, err } - c := compiler.New(rs.Logger) + c := compiler.New(piState.Logger) c.Options = compiler.Options{ Strict: true, CompatibilityMode: compatMode, - SourceMapLoader: generateSourceMapLoader(rs.Logger, arc.Filesystems), + SourceMapLoader: generateSourceMapLoader(piState.Logger, arc.Filesystems), } pgm, _, err := c.Compile(string(arc.Data), arc.FilenameURL.String(), false) if err != nil { return nil, err } rt := goja.New() - initctx := NewInitContext(rs.Logger, rt, c, compatMode, arc.Filesystems, arc.PwdURL) + initctx := NewInitContext(piState.Logger, rt, c, compatMode, arc.Filesystems, arc.PwdURL) env := arc.Env if env == nil { @@ -164,16 +166,16 @@ func NewBundleFromArchive(rs *lib.RuntimeState, arc *lib.Archive) (*Bundle, erro RuntimeOptions: rtOpts, CompatibilityMode: compatMode, exports: make(map[string]goja.Callable), - registry: rs.Registry, + registry: piState.Registry, } - if err = bundle.instantiate(rs.Logger, rt, bundle.BaseInitContext, 0); err != nil { + if err = bundle.instantiate(piState.Logger, rt, bundle.BaseInitContext, 0); err != nil { return nil, err } // Grab exported objects, but avoid overwriting options, which would // be initialized from the metadata.json at this point. 
- err = bundle.getExports(rs.Logger, rt, false) + err = bundle.getExports(piState.Logger, rt, false) if err != nil { return nil, err } diff --git a/js/bundle_test.go b/js/bundle_test.go index 0f4f588f240..f645e0b719f 100644 --- a/js/bundle_test.go +++ b/js/bundle_test.go @@ -50,7 +50,7 @@ import ( const isWindows = runtime.GOOS == "windows" -func getRuntimeState(tb testing.TB, logger *logrus.Logger, rtOpts *lib.RuntimeOptions) *lib.RuntimeState { +func getTestPreInitState(tb testing.TB, logger *logrus.Logger, rtOpts *lib.RuntimeOptions) *lib.TestPreInitState { if logger == nil { logger = testutils.NewLogger(tb) } @@ -58,7 +58,7 @@ func getRuntimeState(tb testing.TB, logger *logrus.Logger, rtOpts *lib.RuntimeOp rtOpts = &lib.RuntimeOptions{} } reg := metrics.NewRegistry() - return &lib.RuntimeState{ + return &lib.TestPreInitState{ Logger: logger, RuntimeOptions: *rtOpts, Registry: reg, @@ -82,7 +82,7 @@ func getSimpleBundle(tb testing.TB, filename, data string, opts ...interface{}) } return NewBundle( - getRuntimeState(tb, logger, rtOpts), + getTestPreInitState(tb, logger, rtOpts), &loader.SourceData{ URL: &url.URL{Path: filename, Scheme: "file"}, Data: []byte(data), @@ -502,7 +502,7 @@ func TestNewBundleFromArchive(t *testing.T) { } checkArchive := func(t *testing.T, arc *lib.Archive, rtOpts lib.RuntimeOptions, expError string) { - b, err := NewBundleFromArchive(getRuntimeState(t, logger, &rtOpts), arc) + b, err := NewBundleFromArchive(getTestPreInitState(t, logger, &rtOpts), arc) if expError != "" { require.Error(t, err) require.Contains(t, err.Error(), expError) @@ -585,7 +585,7 @@ func TestNewBundleFromArchive(t *testing.T) { PwdURL: &url.URL{Scheme: "file", Path: "/"}, Filesystems: nil, } - b, err := NewBundleFromArchive(getRuntimeState(t, logger, nil), arc) + b, err := NewBundleFromArchive(getTestPreInitState(t, logger, nil), arc) require.NoError(t, err) bi, err := b.Instantiate(logger, 0) require.NoError(t, err) @@ -724,7 +724,7 @@ func TestOpen(t *testing.T) 
{ } require.NoError(t, err) - arcBundle, err := NewBundleFromArchive(getRuntimeState(t, logger, nil), sourceBundle.makeArchive()) + arcBundle, err := NewBundleFromArchive(getTestPreInitState(t, logger, nil), sourceBundle.makeArchive()) require.NoError(t, err) @@ -824,7 +824,7 @@ func TestBundleEnv(t *testing.T) { require.NoError(t, err) logger := testutils.NewLogger(t) - b2, err := NewBundleFromArchive(getRuntimeState(t, logger, nil), b1.makeArchive()) + b2, err := NewBundleFromArchive(getTestPreInitState(t, logger, nil), b1.makeArchive()) require.NoError(t, err) bundles := map[string]*Bundle{"Source": b1, "Archive": b2} @@ -861,7 +861,7 @@ func TestBundleNotSharable(t *testing.T) { require.NoError(t, err) logger := testutils.NewLogger(t) - b2, err := NewBundleFromArchive(getRuntimeState(t, logger, nil), b1.makeArchive()) + b2, err := NewBundleFromArchive(getTestPreInitState(t, logger, nil), b1.makeArchive()) require.NoError(t, err) bundles := map[string]*Bundle{"Source": b1, "Archive": b2} diff --git a/js/console_test.go b/js/console_test.go index 14917cca408..4521079cacc 100644 --- a/js/console_test.go +++ b/js/console_test.go @@ -63,7 +63,7 @@ func getSimpleRunner(tb testing.TB, filename, data string, opts ...interface{}) registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) return New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, RuntimeOptions: rtOpts, BuiltinMetrics: builtinMetrics, diff --git a/js/init_and_modules_test.go b/js/init_and_modules_test.go index cf74a7b0115..73014711fe5 100644 --- a/js/init_and_modules_test.go +++ b/js/init_and_modules_test.go @@ -78,7 +78,7 @@ func TestNewJSRunnerWithCustomModule(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, @@ -115,7 +115,7 @@ func 
TestNewJSRunnerWithCustomModule(t *testing.T) { assert.Equal(t, checkModule.vuCtxCalled, 2) runnerFromArc, err := js.NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, diff --git a/js/module_loading_test.go b/js/module_loading_test.go index fdd722f49b5..b32d8d33a94 100644 --- a/js/module_loading_test.go +++ b/js/module_loading_test.go @@ -109,7 +109,7 @@ func TestLoadOnceGlobalVars(t *testing.T) { arc := r1.MakeArchive() registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - r2, err := NewFromArchive(&lib.RuntimeState{ + r2, err := NewFromArchive(&lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -167,7 +167,7 @@ func TestLoadExportsIsUsableInModule(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -222,7 +222,7 @@ func TestLoadDoesntBreakHTTPGet(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -275,7 +275,7 @@ func TestLoadGlobalVarsAreNotSharedBetweenVUs(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -347,7 +347,7 @@ func TestLoadCycle(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: 
testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -417,7 +417,7 @@ func TestLoadCycleBinding(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -489,7 +489,7 @@ func TestBrowserified(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -540,7 +540,7 @@ func TestLoadingUnexistingModuleDoesntPanic(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -582,7 +582,7 @@ func TestLoadingSourceMapsDoesntErrorOut(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -642,7 +642,7 @@ func TestOptionsAreGloballyReadable(t *testing.T) { arc := r1.MakeArchive() registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - r2, err := NewFromArchive(&lib.RuntimeState{ + r2, err := NewFromArchive(&lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -701,7 +701,7 @@ func TestOptionsAreNotGloballyWritable(t *testing.T) { arc := r1.MakeArchive() registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - r2, err := NewFromArchive(&lib.RuntimeState{ + r2, err := 
NewFromArchive(&lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, diff --git a/js/modules/k6/marshalling_test.go b/js/modules/k6/marshalling_test.go index da47142f58f..62be5f04429 100644 --- a/js/modules/k6/marshalling_test.go +++ b/js/modules/k6/marshalling_test.go @@ -119,7 +119,7 @@ func TestSetupDataMarshalling(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := js.New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, diff --git a/js/runner.go b/js/runner.go index db280d43d7b..5154abe4792 100644 --- a/js/runner.go +++ b/js/runner.go @@ -65,7 +65,7 @@ var nameToCertWarning sync.Once type Runner struct { Bundle *Bundle - runtimeState *lib.RuntimeState + preInitState *lib.TestPreInitState defaultGroup *lib.Group BaseDialer net.Dialer @@ -79,27 +79,27 @@ type Runner struct { } // New returns a new Runner for the provided source -func New(rs *lib.RuntimeState, src *loader.SourceData, filesystems map[string]afero.Fs) (*Runner, error) { - bundle, err := NewBundle(rs, src, filesystems) +func New(piState *lib.TestPreInitState, src *loader.SourceData, filesystems map[string]afero.Fs) (*Runner, error) { + bundle, err := NewBundle(piState, src, filesystems) if err != nil { return nil, err } - return NewFromBundle(rs, bundle) + return NewFromBundle(piState, bundle) } // NewFromArchive returns a new Runner from the source in the provided archive -func NewFromArchive(rs *lib.RuntimeState, arc *lib.Archive) (*Runner, error) { - bundle, err := NewBundleFromArchive(rs, arc) +func NewFromArchive(piState *lib.TestPreInitState, arc *lib.Archive) (*Runner, error) { + bundle, err := NewBundleFromArchive(piState, arc) if err != nil { return nil, err } - return NewFromBundle(rs, bundle) + return NewFromBundle(piState, bundle) } // NewFromBundle returns a new Runner from the 
provided Bundle -func NewFromBundle(rs *lib.RuntimeState, b *Bundle) (*Runner, error) { +func NewFromBundle(piState *lib.TestPreInitState, b *Bundle) (*Runner, error) { defaultGroup, err := lib.NewGroup("", nil) if err != nil { return nil, err @@ -108,14 +108,14 @@ func NewFromBundle(rs *lib.RuntimeState, b *Bundle) (*Runner, error) { defDNS := types.DefaultDNSConfig() r := &Runner{ Bundle: b, - runtimeState: rs, + preInitState: piState, defaultGroup: defaultGroup, BaseDialer: net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, DualStack: true, }, - console: newConsole(rs.Logger), + console: newConsole(piState.Logger), Resolver: netext.NewResolver( net.LookupIP, 0, defDNS.Select.DNSSelect, defDNS.Policy.DNSPolicy), ActualResolver: net.LookupIP, @@ -142,7 +142,7 @@ func (r *Runner) NewVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.Sampl //nolint:funlen func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.SampleContainer) (*VU, error) { // Instantiate a new bundle, make a VU out of it. - bi, err := r.Bundle.Instantiate(r.runtimeState.Logger, idLocal) + bi, err := r.Bundle.Instantiate(r.preInitState.Logger, idLocal) if err != nil { return nil, err } @@ -193,13 +193,13 @@ func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.Sampl MaxVersion: uint16(tlsVersions.Max), Certificates: certs, Renegotiation: tls.RenegotiateFreelyAsClient, - KeyLogWriter: r.runtimeState.KeyLogger, + KeyLogWriter: r.preInitState.KeyLogger, } // Follow NameToCertificate in https://pkg.go.dev/crypto/tls@go1.17.6#Config, leave this field nil // when it is empty if len(nameToCert) > 0 { nameToCertWarning.Do(func() { - r.runtimeState.Logger.Warn( + r.preInitState.Logger.Warn( "tlsAuth.domains option could be removed in the next releases, it's recommended to leave it empty " + "and let k6 automatically detect from the provided certificate. 
It follows the Go's NameToCertificate " + "deprecation - https://pkg.go.dev/crypto/tls@go1.17#Config.", @@ -246,7 +246,7 @@ func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.Sampl } vu.state = &lib.State{ - Logger: vu.Runner.runtimeState.Logger, + Logger: vu.Runner.preInitState.Logger, Options: vu.Runner.Bundle.Options, Transport: vu.Transport, Dialer: vu.Dialer, @@ -259,7 +259,7 @@ func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- metrics.Sampl Samples: vu.Samples, Tags: lib.NewTagMap(vu.Runner.Bundle.Options.RunTags.CloneTags()), Group: r.defaultGroup, - BuiltinMetrics: r.runtimeState.BuiltinMetrics, + BuiltinMetrics: r.preInitState.BuiltinMetrics, } vu.moduleVUImpl.state = vu.state _ = vu.Runtime.Set("console", vu.Console) @@ -434,7 +434,7 @@ func (r *Runner) SetOptions(opts lib.Options) error { // TODO: validate that all exec values are either nil or valid exported methods (or HTTP requests in the future) if opts.ConsoleOutput.Valid { - c, err := newFileConsole(opts.ConsoleOutput.String, r.runtimeState.Logger.Formatter) + c, err := newFileConsole(opts.ConsoleOutput.String, r.preInitState.Logger.Formatter) if err != nil { return err } @@ -811,7 +811,7 @@ func (u *VU) runFn( sampleTags := metrics.NewSampleTags(u.state.CloneTags()) u.state.Samples <- u.Dialer.GetTrail( - startTime, endTime, isFullIteration, isDefault, sampleTags, u.Runner.runtimeState.BuiltinMetrics) + startTime, endTime, isFullIteration, isDefault, sampleTags, u.Runner.preInitState.BuiltinMetrics) return v, isFullIteration, endTime.Sub(startTime), err } diff --git a/js/runner_test.go b/js/runner_test.go index 57da1678d8b..8d3609b75fe 100644 --- a/js/runner_test.go +++ b/js/runner_test.go @@ -110,7 +110,7 @@ func TestRunnerGetDefaultGroup(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), 
BuiltinMetrics: builtinMetrics, Registry: registry, @@ -127,7 +127,7 @@ func TestRunnerOptions(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -217,7 +217,7 @@ func TestOptionsPropagationToScript(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -310,13 +310,13 @@ func TestSetupDataIsolation(t *testing.T) { options := runner.GetOptions() require.Empty(t, options.Validate()) - rs := runner.runtimeState - execScheduler, err := local.NewExecutionScheduler(runner, rs) + piState := runner.preInitState + execScheduler, err := local.NewExecutionScheduler(runner, piState) require.NoError(t, err) mockOutput := mockoutput.New() engine, err := core.NewEngine( - execScheduler, options, rs.RuntimeOptions, []output.Output{mockOutput}, rs.Logger, rs.Registry, + execScheduler, options, piState.RuntimeOptions, []output.Output{mockOutput}, piState.Logger, piState.Registry, ) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) @@ -510,7 +510,7 @@ func TestRunnerIntegrationImports(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -547,7 +547,7 @@ func TestVURunContext(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, 
Registry: registry, @@ -598,7 +598,7 @@ func TestVURunInterrupt(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -640,7 +640,7 @@ func TestVURunInterruptDoesntPanic(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -705,7 +705,7 @@ func TestVUIntegrationGroups(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -765,7 +765,7 @@ func TestVUIntegrationMetrics(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -848,7 +848,7 @@ func TestVUIntegrationInsecureRequests(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -859,7 +859,7 @@ func TestVUIntegrationInsecureRequests(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - r.runtimeState.Logger, _ = logtest.NewNullLogger() + r.preInitState.Logger, _ = logtest.NewNullLogger() initVU, err := r.NewVU(1, 1, make(chan metrics.SampleContainer, 100)) require.NoError(t, err) @@ -899,7 +899,7 @@ func TestVUIntegrationBlacklistOption(t 
*testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -940,7 +940,7 @@ func TestVUIntegrationBlacklistScript(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -983,7 +983,7 @@ func TestVUIntegrationBlockHostnamesOption(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1026,7 +1026,7 @@ func TestVUIntegrationBlockHostnamesScript(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1080,7 +1080,7 @@ func TestVUIntegrationHosts(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1156,7 +1156,7 @@ func TestVUIntegrationTLSConfig(t *testing.T) { require.NoError(t, r1.SetOptions(lib.Options{Throw: null.BoolFrom(true)}.Apply(data.opts))) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1168,7 +1168,7 @@ func TestVUIntegrationTLSConfig(t *testing.T) { r := r t.Run(name, func(t *testing.T) { 
t.Parallel() - r.runtimeState.Logger, _ = logtest.NewNullLogger() + r.preInitState.Logger, _ = logtest.NewNullLogger() initVU, err := r.NewVU(1, 1, make(chan metrics.SampleContainer, 100)) require.NoError(t, err) @@ -1327,7 +1327,7 @@ func TestVUIntegrationCookiesReset(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1387,7 +1387,7 @@ func TestVUIntegrationCookiesNoReset(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1427,7 +1427,7 @@ func TestVUIntegrationVUID(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1583,7 +1583,7 @@ func TestVUIntegrationClientCerts(t *testing.T) { } require.NoError(t, r1.SetOptions(opt)) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1595,7 +1595,7 @@ func TestVUIntegrationClientCerts(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - r.runtimeState.Logger, _ = logtest.NewNullLogger() + r.preInitState.Logger, _ = logtest.NewNullLogger() initVU, err := r.NewVU(1, 1, make(chan metrics.SampleContainer, 100)) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1748,7 +1748,7 @@ func TestArchiveRunningIntegrity(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err 
:= NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -1793,7 +1793,7 @@ func TestArchiveNotPanicking(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -2006,7 +2006,7 @@ func TestVUPanic(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -2074,7 +2074,7 @@ func runMultiFileTestCase(t *testing.T, tc multiFileTestCase, tb *httpmultibin.H registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) runner, err := New( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, @@ -2116,7 +2116,7 @@ func runMultiFileTestCase(t *testing.T, tc multiFileTestCase, tb *httpmultibin.H arc := runner.MakeArchive() runnerFromArc, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, @@ -2341,7 +2341,7 @@ func TestForceHTTP1Feature(t *testing.T) { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: testutils.NewLogger(t), BuiltinMetrics: builtinMetrics, Registry: registry, @@ -2437,7 +2437,7 @@ func TestExecutionInfo(t *testing.T) { initVU, err := r.NewVU(1, 10, samples) require.NoError(t, err) - execScheduler, err := local.NewExecutionScheduler(r, r.runtimeState) + execScheduler, err := local.NewExecutionScheduler(r, r.preInitState) require.NoError(t, err) ctx, 
cancel := context.WithCancel(context.Background()) diff --git a/js/share_test.go b/js/share_test.go index be5bae83326..a5efaf6d904 100644 --- a/js/share_test.go +++ b/js/share_test.go @@ -85,7 +85,7 @@ exports.default = function() { registry := metrics.NewRegistry() builtinMetrics := metrics.RegisterBuiltinMetrics(registry) r2, err := NewFromArchive( - &lib.RuntimeState{ + &lib.TestPreInitState{ Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry, diff --git a/lib/runtime_state.go b/lib/runtime_state.go deleted file mode 100644 index e56e53db1e5..00000000000 --- a/lib/runtime_state.go +++ /dev/null @@ -1,23 +0,0 @@ -package lib - -import ( - "io" - - "github.com/sirupsen/logrus" - "go.k6.io/k6/metrics" -) - -// RuntimeState represents what is mostly needed during the running of a test. -// -// TODO: since this has nothing to do with the goja JS "runtime", maybe we -// should rename it to something more appropriate? e.g. TestRunState? -type RuntimeState struct { - RuntimeOptions RuntimeOptions - // TODO maybe have a struct `Metrics` with `Registry` and `Builtin` ? - Registry *metrics.Registry - BuiltinMetrics *metrics.BuiltinMetrics - KeyLogger io.WriteCloser - - // TODO: replace with logrus.FieldLogger when all of the tests can be fixed - Logger *logrus.Logger -} diff --git a/lib/test_state.go b/lib/test_state.go new file mode 100644 index 00000000000..ba1d27b220c --- /dev/null +++ b/lib/test_state.go @@ -0,0 +1,20 @@ +package lib + +import ( + "io" + + "github.com/sirupsen/logrus" + "go.k6.io/k6/metrics" +) + +// TestPreInitState contains all of the state that can be gathered and built +// before the test run is initialized. 
+type TestPreInitState struct { + RuntimeOptions RuntimeOptions + Registry *metrics.Registry + BuiltinMetrics *metrics.BuiltinMetrics + KeyLogger io.WriteCloser + + // TODO: replace with logrus.FieldLogger when all of the tests can be fixed + Logger *logrus.Logger +} diff --git a/lib/state.go b/lib/vu_state.go similarity index 98% rename from lib/state.go rename to lib/vu_state.go index d709b4b6009..8b8cca79273 100644 --- a/lib/state.go +++ b/lib/vu_state.go @@ -47,7 +47,7 @@ type State struct { // TODO: remove them from here, the built-in metrics and the script options // are not part of a VU's unique "state", they are global and the same for // all VUs. Figure out how to thread them some other way, e.g. through the - // RuntimeState. The Samples channel might also benefit from that... + // TestPreInitState. The Samples channel might also benefit from that... Options Options BuiltinMetrics *metrics.BuiltinMetrics From 4baaa322a37b7ada3fc512309de40f8871a99e5e Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Sun, 31 Jul 2022 17:12:15 +0300 Subject: [PATCH 05/12] Split apart cmd.loadedTest and make config consolidation more explicit --- cmd/inspect.go | 14 +++++++----- cmd/outputs.go | 4 +++- cmd/test_load.go | 55 ++++++++++++++++++++++++++---------------------- 3 files changed, 42 insertions(+), 31 deletions(-) diff --git a/cmd/inspect.go b/cmd/inspect.go index 916e8b788e2..e3533392f60 100644 --- a/cmd/inspect.go +++ b/cmd/inspect.go @@ -40,7 +40,7 @@ func getCmdInspect(gs *globalState) *cobra.Command { Long: `Inspect a script or archive.`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - test, err := loadAndConfigureTest(gs, cmd, args, nil) + test, err := loadTest(gs, cmd, args) if err != nil { return err } @@ -82,16 +82,20 @@ func getCmdInspect(gs *globalState) *cobra.Command { // derive the value of `scenarios` and calculate the max test duration and VUs. 
func inspectOutputWithExecRequirements(gs *globalState, cmd *cobra.Command, test *loadedTest) (interface{}, error) { // we don't actually support CLI flags here, so we pass nil as the getter - if err := test.consolidateDeriveAndValidateConfig(gs, cmd, nil); err != nil { + configuredTest, err := test.consolidateDeriveAndValidateConfig(gs, cmd, nil) + if err != nil { return nil, err } - et, err := lib.NewExecutionTuple(test.derivedConfig.ExecutionSegment, test.derivedConfig.ExecutionSegmentSequence) + et, err := lib.NewExecutionTuple( + configuredTest.derivedConfig.ExecutionSegment, + configuredTest.derivedConfig.ExecutionSegmentSequence, + ) if err != nil { return nil, err } - executionPlan := test.derivedConfig.Scenarios.GetFullExecutionRequirements(et) + executionPlan := configuredTest.derivedConfig.Scenarios.GetFullExecutionRequirements(et) duration, _ := lib.GetEndOffset(executionPlan) return struct { @@ -99,7 +103,7 @@ func inspectOutputWithExecRequirements(gs *globalState, cmd *cobra.Command, test TotalDuration types.NullDuration `json:"totalDuration"` MaxVUs uint64 `json:"maxVUs"` }{ - test.derivedConfig.Options, + configuredTest.derivedConfig.Options, types.NewNullDuration(duration, true), lib.GetMaxPossibleVUs(executionPlan), }, nil diff --git a/cmd/outputs.go b/cmd/outputs.go index b494b3eef49..ffb73e6d85a 100644 --- a/cmd/outputs.go +++ b/cmd/outputs.go @@ -77,7 +77,9 @@ func getPossibleIDList(constrs map[string]func(output.Params) (output.Output, er return strings.Join(res, ", ") } -func createOutputs(gs *globalState, test *loadedTest, executionPlan []lib.ExecutionStep) ([]output.Output, error) { +func createOutputs( + gs *globalState, test *loadedAndConfiguredTest, executionPlan []lib.ExecutionStep, +) ([]output.Output, error) { outputConstructors, err := getAllOutputConstructors() if err != nil { return nil, err diff --git a/cmd/test_load.go b/cmd/test_load.go index 5e5bf125cb9..a2602fc0149 100644 --- a/cmd/test_load.go +++ b/cmd/test_load.go @@ -25,8 
+25,8 @@ const ( testTypeArchive = "archive" ) -// loadedTest contains all of data, details and dependencies of a fully-loaded -// and configured k6 test. +// loadedTest contains all of data, details and dependencies of a loaded +// k6 test, but without any config consolidation. type loadedTest struct { sourceRootPath string // contains the raw string the user supplied pwd string @@ -35,18 +35,17 @@ type loadedTest struct { fileSystems map[string]afero.Fs preInitState *lib.TestPreInitState initRunner lib.Runner // TODO: rename to something more appropriate +} - // Only set if cliConfigGetter is supplied to loadAndConfigureTest() or if - // consolidateDeriveAndValidateConfig() is manually called. +// loadedAndConfiguredTest contains the whole loadedTest, as well as the +// consolidated test config and the full test run state. +type loadedAndConfiguredTest struct { + *loadedTest consolidatedConfig Config derivedConfig Config } -func loadAndConfigureTest( - gs *globalState, cmd *cobra.Command, args []string, - // supply this if you want the test config consolidated and validated - cliConfigGetter func(flags *pflag.FlagSet) (Config, error), // TODO: obviate -) (*loadedTest, error) { +func loadTest(gs *globalState, cmd *cobra.Command, args []string) (*loadedTest, error) { if len(args) < 1 { return nil, fmt.Errorf("k6 needs at least one argument to load the test") } @@ -91,13 +90,6 @@ func loadAndConfigureTest( return nil, fmt.Errorf("could not initialize '%s': %w", sourceRootPath, err) } gs.logger.Debug("Runner successfully initialized!") - - if cliConfigGetter != nil { - if err := test.consolidateDeriveAndValidateConfig(gs, cmd, cliConfigGetter); err != nil { - return nil, err - } - } - return test, nil } @@ -187,21 +179,21 @@ func detectTestType(data []byte) string { func (lt *loadedTest) consolidateDeriveAndValidateConfig( gs *globalState, cmd *cobra.Command, cliConfGetter func(flags *pflag.FlagSet) (Config, error), // TODO: obviate -) error { +) 
(*loadedAndConfiguredTest, error) { var cliConfig Config if cliConfGetter != nil { gs.logger.Debug("Parsing CLI flags...") var err error cliConfig, err = cliConfGetter(cmd.Flags()) if err != nil { - return err + return nil, err } } gs.logger.Debug("Consolidating config layers...") consolidatedConfig, err := getConsolidatedConfig(gs, cliConfig, lt.initRunner.GetOptions()) if err != nil { - return err + return nil, err } gs.logger.Debug("Parsing thresholds and validating config...") @@ -212,25 +204,38 @@ func (lt *loadedTest) consolidateDeriveAndValidateConfig( for metricName, thresholdsDefinition := range consolidatedConfig.Options.Thresholds { err = thresholdsDefinition.Parse() if err != nil { - return errext.WithExitCodeIfNone(err, exitcodes.InvalidConfig) + return nil, errext.WithExitCodeIfNone(err, exitcodes.InvalidConfig) } err = thresholdsDefinition.Validate(metricName, lt.preInitState.Registry) if err != nil { - return errext.WithExitCodeIfNone(err, exitcodes.InvalidConfig) + return nil, errext.WithExitCodeIfNone(err, exitcodes.InvalidConfig) } } } derivedConfig, err := deriveAndValidateConfig(consolidatedConfig, lt.initRunner.IsExecutable, gs.logger) if err != nil { - return err + return nil, err } - lt.consolidatedConfig = consolidatedConfig - lt.derivedConfig = derivedConfig + return &loadedAndConfiguredTest{ + loadedTest: lt, + consolidatedConfig: consolidatedConfig, + derivedConfig: derivedConfig, + }, nil +} + +func loadAndConfigureTest( + gs *globalState, cmd *cobra.Command, args []string, + cliConfigGetter func(flags *pflag.FlagSet) (Config, error), +) (*loadedAndConfiguredTest, error) { + test, err := loadTest(gs, cmd, args) + if err != nil { + return nil, err + } - return nil + return test.consolidateDeriveAndValidateConfig(gs, cmd, cliConfigGetter) } type syncWriteCloser struct { From 7f28a333c94c251b08b5748e1accd41969e51c42 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Sun, 31 Jul 2022 18:13:16 +0300 Subject: [PATCH 06/12] Add a new 
lib.TestRunState object to hold the post-init test state --- cmd/archive.go | 4 ++-- cmd/cloud.go | 4 ++-- cmd/run.go | 15 ++++++++------- cmd/test_load.go | 33 +++++++++++++++++++++++++-------- lib/test_state.go | 14 ++++++++++++++ 5 files changed, 51 insertions(+), 19 deletions(-) diff --git a/cmd/archive.go b/cmd/archive.go index 3cc4fe07c7c..9fdee894ae1 100644 --- a/cmd/archive.go +++ b/cmd/archive.go @@ -43,13 +43,13 @@ func (c *cmdArchive) run(cmd *cobra.Command, args []string) error { // an execution shortcut option (e.g. `iterations` or `duration`), // we will have multiple conflicting execution options since the // derivation will set `scenarios` as well. - err = test.initRunner.SetOptions(test.consolidatedConfig.Options) + testRunState, err := test.buildTestRunState(test.consolidatedConfig.Options) if err != nil { return err } // Archive. - arc := test.initRunner.MakeArchive() + arc := testRunState.Runner.MakeArchive() f, err := c.gs.fs.Create(c.archiveOut) if err != nil { return err diff --git a/cmd/cloud.go b/cmd/cloud.go index 3d272b09a41..a3171b2a332 100644 --- a/cmd/cloud.go +++ b/cmd/cloud.go @@ -102,7 +102,7 @@ func (c *cmdCloud) run(cmd *cobra.Command, args []string) error { // an execution shortcut option (e.g. `iterations` or `duration`), // we will have multiple conflicting execution options since the // derivation will set `scenarios` as well. - err = test.initRunner.SetOptions(test.consolidatedConfig.Options) + testRunState, err := test.buildTestRunState(test.consolidatedConfig.Options) if err != nil { return err } @@ -112,7 +112,7 @@ func (c *cmdCloud) run(cmd *cobra.Command, args []string) error { // TODO: move those validations to a separate function and reuse validateConfig()? 
modifyAndPrintBar(c.gs, progressBar, pb.WithConstProgress(0, "Building the archive...")) - arc := test.initRunner.MakeArchive() + arc := testRunState.Runner.MakeArchive() // TODO: Fix this // We reuse cloud.Config for parsing options.ext.loadimpact, but this probably shouldn't be diff --git a/cmd/run.go b/cmd/run.go index d3747753ed9..efe4673724d 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -65,7 +65,8 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { // Write the full consolidated *and derived* options back to the Runner. conf := test.derivedConfig - if err = test.initRunner.SetOptions(conf.Options); err != nil { + testRunState, err := test.buildTestRunState(conf.Options) + if err != nil { return err } @@ -86,7 +87,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { runCtx, runCancel := context.WithCancel(lingerCtx) defer runCancel() - logger := test.preInitState.Logger + logger := testRunState.Logger // Create a local execution scheduler wrapping the runner. logger.Debug("Initializing the execution scheduler...") execScheduler, err := local.NewExecutionScheduler(test.initRunner, test.preInitState) @@ -126,8 +127,8 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { // Create the engine. initBar.Modify(pb.WithConstProgress(0, "Init engine")) engine, err := core.NewEngine( - execScheduler, conf.Options, test.preInitState.RuntimeOptions, - outputs, logger, test.preInitState.Registry, + execScheduler, conf.Options, testRunState.RuntimeOptions, + outputs, logger, testRunState.Registry, ) if err != nil { return err @@ -230,7 +231,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { } // Handle the end-of-test summary. 
- if !test.preInitState.RuntimeOptions.NoSummary.Bool { + if !testRunState.RuntimeOptions.NoSummary.Bool { engine.MetricsEngine.MetricsLock.Lock() // TODO: refactor so this is not needed summaryResult, err := test.initRunner.HandleSummary(globalCtx, &lib.Summary{ Metrics: engine.MetricsEngine.ObservedMetrics, @@ -268,8 +269,8 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { logger.Debug("Waiting for engine processes to finish...") engineWait() logger.Debug("Everything has finished, exiting k6!") - if test.preInitState.KeyLogger != nil { - if err := test.preInitState.KeyLogger.Close(); err != nil { + if testRunState.KeyLogger != nil { + if err := testRunState.KeyLogger.Close(); err != nil { logger.WithError(err).Warn("Error while closing the SSLKEYLOGFILE") } } diff --git a/cmd/test_load.go b/cmd/test_load.go index a2602fc0149..ccfd78aa0c7 100644 --- a/cmd/test_load.go +++ b/cmd/test_load.go @@ -37,14 +37,6 @@ type loadedTest struct { initRunner lib.Runner // TODO: rename to something more appropriate } -// loadedAndConfiguredTest contains the whole loadedTest, as well as the -// consolidated test config and the full test run state. -type loadedAndConfiguredTest struct { - *loadedTest - consolidatedConfig Config - derivedConfig Config -} - func loadTest(gs *globalState, cmd *cobra.Command, args []string) (*loadedTest, error) { if len(args) < 1 { return nil, fmt.Errorf("k6 needs at least one argument to load the test") @@ -226,6 +218,14 @@ func (lt *loadedTest) consolidateDeriveAndValidateConfig( }, nil } +// loadedAndConfiguredTest contains the whole loadedTest, as well as the +// consolidated test config and the full test run state. 
+type loadedAndConfiguredTest struct {
+	*loadedTest
+	consolidatedConfig Config
+	derivedConfig Config
+}
+
 func loadAndConfigureTest(
 	gs *globalState, cmd *cobra.Command, args []string,
 	cliConfigGetter func(flags *pflag.FlagSet) (Config, error),
@@ -238,6 +238,23 @@ func loadAndConfigureTest(
 	return test.consolidateDeriveAndValidateConfig(gs, cmd, cliConfigGetter)
 }
 
+func (lct *loadedAndConfiguredTest) buildTestRunState(
+	configToReinject lib.Options,
+) (*lib.TestRunState, error) {
+	// This might be the full derived or just the consolidated options
+	if err := lct.initRunner.SetOptions(configToReinject); err != nil {
+		return nil, err
+	}
+
+	// TODO: init atlas root node, etc.
+
+	return &lib.TestRunState{
+		TestPreInitState: lct.preInitState,
+		Runner:           lct.initRunner,
+		Options:          lct.derivedConfig.Options, // we will always run with the derived options
+	}, nil
+}
+
 type syncWriteCloser struct {
 	w io.WriteCloser
 	m sync.Mutex
diff --git a/lib/test_state.go
index ba1d27b220c..57c110ef150 100644
--- a/lib/test_state.go
+++ b/lib/test_state.go
@@ -18,3 +18,17 @@ type TestPreInitState struct {
 	// TODO: replace with logrus.FieldLogger when all of the tests can be fixed
 	Logger *logrus.Logger
 }
+
+// TestRunState contains the pre-init state as well as all of the state and
+// options that are necessary for actually running the test.
+} From 350133597c520d33b3cac221cfc811edbb80f50a Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Mon, 1 Aug 2022 00:01:12 +0300 Subject: [PATCH 07/12] Thread lib.TestRunState through the Engine and ExecutionScheduler --- api/server_test.go | 17 ++- api/v1/group_routes_test.go | 15 ++- api/v1/metric_routes_test.go | 16 +-- api/v1/setup_teardown_routes_test.go | 16 ++- api/v1/status_routes_test.go | 13 +- .../eventloop/eventloop_test.go | 8 +- cmd/run.go | 7 +- core/engine.go | 21 ++-- core/engine_test.go | 38 ++++-- core/local/local.go | 28 ++--- core/local/local_test.go | 114 +++++++++++------- js/runner_test.go | 21 +++- 12 files changed, 192 insertions(+), 122 deletions(-) diff --git a/api/server_test.go b/api/server_test.go index 8489ebbd383..991efd41532 100644 --- a/api/server_test.go +++ b/api/server_test.go @@ -81,14 +81,19 @@ func TestWithEngine(t *testing.T) { logger := logrus.New() logger.SetOutput(testutils.NewTestOutput(t)) registry := metrics.NewRegistry() - piState := &lib.TestPreInitState{ - Logger: logger, - Registry: registry, - BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + testState := &lib.TestRunState{ + TestPreInitState: &lib.TestPreInitState{ + Logger: logger, + Registry: registry, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(registry), + }, + Options: lib.Options{}, + Runner: &minirunner.MiniRunner{}, } - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, piState) + + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, lib.RuntimeOptions{}, nil, logger, registry) + engine, err := core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) rw := httptest.NewRecorder() diff --git a/api/v1/group_routes_test.go b/api/v1/group_routes_test.go index 8fb46d85d4f..68b2f6e747d 100644 --- a/api/v1/group_routes_test.go +++ b/api/v1/group_routes_test.go @@ -47,6 +47,15 @@ func getTestPreInitState(tb 
testing.TB) *lib.TestPreInitState { } } +func getTestRunState(tb testing.TB, options lib.Options, runner lib.Runner) *lib.TestRunState { + require.NoError(tb, runner.SetOptions(runner.GetOptions().Apply(options))) + return &lib.TestRunState{ + TestPreInitState: getTestPreInitState(tb), + Options: options, + Runner: runner, + } +} + func TestGetGroups(t *testing.T) { g0, err := lib.NewGroup("", nil) assert.NoError(t, err) @@ -55,10 +64,10 @@ func TestGetGroups(t *testing.T) { g2, err := g1.Group("group 2") assert.NoError(t, err) - piState := getTestPreInitState(t) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Group: g0}, piState) + testState := getTestRunState(t, lib.Options{}, &minirunner.MiniRunner{Group: g0}) + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, piState.RuntimeOptions, nil, piState.Logger, piState.Registry) + engine, err := core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) t.Run("list", func(t *testing.T) { diff --git a/api/v1/metric_routes_test.go b/api/v1/metric_routes_test.go index bf0255a35bb..c071e1ffc89 100644 --- a/api/v1/metric_routes_test.go +++ b/api/v1/metric_routes_test.go @@ -40,12 +40,12 @@ import ( func TestGetMetrics(t *testing.T) { t.Parallel() - piState := getTestPreInitState(t) - testMetric, err := piState.Registry.NewMetric("my_metric", metrics.Trend, metrics.Time) + testState := getTestRunState(t, lib.Options{}, &minirunner.MiniRunner{}) + testMetric, err := testState.Registry.NewMetric("my_metric", metrics.Trend, metrics.Time) require.NoError(t, err) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, piState) + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, piState.RuntimeOptions, nil, piState.Logger, piState.Registry) + engine, err := 
core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) engine.MetricsEngine.ObservedMetrics = map[string]*metrics.Metric{ @@ -99,12 +99,12 @@ func TestGetMetrics(t *testing.T) { func TestGetMetric(t *testing.T) { t.Parallel() - piState := getTestPreInitState(t) - testMetric, err := piState.Registry.NewMetric("my_metric", metrics.Trend, metrics.Time) + testState := getTestRunState(t, lib.Options{}, &minirunner.MiniRunner{}) + testMetric, err := testState.Registry.NewMetric("my_metric", metrics.Trend, metrics.Time) require.NoError(t, err) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, piState) + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, piState.RuntimeOptions, nil, piState.Logger, piState.Registry) + engine, err := core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) engine.MetricsEngine.ObservedMetrics = map[string]*metrics.Metric{ diff --git a/api/v1/setup_teardown_routes_test.go b/api/v1/setup_teardown_routes_test.go index 193ae07c83a..020acfa1238 100644 --- a/api/v1/setup_teardown_routes_test.go +++ b/api/v1/setup_teardown_routes_test.go @@ -142,19 +142,23 @@ func TestSetupData(t *testing.T) { piState, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: testCase.script}, nil, ) require.NoError(t, err) - runner.SetOptions(lib.Options{ + require.NoError(t, runner.SetOptions(lib.Options{ Paused: null.BoolFrom(true), VUs: null.IntFrom(2), Iterations: null.IntFrom(3), NoSetup: null.BoolFrom(true), SetupTimeout: types.NullDurationFrom(5 * time.Second), TeardownTimeout: types.NullDurationFrom(5 * time.Second), - }) - execScheduler, err := local.NewExecutionScheduler(runner, piState) + })) + testState := &lib.TestRunState{ + TestPreInitState: piState, + Options: runner.GetOptions(), + Runner: runner, + } + + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - 
engine, err := core.NewEngine( - execScheduler, runner.GetOptions(), piState.RuntimeOptions, nil, piState.Logger, piState.Registry, - ) + engine, err := core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) diff --git a/api/v1/status_routes_test.go b/api/v1/status_routes_test.go index 31c52da2f7b..8161848b924 100644 --- a/api/v1/status_routes_test.go +++ b/api/v1/status_routes_test.go @@ -42,10 +42,10 @@ import ( func TestGetStatus(t *testing.T) { t.Parallel() - piState := getTestPreInitState(t) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, piState) + testState := getTestRunState(t, lib.Options{}, &minirunner.MiniRunner{}) + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, lib.Options{}, piState.RuntimeOptions, nil, piState.Logger, piState.Registry) + engine, err := core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) rw := httptest.NewRecorder() @@ -128,12 +128,11 @@ func TestPatchStatus(t *testing.T) { {"external": {"executor": "externally-controlled", "vus": 0, "maxVUs": 10, "duration": "0"}}`), &scenarios) require.NoError(t, err) - options := lib.Options{Scenarios: scenarios} - piState := getTestPreInitState(t) - execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Options: options}, piState) + testState := getTestRunState(t, lib.Options{Scenarios: scenarios}, &minirunner.MiniRunner{}) + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := core.NewEngine(execScheduler, options, piState.RuntimeOptions, nil, piState.Logger, piState.Registry) + engine, err := core.NewEngine(testState, execScheduler, nil) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) diff --git a/cmd/integration_tests/eventloop/eventloop_test.go b/cmd/integration_tests/eventloop/eventloop_test.go 
index d474651748f..df21c9ba02c 100644 --- a/cmd/integration_tests/eventloop/eventloop_test.go +++ b/cmd/integration_tests/eventloop/eventloop_test.go @@ -50,7 +50,13 @@ func eventLoopTest(t *testing.T, script []byte, testHandle func(context.Context, require.Empty(t, newOpts.Validate()) require.NoError(t, runner.SetOptions(newOpts)) - execScheduler, err := local.NewExecutionScheduler(runner, piState) + testState := &lib.TestRunState{ + TestPreInitState: piState, + Options: newOpts, + Runner: runner, + } + + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) samples := make(chan metrics.SampleContainer, newOpts.MetricSamplesBufferSize.Int64) diff --git a/cmd/run.go b/cmd/run.go index efe4673724d..c3b21656273 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -90,7 +90,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { logger := testRunState.Logger // Create a local execution scheduler wrapping the runner. logger.Debug("Initializing the execution scheduler...") - execScheduler, err := local.NewExecutionScheduler(test.initRunner, test.preInitState) + execScheduler, err := local.NewExecutionScheduler(testRunState) if err != nil { return err } @@ -126,10 +126,7 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { // TODO: remove this completely // Create the engine. initBar.Modify(pb.WithConstProgress(0, "Init engine")) - engine, err := core.NewEngine( - execScheduler, conf.Options, testRunState.RuntimeOptions, - outputs, logger, testRunState.Registry, - ) + engine, err := core.NewEngine(testRunState, execScheduler, outputs) if err != nil { return err } diff --git a/core/engine.go b/core/engine.go index 30df149e8fc..183e79bd942 100644 --- a/core/engine.go +++ b/core/engine.go @@ -72,10 +72,7 @@ type Engine struct { } // NewEngine instantiates a new Engine, without doing any heavy initialization. 
-func NewEngine( - ex lib.ExecutionScheduler, opts lib.Options, rtOpts lib.RuntimeOptions, outputs []output.Output, logger *logrus.Logger, - registry *metrics.Registry, -) (*Engine, error) { +func NewEngine(testState *lib.TestRunState, ex lib.ExecutionScheduler, outputs []output.Output) (*Engine, error) { if ex == nil { return nil, errors.New("missing ExecutionScheduler instance") } @@ -83,26 +80,28 @@ func NewEngine( e := &Engine{ ExecutionScheduler: ex, - runtimeOptions: rtOpts, - Samples: make(chan metrics.SampleContainer, opts.MetricSamplesBufferSize.Int64), + runtimeOptions: testState.RuntimeOptions, + Samples: make(chan metrics.SampleContainer, testState.Options.MetricSamplesBufferSize.Int64), stopChan: make(chan struct{}), - logger: logger.WithField("component", "engine"), + logger: testState.Logger.WithField("component", "engine"), } - me, err := engine.NewMetricsEngine(registry, ex.GetState(), opts, rtOpts, logger) + me, err := engine.NewMetricsEngine( + testState.Registry, ex.GetState(), testState.Options, testState.RuntimeOptions, testState.Logger, + ) if err != nil { return nil, err } e.MetricsEngine = me - if !(rtOpts.NoSummary.Bool && rtOpts.NoThresholds.Bool) { + if !(testState.RuntimeOptions.NoSummary.Bool && testState.RuntimeOptions.NoThresholds.Bool) { e.ingester = me.GetIngester() outputs = append(outputs, e.ingester) } - e.OutputManager = output.NewManager(outputs, logger, func(err error) { + e.OutputManager = output.NewManager(outputs, testState.Logger, func(err error) { if err != nil { - logger.WithError(err).Error("Received error to stop from output") + testState.Logger.WithError(err).Error("Received error to stop from output") } e.Stop() }) diff --git a/core/engine_test.go b/core/engine_test.go index c6d6bb5d1a3..f1c8f14730d 100644 --- a/core/engine_test.go +++ b/core/engine_test.go @@ -71,9 +71,22 @@ func getTestPreInitState(tb testing.TB) *lib.TestPreInitState { } } +func getTestRunState( + tb testing.TB, piState *lib.TestPreInitState, 
options lib.Options, runner lib.Runner, +) *lib.TestRunState { + require.Empty(tb, options.Validate()) + require.NoError(tb, runner.SetOptions(options)) + return &lib.TestRunState{ + TestPreInitState: piState, + Options: options, + Runner: runner, + } +} + // Wrapper around NewEngine that applies a logger and manages the options. func newTestEngineWithTestPreInitState( //nolint:golint - t *testing.T, runTimeout *time.Duration, runner lib.Runner, outputs []output.Output, opts lib.Options, piState *lib.TestPreInitState, + t *testing.T, runTimeout *time.Duration, runner lib.Runner, outputs []output.Output, + opts lib.Options, piState *lib.TestPreInitState, ) *testStruct { if runner == nil { runner = &minirunner.MiniRunner{} @@ -83,14 +96,13 @@ func newTestEngineWithTestPreInitState( //nolint:golint MetricSamplesBufferSize: null.NewInt(200, false), }.Apply(runner.GetOptions()).Apply(opts), piState.Logger) require.NoError(t, err) - require.Empty(t, newOpts.Validate()) - require.NoError(t, runner.SetOptions(newOpts)) + testRunState := getTestRunState(t, piState, newOpts, runner) - execScheduler, err := local.NewExecutionScheduler(runner, piState) + execScheduler, err := local.NewExecutionScheduler(testRunState) require.NoError(t, err) - engine, err := NewEngine(execScheduler, opts, piState.RuntimeOptions, outputs, piState.Logger, piState.Registry) + engine, err := NewEngine(testRunState, execScheduler, outputs) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) @@ -866,12 +878,12 @@ func TestVuInitException(t *testing.T) { opts, err := executor.DeriveScenariosFromShortcuts(runner.GetOptions(), nil) require.NoError(t, err) - require.Empty(t, opts.Validate()) - require.NoError(t, runner.SetOptions(opts)) - execScheduler, err := local.NewExecutionScheduler(runner, piState) + testState := getTestRunState(t, piState, opts, runner) + + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := 
NewEngine(execScheduler, opts, piState.RuntimeOptions, nil, piState.Logger, piState.Registry) + engine, err := NewEngine(testState, execScheduler, nil) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1242,11 +1254,11 @@ func TestActiveVUsCount(t *testing.T) { MetricSamplesBufferSize: null.NewInt(200, false), }.Apply(runner.GetOptions()), nil) require.NoError(t, err) - require.Empty(t, opts.Validate()) - require.NoError(t, runner.SetOptions(opts)) - execScheduler, err := local.NewExecutionScheduler(runner, piState) + + testState := getTestRunState(t, piState, opts, runner) + execScheduler, err := local.NewExecutionScheduler(testState) require.NoError(t, err) - engine, err := NewEngine(execScheduler, opts, rtOpts, []output.Output{mockOutput}, logger, registry) + engine, err := NewEngine(testState, execScheduler, []output.Output{mockOutput}) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) run, waitFn, err := engine.Init(ctx, ctx) // no need for 2 different contexts diff --git a/core/local/local.go b/core/local/local.go index b3ecfa5c2d7..0aec006dad3 100644 --- a/core/local/local.go +++ b/core/local/local.go @@ -40,7 +40,7 @@ import ( type ExecutionScheduler struct { runner lib.Runner options lib.Options - preInitState *lib.TestPreInitState + testRunState *lib.TestRunState initProgress *pb.ProgressBar executorConfigs []lib.ExecutorConfig // sorted by (startTime, ID) @@ -62,8 +62,8 @@ var _ lib.ExecutionScheduler = &ExecutionScheduler{} // instance, without initializing it beyond the bare minimum. Specifically, it // creates the needed executor instances and a lot of state placeholders, but it // doesn't initialize the executors and it doesn't initialize or run VUs. 
-func NewExecutionScheduler(runner lib.Runner, piState *lib.TestPreInitState) (*ExecutionScheduler, error) { - options := runner.GetOptions() +func NewExecutionScheduler(trs *lib.TestRunState) (*ExecutionScheduler, error) { + options := trs.Options et, err := lib.NewExecutionTuple(options.ExecutionSegment, options.ExecutionSegmentSequence) if err != nil { return nil, err @@ -72,7 +72,7 @@ func NewExecutionScheduler(runner lib.Runner, piState *lib.TestPreInitState) (*E maxPlannedVUs := lib.GetMaxPlannedVUs(executionPlan) maxPossibleVUs := lib.GetMaxPossibleVUs(executionPlan) - executionState := lib.NewExecutionState(options, et, piState.BuiltinMetrics, maxPlannedVUs, maxPossibleVUs) + executionState := lib.NewExecutionState(options, et, trs.BuiltinMetrics, maxPlannedVUs, maxPossibleVUs) maxDuration, _ := lib.GetEndOffset(executionPlan) // we don't care if the end offset is final executorConfigs := options.Scenarios.GetSortedConfigs() @@ -80,13 +80,13 @@ func NewExecutionScheduler(runner lib.Runner, piState *lib.TestPreInitState) (*E // Only take executors which have work. 
for _, sc := range executorConfigs { if !sc.HasWork(et) { - piState.Logger.Warnf( + trs.Logger.Warnf( "Executor '%s' is disabled for segment %s due to lack of work!", sc.GetName(), options.ExecutionSegment, ) continue } - s, err := sc.NewExecutor(executionState, piState.Logger.WithFields(logrus.Fields{ + s, err := sc.NewExecutor(executionState, trs.Logger.WithFields(logrus.Fields{ "scenario": sc.GetName(), "executor": sc.GetType(), })) @@ -103,9 +103,9 @@ func NewExecutionScheduler(runner lib.Runner, piState *lib.TestPreInitState) (*E } return &ExecutionScheduler{ - runner: runner, - preInitState: piState, + runner: trs.Runner, options: options, + testRunState: trs, initProgress: pb.New(pb.WithConstLeft("Init")), executors: executors, @@ -231,7 +231,7 @@ func (e *ExecutionScheduler) initVUsConcurrently( } func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- metrics.SampleContainer) { - e.preInitState.Logger.Debug("Starting emission of VUs and VUsMax metrics...") + e.testRunState.Logger.Debug("Starting emission of VUs and VUsMax metrics...") emitMetrics := func() { t := time.Now() @@ -259,7 +259,7 @@ func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- me go func() { defer func() { ticker.Stop() - e.preInitState.Logger.Debug("Metrics emission of VUs and VUsMax metrics stopped") + e.testRunState.Logger.Debug("Metrics emission of VUs and VUsMax metrics stopped") close(e.vusEmissionStopped) }() @@ -281,7 +281,7 @@ func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- me func (e *ExecutionScheduler) Init(ctx context.Context, samplesOut chan<- metrics.SampleContainer) error { e.emitVUsAndVUsMax(ctx, samplesOut) - logger := e.preInitState.Logger.WithField("phase", "local-execution-scheduler-init") + logger := e.testRunState.Logger.WithField("phase", "local-execution-scheduler-init") vusToInitialize := lib.GetMaxPlannedVUs(e.executionPlan) logger.WithFields(logrus.Fields{ "neededVUs": 
vusToInitialize, @@ -348,7 +348,7 @@ func (e *ExecutionScheduler) runExecutor( ) { executorConfig := executor.GetConfig() executorStartTime := executorConfig.GetStartTime() - executorLogger := e.preInitState.Logger.WithFields(logrus.Fields{ + executorLogger := e.testRunState.Logger.WithFields(logrus.Fields{ "executor": executorConfig.GetName(), "type": executorConfig.GetType(), "startTime": executorStartTime, @@ -400,7 +400,7 @@ func (e *ExecutionScheduler) Run(globalCtx, runCtx context.Context, engineOut ch }() executorsCount := len(e.executors) - logger := e.preInitState.Logger.WithField("phase", "local-execution-scheduler-run") + logger := e.testRunState.Logger.WithField("phase", "local-execution-scheduler-run") e.initProgress.Modify(pb.WithConstLeft("Run")) var interrupted bool defer func() { @@ -497,7 +497,7 @@ func (e *ExecutionScheduler) SetPaused(pause bool) error { if pause { return fmt.Errorf("execution is already paused") } - e.preInitState.Logger.Debug("Starting execution") + e.testRunState.Logger.Debug("Starting execution") return e.state.Resume() } diff --git a/core/local/local_test.go b/core/local/local_test.go index daf8107caba..743efb90a27 100644 --- a/core/local/local_test.go +++ b/core/local/local_test.go @@ -63,6 +63,18 @@ func getTestPreInitState(tb testing.TB) *lib.TestPreInitState { } } +func getTestRunState( + tb testing.TB, piState *lib.TestPreInitState, options lib.Options, runner lib.Runner, +) *lib.TestRunState { + require.Empty(tb, options.Validate()) + require.NoError(tb, runner.SetOptions(options)) + return &lib.TestRunState{ + TestPreInitState: piState, + Options: options, + Runner: runner, + } +} + func newTestExecutionScheduler( t *testing.T, runner lib.Runner, logger *logrus.Logger, opts lib.Options, ) (ctx context.Context, cancel func(), execScheduler *ExecutionScheduler, samples chan metrics.SampleContainer) { @@ -74,16 +86,13 @@ func newTestExecutionScheduler( MetricSamplesBufferSize: null.NewInt(200, false), 
}.Apply(runner.GetOptions()).Apply(opts), nil) require.NoError(t, err) - require.Empty(t, newOpts.Validate()) - require.NoError(t, runner.SetOptions(newOpts)) - - piState := getTestPreInitState(t) + testRunState := getTestRunState(t, getTestPreInitState(t), newOpts, runner) if logger != nil { - piState.Logger = logger + testRunState.Logger = logger } - execScheduler, err = NewExecutionScheduler(runner, piState) + execScheduler, err = NewExecutionScheduler(testRunState) require.NoError(t, err) samples = make(chan metrics.SampleContainer, newOpts.MetricSamplesBufferSize.Int64) @@ -144,7 +153,9 @@ func TestExecutionSchedulerRunNonDefault(t *testing.T) { }, nil) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, piState) + testRunState := getTestRunState(t, piState, runner.GetOptions(), runner) + + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -185,7 +196,6 @@ func TestExecutionSchedulerRunEnv(t *testing.T) { scenarios: { executor: { executor: "%[1]s", - gracefulStop: "0.5s", %[2]s } } @@ -201,31 +211,37 @@ func TestExecutionSchedulerRunEnv(t *testing.T) { executorConfigs := map[string]string{ "constant-arrival-rate": ` rate: 1, - timeUnit: "0.5s", - duration: "0.5s", + timeUnit: "1s", + duration: "1s", preAllocatedVUs: 1, - maxVUs: 2,`, + maxVUs: 2, + gracefulStop: "0.5s",`, "constant-vus": ` vus: 1, - duration: "0.5s",`, + duration: "1s", + gracefulStop: "0.5s",`, "externally-controlled": ` vus: 1, - duration: "0.5s",`, + duration: "1s",`, "per-vu-iterations": ` vus: 1, - iterations: 1,`, + iterations: 1, + gracefulStop: "0.5s",`, "shared-iterations": ` vus: 1, - iterations: 1,`, + iterations: 1, + gracefulStop: "0.5s",`, "ramping-arrival-rate": ` startRate: 1, timeUnit: "0.5s", preAllocatedVUs: 1, maxVUs: 2, - stages: [ { target: 1, duration: "0.5s" } ],`, + stages: [ { target: 1, duration: "1s" } ], + gracefulStop: "0.5s",`, "ramping-vus": ` 
startVUs: 1, - stages: [ { target: 1, duration: "0.5s" } ],`, + stages: [ { target: 1, duration: "1s" } ], + gracefulStop: "0.5s",`, } testCases := []struct{ name, script string }{} @@ -255,7 +271,8 @@ func TestExecutionSchedulerRunEnv(t *testing.T) { ) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, piState) + testRunState := getTestRunState(t, piState, runner.GetOptions(), runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -323,7 +340,8 @@ func TestExecutionSchedulerSystemTags(t *testing.T) { SystemTags: &metrics.DefaultSystemTagSet, }))) - execScheduler, err := NewExecutionScheduler(runner, piState) + testRunState := getTestRunState(t, piState, runner.GetOptions(), runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) @@ -394,7 +412,6 @@ func TestExecutionSchedulerRunCustomTags(t *testing.T) { scenarios: { executor: { executor: "%s", - gracefulStop: "0.5s", %s } } @@ -407,31 +424,37 @@ func TestExecutionSchedulerRunCustomTags(t *testing.T) { executorConfigs := map[string]string{ "constant-arrival-rate": ` rate: 1, - timeUnit: "0.5s", - duration: "0.5s", + timeUnit: "1s", + duration: "1s", preAllocatedVUs: 1, - maxVUs: 2,`, + maxVUs: 2, + gracefulStop: "0.5s",`, "constant-vus": ` vus: 1, - duration: "0.5s",`, + duration: "1s", + gracefulStop: "0.5s",`, "externally-controlled": ` vus: 1, - duration: "0.5s",`, + duration: "1s",`, "per-vu-iterations": ` vus: 1, - iterations: 1,`, + iterations: 1, + gracefulStop: "0.5s",`, "shared-iterations": ` vus: 1, - iterations: 1,`, + iterations: 1, + gracefulStop: "0.5s",`, "ramping-arrival-rate": ` startRate: 5, timeUnit: "0.5s", preAllocatedVUs: 1, maxVUs: 2, - stages: [ { target: 10, duration: "1s" } ],`, + stages: [ { target: 10, duration: "1s" } ], + gracefulStop: "0.5s",`, 
"ramping-vus": ` startVUs: 1, - stages: [ { target: 1, duration: "0.5s" } ],`, + stages: [ { target: 1, duration: "0.5s" } ], + gracefulStop: "0.5s",`, } testCases := []struct{ name, script string }{} @@ -457,7 +480,8 @@ func TestExecutionSchedulerRunCustomTags(t *testing.T) { ) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, piState) + testRunState := getTestRunState(t, piState, runner.GetOptions(), runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) @@ -620,7 +644,8 @@ func TestExecutionSchedulerRunCustomConfigNoCrossover(t *testing.T) { ) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, piState) + testRunState := getTestRunState(t, piState, runner.GetOptions(), runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) @@ -948,8 +973,8 @@ func TestExecutionSchedulerEndIterations(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - piState := getTestPreInitState(t) - execScheduler, err := NewExecutionScheduler(runner, piState) + testRunState := getTestRunState(t, getTestPreInitState(t), runner.GetOptions(), runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) samples := make(chan metrics.SampleContainer, 300) @@ -1151,9 +1176,9 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { TeardownTimeout: types.NullDurationFrom(4 * time.Second), }), nil) require.NoError(t, err) - require.NoError(t, runner.SetOptions(options)) - execScheduler, err := NewExecutionScheduler(runner, piState) + testRunState := getTestRunState(t, piState, options, runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1283,7 +1308,8 @@ func 
TestSetPaused(t *testing.T) { t.Parallel() t.Run("second pause is an error", func(t *testing.T) { t.Parallel() - sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getTestPreInitState(t)) + testRunState := getTestRunState(t, getTestPreInitState(t), lib.Options{}, &minirunner.MiniRunner{}) + sched, err := NewExecutionScheduler(testRunState) require.NoError(t, err) sched.executors = []lib.Executor{pausableExecutor{err: nil}} @@ -1295,7 +1321,8 @@ func TestSetPaused(t *testing.T) { t.Run("unpause at the start is an error", func(t *testing.T) { t.Parallel() - sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getTestPreInitState(t)) + testRunState := getTestRunState(t, getTestPreInitState(t), lib.Options{}, &minirunner.MiniRunner{}) + sched, err := NewExecutionScheduler(testRunState) require.NoError(t, err) sched.executors = []lib.Executor{pausableExecutor{err: nil}} err = sched.SetPaused(false) @@ -1305,7 +1332,8 @@ func TestSetPaused(t *testing.T) { t.Run("second unpause is an error", func(t *testing.T) { t.Parallel() - sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getTestPreInitState(t)) + testRunState := getTestRunState(t, getTestPreInitState(t), lib.Options{}, &minirunner.MiniRunner{}) + sched, err := NewExecutionScheduler(testRunState) require.NoError(t, err) sched.executors = []lib.Executor{pausableExecutor{err: nil}} require.NoError(t, sched.SetPaused(true)) @@ -1317,7 +1345,8 @@ func TestSetPaused(t *testing.T) { t.Run("an error on pausing is propagated", func(t *testing.T) { t.Parallel() - sched, err := NewExecutionScheduler(&minirunner.MiniRunner{}, getTestPreInitState(t)) + testRunState := getTestRunState(t, getTestPreInitState(t), lib.Options{}, &minirunner.MiniRunner{}) + sched, err := NewExecutionScheduler(testRunState) require.NoError(t, err) expectedErr := errors.New("testing pausable executor error") sched.executors = []lib.Executor{pausableExecutor{err: expectedErr}} @@ -1334,9 +1363,9 @@ func TestSetPaused(t 
*testing.T) { VUs: null.IntFrom(1), }.Apply(runner.GetOptions()), nil) require.NoError(t, err) - require.NoError(t, runner.SetOptions(options)) - sched, err := NewExecutionScheduler(runner, getTestPreInitState(t)) + testRunState := getTestRunState(t, getTestPreInitState(t), options, runner) + sched, err := NewExecutionScheduler(testRunState) require.NoError(t, err) err = sched.SetPaused(true) require.Error(t, err) @@ -1390,7 +1419,8 @@ func TestNewExecutionSchedulerHasWork(t *testing.T) { runner, err := js.New(piState, &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil) require.NoError(t, err) - execScheduler, err := NewExecutionScheduler(runner, piState) + testRunState := getTestRunState(t, piState, runner.GetOptions(), runner) + execScheduler, err := NewExecutionScheduler(testRunState) require.NoError(t, err) assert.Len(t, execScheduler.executors, 2) diff --git a/js/runner_test.go b/js/runner_test.go index 8d3609b75fe..821af0c65b6 100644 --- a/js/runner_test.go +++ b/js/runner_test.go @@ -310,14 +310,17 @@ func TestSetupDataIsolation(t *testing.T) { options := runner.GetOptions() require.Empty(t, options.Validate()) - piState := runner.preInitState - execScheduler, err := local.NewExecutionScheduler(runner, piState) + testRunState := &lib.TestRunState{ + TestPreInitState: runner.preInitState, + Options: options, + Runner: runner, + } + + execScheduler, err := local.NewExecutionScheduler(testRunState) require.NoError(t, err) mockOutput := mockoutput.New() - engine, err := core.NewEngine( - execScheduler, options, piState.RuntimeOptions, []output.Output{mockOutput}, piState.Logger, piState.Registry, - ) + engine, err := core.NewEngine(testRunState, execScheduler, []output.Output{mockOutput}) require.NoError(t, err) require.NoError(t, engine.OutputManager.StartOutputs()) defer engine.OutputManager.StopOutputs() @@ -2437,7 +2440,13 @@ func TestExecutionInfo(t *testing.T) { initVU, err := r.NewVU(1, 10, samples) require.NoError(t, err) - 
execScheduler, err := local.NewExecutionScheduler(r, r.preInitState) + testRunState := &lib.TestRunState{ + TestPreInitState: r.preInitState, + Options: r.GetOptions(), + Runner: r, + } + + execScheduler, err := local.NewExecutionScheduler(testRunState) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) From 81c2a056f21ce1fd8b2a9362a01c48dd5006def8 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Mon, 1 Aug 2022 10:21:19 +0300 Subject: [PATCH 08/12] Add lib.TestRunState to the lib.ExecutionState --- core/local/local.go | 6 +- lib/execution.go | 23 +- lib/executor/base_executor.go | 6 +- lib/executor/common_test.go | 73 +++++- lib/executor/constant_arrival_rate.go | 2 +- lib/executor/constant_arrival_rate_test.go | 237 ++++++++----------- lib/executor/constant_vus_test.go | 35 ++- lib/executor/execution_test.go | 10 +- lib/executor/externally_controlled_test.go | 31 ++- lib/executor/per_vu_iterations.go | 2 +- lib/executor/per_vu_iterations_test.go | 88 +++----- lib/executor/ramping_arrival_rate.go | 2 +- lib/executor/ramping_arrival_rate_test.go | 251 +++++++++------------ lib/executor/ramping_vus_test.go | 177 +++++++-------- lib/executor/shared_iterations.go | 2 +- lib/executor/shared_iterations_test.go | 122 ++++------ 16 files changed, 489 insertions(+), 578 deletions(-) diff --git a/core/local/local.go b/core/local/local.go index 0aec006dad3..342ebe73be0 100644 --- a/core/local/local.go +++ b/core/local/local.go @@ -72,7 +72,7 @@ func NewExecutionScheduler(trs *lib.TestRunState) (*ExecutionScheduler, error) { maxPlannedVUs := lib.GetMaxPlannedVUs(executionPlan) maxPossibleVUs := lib.GetMaxPossibleVUs(executionPlan) - executionState := lib.NewExecutionState(options, et, trs.BuiltinMetrics, maxPlannedVUs, maxPossibleVUs) + executionState := lib.NewExecutionState(trs, et, maxPlannedVUs, maxPossibleVUs) maxDuration, _ := lib.GetEndOffset(executionPlan) // we don't care if the end offset is final executorConfigs := 
options.Scenarios.GetSortedConfigs() @@ -239,12 +239,12 @@ func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- me Samples: []metrics.Sample{ { Time: t, - Metric: e.state.BuiltinMetrics.VUs, + Metric: e.state.Test.BuiltinMetrics.VUs, Value: float64(e.state.GetCurrentlyActiveVUsCount()), Tags: e.options.RunTags, }, { Time: t, - Metric: e.state.BuiltinMetrics.VUsMax, + Metric: e.state.Test.BuiltinMetrics.VUsMax, Value: float64(e.state.GetInitializedVUsCount()), Tags: e.options.RunTags, }, diff --git a/lib/execution.go b/lib/execution.go index dd8959b5828..6dc109312aa 100644 --- a/lib/execution.go +++ b/lib/execution.go @@ -141,19 +141,18 @@ const ( // around pausing, and uninitializedUnplannedVUs for restricting the number of // unplanned VUs being initialized. type ExecutionState struct { - // A copy of the options, so the different executors have access to them. - // They will need to access things like the current execution segment, the - // per-run metrics tags, etc. + // A portal to the broader test run state, so the different executors have + // access to the test options, built-in metrics, etc.. They will need to + // access things like the current execution segment, the per-run metrics + // tags, different metrics to emit, etc. // - // Obviously, they are not meant to be changed... They should be a constant - // during the execution of a single test, but we can't easily enforce that - // via the Go type system... - Options Options + // Obviously, things here are not meant to be changed... They should be a + // constant during the execution of a single test, but we can't easily + // enforce that via the Go type system... + Test *TestRunState ExecutionTuple *ExecutionTuple // TODO Rename, possibly move - BuiltinMetrics *metrics.BuiltinMetrics - // vus is the shared channel buffer that contains all of the VUs that have // been initialized and aren't currently being used by a executor. 
// @@ -276,8 +275,7 @@ type ExecutionState struct { // with zeros. It also makes sure that the initial state is unpaused, by // setting resumeNotify to an already closed channel. func NewExecutionState( - options Options, et *ExecutionTuple, builtinMetrics *metrics.BuiltinMetrics, - maxPlannedVUs, maxPossibleVUs uint64, + testRunState *TestRunState, et *ExecutionTuple, maxPlannedVUs, maxPossibleVUs uint64, ) *ExecutionState { resumeNotify := make(chan struct{}) close(resumeNotify) // By default the ExecutionState starts unpaused @@ -286,9 +284,8 @@ func NewExecutionState( segIdx := NewSegmentedIndex(et) return &ExecutionState{ - Options: options, + Test: testRunState, ExecutionTuple: et, - BuiltinMetrics: builtinMetrics, vus: make(chan InitializedVU, maxPossibleVUs), diff --git a/lib/executor/base_executor.go b/lib/executor/base_executor.go index 830abf234f7..e8e57cddf0e 100644 --- a/lib/executor/base_executor.go +++ b/lib/executor/base_executor.go @@ -93,11 +93,11 @@ func (bs *BaseExecutor) GetProgress() *pb.ProgressBar { // getMetricTags returns a tag set that can be used to emit metrics by the // executor. The VU ID is optional. 
func (bs *BaseExecutor) getMetricTags(vuID *uint64) *metrics.SampleTags { - tags := bs.executionState.Options.RunTags.CloneTags() - if bs.executionState.Options.SystemTags.Has(metrics.TagScenario) { + tags := bs.executionState.Test.Options.RunTags.CloneTags() + if bs.executionState.Test.Options.SystemTags.Has(metrics.TagScenario) { tags["scenario"] = bs.config.GetName() } - if vuID != nil && bs.executionState.Options.SystemTags.Has(metrics.TagVU) { + if vuID != nil && bs.executionState.Test.Options.SystemTags.Has(metrics.TagVU) { tags["vu"] = strconv.FormatUint(*vuID, 10) } return metrics.IntoSampleTags(&tags) diff --git a/lib/executor/common_test.go b/lib/executor/common_test.go index b092b1204f4..a1cc917c343 100644 --- a/lib/executor/common_test.go +++ b/lib/executor/common_test.go @@ -42,7 +42,25 @@ func simpleRunner(vuFn func(context.Context, *lib.State) error) lib.Runner { } } -func setupExecutor(t testing.TB, config lib.ExecutorConfig, es *lib.ExecutionState, runner lib.Runner) ( +func getTestRunState(tb testing.TB, options lib.Options, runner lib.Runner) *lib.TestRunState { + reg := metrics.NewRegistry() + piState := &lib.TestPreInitState{ + Logger: testutils.NewLogger(tb), + RuntimeOptions: lib.RuntimeOptions{}, + Registry: reg, + BuiltinMetrics: metrics.RegisterBuiltinMetrics(reg), + } + + require.NoError(tb, runner.SetOptions(options)) + + return &lib.TestRunState{ + TestPreInitState: piState, + Options: options, + Runner: runner, + } +} + +func setupExecutor(t testing.TB, config lib.ExecutorConfig, es *lib.ExecutionState) ( context.Context, context.CancelFunc, lib.Executor, *testutils.SimpleLogrusHook, ) { ctx, cancel := context.WithCancel(context.Background()) @@ -56,7 +74,7 @@ func setupExecutor(t testing.TB, config lib.ExecutorConfig, es *lib.ExecutionSta initVUFunc := func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { idl, idg := es.GetUniqueVUIdentifiers() - return runner.NewVU(idl, idg, engineOut) + return 
es.Test.Runner.NewVU(idl, idg, engineOut) } es.SetInitVUFunc(initVUFunc) @@ -83,3 +101,54 @@ func initializeVUs( es.AddInitializedVU(vu) } } + +type executorTest struct { + options lib.Options + state *lib.ExecutionState + + ctx context.Context //nolint + cancel context.CancelFunc + executor lib.Executor + logHook *testutils.SimpleLogrusHook +} + +func setupExecutorTest( + t testing.TB, segmentStr, sequenceStr string, extraOptions lib.Options, + runner lib.Runner, config lib.ExecutorConfig, +) *executorTest { + var err error + var segment *lib.ExecutionSegment + if segmentStr != "" { + segment, err = lib.NewExecutionSegmentFromString(segmentStr) + require.NoError(t, err) + } + + var sequence lib.ExecutionSegmentSequence + if sequenceStr != "" { + sequence, err = lib.NewExecutionSegmentSequenceFromString(sequenceStr) + require.NoError(t, err) + } + + et, err := lib.NewExecutionTuple(segment, &sequence) + require.NoError(t, err) + + options := lib.Options{ + ExecutionSegment: segment, + ExecutionSegmentSequence: &sequence, + }.Apply(runner.GetOptions()).Apply(extraOptions) + + testRunState := getTestRunState(t, options, runner) + + execReqs := config.GetExecutionRequirements(et) + es := lib.NewExecutionState(testRunState, et, lib.GetMaxPlannedVUs(execReqs), lib.GetMaxPossibleVUs(execReqs)) + ctx, cancel, executor, logHook := setupExecutor(t, config, es) + + return &executorTest{ + options: options, + state: es, + ctx: ctx, + cancel: cancel, + executor: executor, + logHook: logHook, + } +} diff --git a/lib/executor/constant_arrival_rate.go b/lib/executor/constant_arrival_rate.go index c97cdfcf985..070b9916b37 100644 --- a/lib/executor/constant_arrival_rate.go +++ b/lib/executor/constant_arrival_rate.go @@ -337,7 +337,7 @@ func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- metrics int64(car.config.TimeUnit.TimeDuration()), )).TimeDuration() - droppedIterationMetric := car.executionState.BuiltinMetrics.DroppedIterations + droppedIterationMetric := 
car.executionState.Test.BuiltinMetrics.DroppedIterations shownWarning := false metricTags := car.getMetricTags(nil) for li, gi := 0, start; ; li, gi = li+1, gi+offsets[li%len(offsets)] { diff --git a/lib/executor/constant_arrival_rate_test.go b/lib/executor/constant_arrival_rate_test.go index 7bca39fa9d8..f1315efb02f 100644 --- a/lib/executor/constant_arrival_rate_test.go +++ b/lib/executor/constant_arrival_rate_test.go @@ -34,7 +34,6 @@ import ( "gopkg.in/guregu/null.v3" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/types" "go.k6.io/k6/metrics" ) @@ -68,24 +67,18 @@ func getTestConstantArrivalRateConfig() *ConstantArrivalRateConfig { func TestConstantArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, getTestConstantArrivalRateConfig(), es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - time.Sleep(time.Second) - return nil - }), - ) - defer cancel() + + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + time.Sleep(time.Second) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestConstantArrivalRateConfig()) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) - entries := logHook.Drain() + require.NoError(t, test.executor.Run(test.ctx, engineOut)) + entries := test.logHook.Drain() require.NotEmpty(t, entries) for _, entry := range entries { require.Equal(t, @@ -97,20 +90,16 @@ func TestConstantArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) { func TestConstantArrivalRateRunCorrectRate(t *testing.T) { t.Parallel() + var count int64 - et, err := 
lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, getTestConstantArrivalRateConfig(), es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - atomic.AddInt64(&count, 1) - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + atomic.AddInt64(&count, 1) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestConstantArrivalRateConfig()) + defer test.cancel() + var wg sync.WaitGroup wg.Add(1) go func() { @@ -125,114 +114,108 @@ func TestConstantArrivalRateRunCorrectRate(t *testing.T) { } }() engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) wg.Wait() - require.NoError(t, err) - require.Empty(t, logHook.Drain()) + require.Empty(t, test.logHook.Drain()) } //nolint:tparallel,paralleltest // this is flaky if ran with other tests func TestConstantArrivalRateRunCorrectTiming(t *testing.T) { // t.Parallel() tests := []struct { - segment *lib.ExecutionSegment - sequence *lib.ExecutionSegmentSequence + segment string + sequence string start time.Duration steps []int64 }{ { - segment: newExecutionSegmentFromString("0:1/3"), + segment: "0:1/3", start: time.Millisecond * 20, steps: []int64{40, 60, 60, 60, 60, 60, 60}, }, { - segment: newExecutionSegmentFromString("1/3:2/3"), + segment: "1/3:2/3", start: time.Millisecond * 20, steps: []int64{60, 60, 60, 60, 60, 60, 40}, }, { - segment: newExecutionSegmentFromString("2/3:1"), + segment: "2/3:1", start: time.Millisecond * 20, steps: []int64{40, 60, 60, 60, 60, 60, 60}, }, { - segment: newExecutionSegmentFromString("1/6:3/6"), + segment: "1/6:3/6", start: time.Millisecond * 20, steps: []int64{40, 80, 
40, 80, 40, 80, 40}, }, { - segment: newExecutionSegmentFromString("1/6:3/6"), - sequence: newExecutionSegmentSequenceFromString("1/6,3/6"), + segment: "1/6:3/6", + sequence: "1/6,3/6", start: time.Millisecond * 20, steps: []int64{40, 80, 40, 80, 40, 80, 40}, }, // sequences { - segment: newExecutionSegmentFromString("0:1/3"), - sequence: newExecutionSegmentSequenceFromString("0,1/3,2/3,1"), + segment: "0:1/3", + sequence: "0,1/3,2/3,1", start: time.Millisecond * 0, steps: []int64{60, 60, 60, 60, 60, 60, 40}, }, { - segment: newExecutionSegmentFromString("1/3:2/3"), - sequence: newExecutionSegmentSequenceFromString("0,1/3,2/3,1"), + segment: "1/3:2/3", + sequence: "0,1/3,2/3,1", start: time.Millisecond * 20, steps: []int64{60, 60, 60, 60, 60, 60, 40}, }, { - segment: newExecutionSegmentFromString("2/3:1"), - sequence: newExecutionSegmentSequenceFromString("0,1/3,2/3,1"), + segment: "2/3:1", + sequence: "0,1/3,2/3,1", start: time.Millisecond * 40, steps: []int64{60, 60, 60, 60, 60, 100}, }, } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) for _, test := range tests { test := test t.Run(fmt.Sprintf("segment %s sequence %s", test.segment, test.sequence), func(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(test.segment, test.sequence) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{ - ExecutionSegment: test.segment, - ExecutionSegmentSequence: test.sequence, - }, et, builtinMetrics, 10, 50) + var count int64 - seconds := 2 + startTime := time.Now() + expectedTimeInt64 := int64(test.start) + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + current := atomic.AddInt64(&count, 1) + + expectedTime := test.start + if current != 1 { + expectedTime = time.Duration(atomic.AddInt64(&expectedTimeInt64, + int64(time.Millisecond)*test.steps[(current-2)%int64(len(test.steps))])) + } + + // FIXME: replace this check with a unit test asserting that the scheduling is correct, 
+ // without depending on the execution time itself + assert.WithinDuration(t, + startTime.Add(expectedTime), + time.Now(), + time.Millisecond*24, + "%d expectedTime %s", current, expectedTime, + ) + + return nil + }) + config := getTestConstantArrivalRateConfig() + seconds := 2 config.Duration.Duration = types.Duration(time.Second * time.Duration(seconds)) - newET, err := es.ExecutionTuple.GetNewExecutionTupleFromValue(config.MaxVUs.Int64) + execTest := setupExecutorTest( + t, test.segment, test.sequence, lib.Options{}, runner, config, + ) + defer execTest.cancel() + + newET, err := execTest.state.ExecutionTuple.GetNewExecutionTupleFromValue(config.MaxVUs.Int64) require.NoError(t, err) rateScaled := newET.ScaleInt64(config.Rate.Int64) - startTime := time.Now() - expectedTimeInt64 := int64(test.start) - ctx, cancel, executor, logHook := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - current := atomic.AddInt64(&count, 1) - - expectedTime := test.start - if current != 1 { - expectedTime = time.Duration(atomic.AddInt64(&expectedTimeInt64, - int64(time.Millisecond)*test.steps[(current-2)%int64(len(test.steps))])) - } - - // FIXME: replace this check with a unit test asserting that the scheduling is correct, - // without depending on the execution time itself - assert.WithinDuration(t, - startTime.Add(expectedTime), - time.Now(), - time.Millisecond*24, - "%d expectedTime %s", current, expectedTime, - ) - - return nil - }), - ) - defer cancel() var wg sync.WaitGroup wg.Add(1) go func() { @@ -248,10 +231,10 @@ func TestConstantArrivalRateRunCorrectTiming(t *testing.T) { }() startTime = time.Now() engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) + err = execTest.executor.Run(execTest.ctx, engineOut) wg.Wait() require.NoError(t, err) - require.Empty(t, logHook.Drain()) + require.Empty(t, execTest.logHook.Drain()) }) } } @@ -263,8 +246,6 @@ func TestArrivalRateCancel(t *testing.T) { 
"constant": getTestConstantArrivalRateConfig(), "ramping": getTestRampingArrivalRateConfig(), } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) for name, config := range testCases { config := config t.Run(name, func(t *testing.T) { @@ -272,32 +253,31 @@ func TestArrivalRateCancel(t *testing.T) { ch := make(chan struct{}) errCh := make(chan error, 1) weAreDoneCh := make(chan struct{}) - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, config, es, simpleRunner(func(ctx context.Context, _ *lib.State) error { - select { - case <-ch: - <-ch - default: - } - return nil - })) - defer cancel() + + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + select { + case <-ch: + <-ch + default: + } + return nil + }) + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() engineOut := make(chan metrics.SampleContainer, 1000) - errCh <- executor.Run(ctx, engineOut) + errCh <- test.executor.Run(test.ctx, engineOut) close(weAreDoneCh) }() time.Sleep(time.Second) ch <- struct{}{} - cancel() + test.cancel() time.Sleep(time.Second) select { case <-weAreDoneCh: @@ -308,7 +288,7 @@ func TestArrivalRateCancel(t *testing.T) { <-weAreDoneCh wg.Wait() require.NoError(t, <-errCh) - require.Empty(t, logHook.Drain()) + require.Empty(t, test.logHook.Drain()) }) } } @@ -316,8 +296,6 @@ func TestArrivalRateCancel(t *testing.T) { func TestConstantArrivalRateDroppedIterations(t *testing.T) { t.Parallel() var count int64 - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) config := &ConstantArrivalRateConfig{ BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(0 * time.Second)}, @@ -328,22 +306,17 @@ func TestConstantArrivalRateDroppedIterations(t *testing.T) { 
MaxVUs: null.IntFrom(5), } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - atomic.AddInt64(&count, 1) - <-ctx.Done() - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + atomic.AddInt64(&count, 1) + <-ctx.Done() + return nil + }) + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) - logs := logHook.Drain() + require.NoError(t, test.executor.Run(test.ctx, engineOut)) + logs := test.logHook.Drain() require.Len(t, logs, 1) assert.Contains(t, logs[0].Message, "cannot initialize more") assert.Equal(t, int64(5), count) @@ -371,36 +344,24 @@ func TestConstantArrivalRateGlobalIters(t *testing.T) { {"0,1/4,3/4,1", "3/4:1", []uint64{3, 8, 13, 18}}, } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) for _, tc := range testCases { tc := tc t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { t.Parallel() - ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) - require.NoError(t, err) - seg, err := lib.NewExecutionSegmentFromString(tc.seg) - require.NoError(t, err) - et, err := lib.NewExecutionTuple(seg, &ess) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 5, 5) - - runner := &minirunner.MiniRunner{} - ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) - defer cancel() gotIters := []uint64{} var mx sync.Mutex - runner.Fn = func(ctx context.Context, state *lib.State, _ chan<- metrics.SampleContainer) error { + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { 
mx.Lock() gotIters = append(gotIters, state.GetScenarioGlobalVUIter()) mx.Unlock() return nil - } + }) + test := setupExecutorTest(t, tc.seg, tc.seq, lib.Options{}, runner, config) + defer test.cancel() engineOut := make(chan metrics.SampleContainer, 100) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) assert.Equal(t, tc.expIters, gotIters) }) } diff --git a/lib/executor/constant_vus_test.go b/lib/executor/constant_vus_test.go index 4eb8306a171..f0c4fb8b56e 100644 --- a/lib/executor/constant_vus_test.go +++ b/lib/executor/constant_vus_test.go @@ -45,26 +45,23 @@ func getTestConstantVUsConfig() ConstantVUsConfig { func TestConstantVUsRun(t *testing.T) { t.Parallel() var result sync.Map - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, getTestConstantVUsConfig(), es, - simpleRunner(func(ctx context.Context, state *lib.State) error { - select { - case <-ctx.Done(): - return nil - default: - } - currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) - result.Store(state.VUID, currIter.(uint64)+1) - time.Sleep(210 * time.Millisecond) + + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { + select { + case <-ctx.Done(): return nil - }), - ) - defer cancel() - err = executor.Run(ctx, nil) - require.NoError(t, err) + default: + } + currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) + result.Store(state.VUID, currIter.(uint64)+1) //nolint:forcetypeassert + time.Sleep(210 * time.Millisecond) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestConstantVUsConfig()) + defer test.cancel() + + require.NoError(t, test.executor.Run(test.ctx, nil)) var totalIters uint64 result.Range(func(key, value interface{}) bool { diff --git a/lib/executor/execution_test.go b/lib/executor/execution_test.go index 
44724531211..f3bb312c6c2 100644 --- a/lib/executor/execution_test.go +++ b/lib/executor/execution_test.go @@ -61,7 +61,7 @@ func TestExecutionStateVUIDs(t *testing.T) { require.NoError(t, err) start, offsets, _ := et.GetStripedOffsets() - es := lib.NewExecutionState(lib.Options{}, et, nil, 0, 0) + es := lib.NewExecutionState(nil, et, 0, 0) idl, idg := es.GetUniqueVUIdentifiers() assert.Equal(t, uint64(1), idl) @@ -102,7 +102,7 @@ func TestExecutionStateGettingVUsWhenNonAreAvailable(t *testing.T) { t.Parallel() et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 0, 0) + es := lib.NewExecutionState(nil, et, 0, 0) logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.WarnLevel}} testLog := logrus.New() testLog.AddHook(logHook) @@ -128,7 +128,7 @@ func TestExecutionStateGettingVUs(t *testing.T) { et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 20) + es := lib.NewExecutionState(nil, et, 10, 20) es.SetInitVUFunc(func(_ context.Context, _ *logrus.Entry) (lib.InitializedVU, error) { return &minirunner.VU{}, nil }) @@ -193,7 +193,7 @@ func TestMarkStartedPanicsOnSecondRun(t *testing.T) { t.Parallel() et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 0, 0) + es := lib.NewExecutionState(nil, et, 0, 0) require.False(t, es.HasStarted()) es.MarkStarted() require.True(t, es.HasStarted()) @@ -204,7 +204,7 @@ func TestMarkEnded(t *testing.T) { t.Parallel() et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 0, 0) + es := lib.NewExecutionState(nil, et, 0, 0) require.False(t, es.HasEnded()) es.MarkEnded() require.True(t, es.HasEnded()) diff --git a/lib/executor/externally_controlled_test.go b/lib/executor/externally_controlled_test.go index 28b33b40497..20d93588deb 100644 --- 
a/lib/executor/externally_controlled_test.go +++ b/lib/executor/externally_controlled_test.go @@ -48,20 +48,15 @@ func getTestExternallyControlledConfig() ExternallyControlledConfig { func TestExternallyControlledRun(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - doneIters := new(uint64) - ctx, cancel, executor, _ := setupExecutor( - t, getTestExternallyControlledConfig(), es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - time.Sleep(200 * time.Millisecond) - atomic.AddUint64(doneIters, 1) - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + time.Sleep(200 * time.Millisecond) + atomic.AddUint64(doneIters, 1) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestExternallyControlledConfig()) + defer test.cancel() var ( wg sync.WaitGroup @@ -71,9 +66,9 @@ func TestExternallyControlledRun(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - es.MarkStarted() - errCh <- executor.Run(ctx, nil) - es.MarkEnded() + test.state.MarkStarted() + errCh <- test.executor.Run(test.ctx, nil) + test.state.MarkEnded() close(doneCh) }() @@ -83,7 +78,7 @@ func TestExternallyControlledRun(t *testing.T) { MaxVUs: null.IntFrom(maxVUs), Duration: types.NullDurationFrom(2 * time.Second), } - err := executor.(*ExternallyControlled).UpdateConfig(ctx, newConfig) + err := test.executor.(*ExternallyControlled).UpdateConfig(test.ctx, newConfig) //nolint:forcetypeassert if errMsg != "" { assert.EqualError(t, err, errMsg) } else { @@ -94,7 +89,7 @@ func TestExternallyControlledRun(t *testing.T) { var resultVUCount [][]int64 snapshot := func() { resultVUCount = append(resultVUCount, - []int64{es.GetCurrentlyActiveVUsCount(), es.GetInitializedVUsCount()}) + []int64{test.state.GetCurrentlyActiveVUsCount(), test.state.GetInitializedVUsCount()}) } wg.Add(1) diff --git 
a/lib/executor/per_vu_iterations.go b/lib/executor/per_vu_iterations.go index d609fcbbfdf..d90db1e2a03 100644 --- a/lib/executor/per_vu_iterations.go +++ b/lib/executor/per_vu_iterations.go @@ -218,7 +218,7 @@ func (pvi PerVUIterations) Run(parentCtx context.Context, out chan<- metrics.Sam activeVUs.Done() } - droppedIterationMetric := pvi.executionState.BuiltinMetrics.DroppedIterations + droppedIterationMetric := pvi.executionState.Test.BuiltinMetrics.DroppedIterations handleVU := func(initVU lib.InitializedVU) { defer handleVUsWG.Done() ctx, cancel := context.WithCancel(maxDurationCtx) diff --git a/lib/executor/per_vu_iterations_test.go b/lib/executor/per_vu_iterations_test.go index 517b76a49ea..4ee367d0c1e 100644 --- a/lib/executor/per_vu_iterations_test.go +++ b/lib/executor/per_vu_iterations_test.go @@ -49,23 +49,18 @@ func getTestPerVUIterationsConfig() PerVUIterationsConfig { func TestPerVUIterationsRun(t *testing.T) { t.Parallel() var result sync.Map - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, getTestPerVUIterationsConfig(), es, - simpleRunner(func(ctx context.Context, state *lib.State) error { - currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) - result.Store(state.VUID, currIter.(uint64)+1) - return nil - }), - ) - defer cancel() + + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { + currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) + result.Store(state.VUID, currIter.(uint64)+1) //nolint:forcetypeassert + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestPerVUIterationsConfig()) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) + 
require.NoError(t, test.executor.Run(test.ctx, engineOut)) var totalIters uint64 result.Range(func(key, value interface{}) bool { @@ -85,26 +80,21 @@ func TestPerVUIterationsRunVariableVU(t *testing.T) { result sync.Map slowVUID = uint64(1) ) - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, getTestPerVUIterationsConfig(), es, - simpleRunner(func(ctx context.Context, state *lib.State) error { - if state.VUID == slowVUID { - time.Sleep(200 * time.Millisecond) - } - currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) - result.Store(state.VUID, currIter.(uint64)+1) - return nil - }), - ) - defer cancel() + + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { + if state.VUID == slowVUID { + time.Sleep(200 * time.Millisecond) + } + currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) + result.Store(state.VUID, currIter.(uint64)+1) //nolint:forcetypeassert + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestPerVUIterationsConfig()) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) val, ok := result.Load(slowVUID) assert.True(t, ok) @@ -128,8 +118,6 @@ func TestPerVUIterationsRunVariableVU(t *testing.T) { func TestPerVuIterationsEmitDroppedIterations(t *testing.T) { t.Parallel() var count int64 - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) config := PerVUIterationsConfig{ VUs: null.IntFrom(5), @@ -137,22 +125,18 @@ func TestPerVuIterationsEmitDroppedIterations(t *testing.T) { MaxDuration: types.NullDurationFrom(1 * time.Second), } - registry := metrics.NewRegistry() - builtinMetrics := 
metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - atomic.AddInt64(&count, 1) - <-ctx.Done() - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + atomic.AddInt64(&count, 1) + <-ctx.Done() + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) - assert.Empty(t, logHook.Drain()) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) + assert.Empty(t, test.logHook.Drain()) assert.Equal(t, int64(5), count) assert.Equal(t, float64(95), sumMetricValues(engineOut, metrics.DroppedIterationsName)) } diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index 07f611e42a9..9cf473a64f4 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ -457,7 +457,7 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- metrics shownWarning := false metricTags := varr.getMetricTags(nil) go varr.config.cal(varr.et, ch) - droppedIterationMetric := varr.executionState.BuiltinMetrics.DroppedIterations + droppedIterationMetric := varr.executionState.Test.BuiltinMetrics.DroppedIterations for nextTime := range ch { select { case <-regDurationDone: diff --git a/lib/executor/ramping_arrival_rate_test.go b/lib/executor/ramping_arrival_rate_test.go index 8d542ba709d..3ef99121614 100644 --- a/lib/executor/ramping_arrival_rate_test.go +++ b/lib/executor/ramping_arrival_rate_test.go @@ -35,7 +35,6 @@ import ( "gopkg.in/guregu/null.v3" "go.k6.io/k6/lib" - "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/types" "go.k6.io/k6/metrics" ) @@ -66,23 +65,18 @@ func 
getTestRampingArrivalRateConfig() *RampingArrivalRateConfig { func TestRampingArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, getTestRampingArrivalRateConfig(), es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - time.Sleep(time.Second) - return nil - }), - ) - defer cancel() + + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + time.Sleep(time.Second) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestRampingArrivalRateConfig()) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) - entries := logHook.Drain() + require.NoError(t, test.executor.Run(test.ctx, engineOut)) + entries := test.logHook.Drain() require.NotEmpty(t, entries) for _, entry := range entries { require.Equal(t, @@ -95,19 +89,14 @@ func TestRampingArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) { func TestRampingArrivalRateRunCorrectRate(t *testing.T) { t.Parallel() var count int64 - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, getTestRampingArrivalRateConfig(), es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - atomic.AddInt64(&count, 1) - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + atomic.AddInt64(&count, 1) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, 
runner, getTestRampingArrivalRateConfig()) + defer test.cancel() + var wg sync.WaitGroup wg.Add(1) go func() { @@ -128,19 +117,28 @@ func TestRampingArrivalRateRunCorrectRate(t *testing.T) { assert.InDelta(t, 50, currentCount, 3) }() engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) wg.Wait() - require.NoError(t, err) - require.Empty(t, logHook.Drain()) + require.Empty(t, test.logHook.Drain()) } func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 1, 3) + + config := &RampingArrivalRateConfig{ + TimeUnit: types.NullDurationFrom(time.Second), + Stages: []Stage{ + { + // the minus one makes it so only 9 iterations will be started instead of 10 + // as the 10th happens to be just at the end and sometimes doesn't get executed :( + Duration: types.NullDurationFrom(time.Second*2 - 1), + Target: null.IntFrom(10), + }, + }, + PreAllocatedVUs: null.IntFrom(1), + MaxVUs: null.IntFrom(3), + } + var count int64 ch := make(chan struct{}) // closed when new unplannedVU is started and signal to get to next iterations ch2 := make(chan struct{}) // closed when a second iteration was started on an old VU in order to test it won't start a second unplanned VU in parallel or at all @@ -154,24 +152,12 @@ func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) { return nil }) - ctx, cancel, executor, logHook := setupExecutor( - t, &RampingArrivalRateConfig{ - TimeUnit: types.NullDurationFrom(time.Second), - Stages: []Stage{ - { - // the minus one makes it so only 9 iterations will be started instead of 10 - // as the 10th happens to be just at the end and sometimes doesn't get executed :( - Duration: types.NullDurationFrom(time.Second*2 - 
1), - Target: null.IntFrom(10), - }, - }, - PreAllocatedVUs: null.IntFrom(1), - MaxVUs: null.IntFrom(3), - }, - es, runner) - defer cancel() + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - es.SetInitVUFunc(func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { + test.state.SetInitVUFunc(func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { cur := atomic.LoadInt64(&count) require.Equal(t, cur, int64(1)) time.Sleep(time.Second / 2) @@ -190,13 +176,12 @@ func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) { time.Sleep(time.Millisecond * 200) cur = atomic.LoadInt64(&count) require.NotEqual(t, cur, int64(2)) - idl, idg := es.GetUniqueVUIdentifiers() + idl, idg := test.state.GetUniqueVUIdentifiers() return runner.NewVU(idl, idg, engineOut) }) - err = executor.Run(ctx, engineOut) - assert.NoError(t, err) - assert.Empty(t, logHook.Drain()) + assert.NoError(t, test.executor.Run(test.ctx, engineOut)) + assert.Empty(t, test.logHook.Drain()) droppedIters := sumMetricValues(engineOut, metrics.DroppedIterationsName) assert.Equal(t, count+int64(droppedIters), int64(9)) @@ -204,11 +189,19 @@ func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) { func TestRampingArrivalRateRunCorrectRateWithSlowRate(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 1, 3) + + config := &RampingArrivalRateConfig{ + TimeUnit: types.NullDurationFrom(time.Second), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(time.Second * 2), + Target: null.IntFrom(10), + }, + }, + PreAllocatedVUs: null.IntFrom(1), + MaxVUs: null.IntFrom(3), + } + var count int64 ch := make(chan struct{}) // closed when new unplannedVU is started and signal to get 
to next iterations runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { @@ -219,22 +212,12 @@ func TestRampingArrivalRateRunCorrectRateWithSlowRate(t *testing.T) { return nil }) - ctx, cancel, executor, logHook := setupExecutor( - t, &RampingArrivalRateConfig{ - TimeUnit: types.NullDurationFrom(time.Second), - Stages: []Stage{ - { - Duration: types.NullDurationFrom(time.Second * 2), - Target: null.IntFrom(10), - }, - }, - PreAllocatedVUs: null.IntFrom(1), - MaxVUs: null.IntFrom(3), - }, - es, runner) - defer cancel() + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - es.SetInitVUFunc(func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { + test.state.SetInitVUFunc(func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { t.Log("init") cur := atomic.LoadInt64(&count) require.Equal(t, cur, int64(1)) @@ -244,56 +227,50 @@ func TestRampingArrivalRateRunCorrectRateWithSlowRate(t *testing.T) { cur = atomic.LoadInt64(&count) require.NotEqual(t, cur, int64(1)) - idl, idg := es.GetUniqueVUIdentifiers() + idl, idg := test.state.GetUniqueVUIdentifiers() return runner.NewVU(idl, idg, engineOut) }) - err = executor.Run(ctx, engineOut) - assert.NoError(t, err) - assert.Empty(t, logHook.Drain()) - assert.Equal(t, int64(0), es.GetCurrentlyActiveVUsCount()) - assert.Equal(t, int64(2), es.GetInitializedVUsCount()) + assert.NoError(t, test.executor.Run(test.ctx, engineOut)) + assert.Empty(t, test.logHook.Drain()) + assert.Equal(t, int64(0), test.state.GetCurrentlyActiveVUsCount()) + assert.Equal(t, int64(2), test.state.GetInitializedVUsCount()) } func TestRampingArrivalRateRunGracefulStop(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, 
builtinMetrics, 10, 10) + + config := &RampingArrivalRateConfig{ + TimeUnit: types.NullDurationFrom(1 * time.Second), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(2 * time.Second), + Target: null.IntFrom(10), + }, + }, + StartRate: null.IntFrom(10), + PreAllocatedVUs: null.IntFrom(10), + MaxVUs: null.IntFrom(10), + BaseConfig: BaseConfig{ + GracefulStop: types.NullDurationFrom(5 * time.Second), + }, + } runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { time.Sleep(5 * time.Second) return nil }) - ctx, cancel, executor, _ := setupExecutor( - t, &RampingArrivalRateConfig{ - TimeUnit: types.NullDurationFrom(1 * time.Second), - Stages: []Stage{ - { - Duration: types.NullDurationFrom(2 * time.Second), - Target: null.IntFrom(10), - }, - }, - StartRate: null.IntFrom(10), - PreAllocatedVUs: null.IntFrom(10), - MaxVUs: null.IntFrom(10), - BaseConfig: BaseConfig{ - GracefulStop: types.NullDurationFrom(5 * time.Second), - }, - }, - es, runner) - defer cancel() + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() engineOut := make(chan metrics.SampleContainer, 1000) defer close(engineOut) - err = executor.Run(ctx, engineOut) - assert.NoError(t, err) - assert.Equal(t, int64(0), es.GetCurrentlyActiveVUsCount()) - assert.Equal(t, int64(10), es.GetInitializedVUsCount()) - assert.Equal(t, uint64(10), es.GetFullIterationCount()) + assert.NoError(t, test.executor.Run(test.ctx, engineOut)) + assert.Equal(t, int64(0), test.state.GetCurrentlyActiveVUsCount()) + assert.Equal(t, int64(10), test.state.GetInitializedVUsCount()) + assert.Equal(t, uint64(10), test.state.GetFullIterationCount()) } func BenchmarkRampingArrivalRateRun(b *testing.B) { @@ -316,19 +293,18 @@ func BenchmarkRampingArrivalRateRun(b *testing.B) { } }() - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState( - lib.Options{}, mustNewExecutionTuple(nil, nil), builtinMetrics, - 
uint64(tc.prealloc.Int64), uint64(tc.prealloc.Int64), - ) - var count int64 runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { atomic.AddInt64(&count, 1) return nil }) + testRunState := getTestRunState(b, lib.Options{}, runner) + es := lib.NewExecutionState( + testRunState, mustNewExecutionTuple(nil, nil), + uint64(tc.prealloc.Int64), uint64(tc.prealloc.Int64), + ) + // an high target to get the highest rate target := int64(1e9) @@ -347,8 +323,7 @@ func BenchmarkRampingArrivalRateRun(b *testing.B) { }, PreAllocatedVUs: tc.prealloc, MaxVUs: tc.prealloc, - }, - es, runner) + }, es) defer cancel() b.ResetTimer() @@ -740,32 +715,21 @@ func TestRampingArrivalRateGlobalIters(t *testing.T) { tc := tc t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { t.Parallel() - ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) - require.NoError(t, err) - seg, err := lib.NewExecutionSegmentFromString(tc.seg) - require.NoError(t, err) - et, err := lib.NewExecutionTuple(seg, &ess) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 5, 5) - - runner := &minirunner.MiniRunner{} - ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) - defer cancel() gotIters := []uint64{} var mx sync.Mutex - runner.Fn = func(ctx context.Context, state *lib.State, _ chan<- metrics.SampleContainer) error { + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { mx.Lock() gotIters = append(gotIters, state.GetScenarioGlobalVUIter()) mx.Unlock() return nil - } + }) + + test := setupExecutorTest(t, tc.seg, tc.seq, lib.Options{}, runner, config) + defer test.cancel() engineOut := make(chan metrics.SampleContainer, 100) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) assert.Equal(t, tc.expIters, gotIters) }) } @@ -787,13 +751,6 
@@ func TestRampingArrivalRateCornerCase(t *testing.T) { et, err := lib.NewExecutionTuple(newExecutionSegmentFromString("1/5:2/5"), newExecutionSegmentSequenceFromString("0,1/5,2/5,1")) require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - - executor, err := config.NewExecutor(es, nil) - require.NoError(t, err) - require.False(t, executor.GetConfig().HasWork(et)) + require.False(t, config.HasWork(et)) } diff --git a/lib/executor/ramping_vus_test.go b/lib/executor/ramping_vus_test.go index bd38bafe965..fbf30495a94 100644 --- a/lib/executor/ramping_vus_test.go +++ b/lib/executor/ramping_vus_test.go @@ -82,21 +82,18 @@ func TestRampingVUsRun(t *testing.T) { } var iterCount int64 - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - // Sleeping for a weird duration somewhat offset from the - // executor ticks to hopefully keep race conditions out of - // our control from failing the test. - time.Sleep(300 * time.Millisecond) - atomic.AddInt64(&iterCount, 1) - return nil - }), - ) - defer cancel() + + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + // Sleeping for a weird duration somewhat offset from the + // executor ticks to hopefully keep race conditions out of + // our control from failing the test. 
+ time.Sleep(300 * time.Millisecond) + atomic.AddInt64(&iterCount, 1) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() sampleTimes := []time.Duration{ 500 * time.Millisecond, @@ -105,12 +102,12 @@ func TestRampingVUsRun(t *testing.T) { } errCh := make(chan error) - go func() { errCh <- executor.Run(ctx, nil) }() + go func() { errCh <- test.executor.Run(test.ctx, nil) }() result := make([]int64, len(sampleTimes)) for i, d := range sampleTimes { time.Sleep(d) - result[i] = es.GetCurrentlyActiveVUsCount() + result[i] = test.state.GetCurrentlyActiveVUsCount() } require.NoError(t, <-errCh) @@ -139,25 +136,22 @@ func TestRampingVUsGracefulStopWaits(t *testing.T) { stop = make(chan struct{}) // the itearation should stop ) - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - close(started) - defer close(stopped) - select { - case <-ctx.Done(): - t.Fatal("The iterations should've ended before the context") - case <-stop: - } - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + close(started) + defer close(stopped) + select { + case <-ctx.Done(): + t.Fatal("The iterations should've ended before the context") + case <-stop: + } + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + errCh := make(chan error) - go func() { errCh <- executor.Run(ctx, nil) }() + go func() { errCh <- test.executor.Run(test.ctx, nil) }() <-started // 500 milliseconds more then the duration and 500 less then the gracefulStop @@ -188,25 +182,22 @@ func TestRampingVUsGracefulStopStops(t *testing.T) { stop = make(chan struct{}) // the itearation should stop ) - et, err := lib.NewExecutionTuple(nil, nil) - 
require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - close(started) - defer close(stopped) - select { - case <-ctx.Done(): - case <-stop: - t.Fatal("The iterations shouldn't have ended before the context") - } - return nil - }), - ) - defer cancel() + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + close(started) + defer close(stopped) + select { + case <-ctx.Done(): + case <-stop: + t.Fatal("The iterations shouldn't have ended before the context") + } + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + errCh := make(chan error) - go func() { errCh <- executor.Run(ctx, nil) }() + go func() { errCh <- test.executor.Run(test.ctx, nil) }() <-started // 500 milliseconds more then the gracefulStop + duration @@ -242,29 +233,26 @@ func TestRampingVUsGracefulRampDown(t *testing.T) { stop = make(chan struct{}) // the itearation should stop ) - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, state *lib.State) error { - if state.VUID == 1 { // the first VU will wait here to do stuff - close(started) - defer close(stopped) - select { - case <-ctx.Done(): - t.Fatal("The iterations can't have ended before the context") - case <-stop: - } - } else { // all other (1) VUs will just sleep long enough - time.Sleep(2500 * time.Millisecond) + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { + if state.VUID == 1 { // the first VU will wait here to do stuff + close(started) + defer close(stopped) + select { + case <-ctx.Done(): + t.Fatal("The iterations can't have ended before the context") + case <-stop: } - return nil - }), - ) - 
defer cancel() + } else { // all other (1) VUs will just sleep long enough + time.Sleep(2500 * time.Millisecond) + } + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + errCh := make(chan error) - go func() { errCh <- executor.Run(ctx, nil) }() + go func() { errCh <- test.executor.Run(test.ctx, nil) }() <-started // 500 milliseconds more then the gracefulRampDown + duration @@ -333,7 +321,7 @@ func TestRampingVUsHandleRemainingVUs(t *testing.T) { gotVuInterrupted uint32 gotVuFinished uint32 ) - iteration := func(ctx context.Context, _ *lib.State) error { + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { select { case <-time.After(vuSleepDuration): atomic.AddUint32(&gotVuFinished, 1) @@ -341,19 +329,14 @@ func TestRampingVUsHandleRemainingVUs(t *testing.T) { atomic.AddUint32(&gotVuInterrupted, 1) } return nil - } + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, cfg) + defer test.cancel() // run the executor: this should finish in ~70ms // sum(stages) + GracefulRampDown - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - ctx, cancel, executor, _ := setupExecutor( - t, cfg, - lib.NewExecutionState(lib.Options{}, et, nil, maxVus, maxVus), - simpleRunner(iteration), - ) - defer cancel() - require.NoError(t, executor.Run(ctx, nil)) + require.NoError(t, test.executor.Run(test.ctx, nil)) assert.Equal(t, wantVuInterrupted, atomic.LoadUint32(&gotVuInterrupted)) assert.Equal(t, wantVuFinished, atomic.LoadUint32(&gotVuFinished)) @@ -380,17 +363,13 @@ func TestRampingVUsRampDownNoWobble(t *testing.T) { }, } - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, nil, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, config, es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - time.Sleep(500 * time.Millisecond) - return nil - }), - ) - defer cancel() + runner := 
simpleRunner(func(ctx context.Context, _ *lib.State) error { + time.Sleep(500 * time.Millisecond) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() sampleTimes := []time.Duration{ 100 * time.Millisecond, @@ -400,18 +379,18 @@ func TestRampingVUsRampDownNoWobble(t *testing.T) { rampDownSamples := int((config.Stages[len(config.Stages)-1].Duration.TimeDuration() + config.GracefulRampDown.TimeDuration()) / rampDownSampleTime) errCh := make(chan error) - go func() { errCh <- executor.Run(ctx, nil) }() + go func() { errCh <- test.executor.Run(test.ctx, nil) }() result := make([]int64, len(sampleTimes)+rampDownSamples) for i, d := range sampleTimes { time.Sleep(d) - result[i] = es.GetCurrentlyActiveVUsCount() + result[i] = test.state.GetCurrentlyActiveVUsCount() } // Sample ramp-down at a higher rate for i := len(sampleTimes); i < rampDownSamples; i++ { time.Sleep(rampDownSampleTime) - result[i] = es.GetCurrentlyActiveVUsCount() + result[i] = test.state.GetCurrentlyActiveVUsCount() } require.NoError(t, <-errCh) diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go index cb419e2f3d2..d6d7986c5fa 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -238,7 +238,7 @@ func (si SharedIterations) Run(parentCtx context.Context, out chan<- metrics.Sam if attemptedIters < totalIters { metrics.PushIfNotDone(parentCtx, out, metrics.Sample{ Value: float64(totalIters - attemptedIters), - Metric: si.executionState.BuiltinMetrics.DroppedIterations, + Metric: si.executionState.Test.BuiltinMetrics.DroppedIterations, Tags: si.getMetricTags(nil), Time: time.Now(), }) } diff --git a/lib/executor/shared_iterations_test.go b/lib/executor/shared_iterations_test.go index f4e65f6aa6a..d3c5d6b3402 100644 --- a/lib/executor/shared_iterations_test.go +++ b/lib/executor/shared_iterations_test.go @@ -34,7 +34,6 @@ import ( "gopkg.in/guregu/null.v3" "go.k6.io/k6/lib" - 
"go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/types" "go.k6.io/k6/metrics" ) @@ -51,21 +50,16 @@ func getTestSharedIterationsConfig() SharedIterationsConfig { func TestSharedIterationsRun(t *testing.T) { t.Parallel() var doneIters uint64 - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, getTestSharedIterationsConfig(), es, - simpleRunner(func(ctx context.Context, _ *lib.State) error { - atomic.AddUint64(&doneIters, 1) - return nil - }), - ) - defer cancel() - err = executor.Run(ctx, nil) - require.NoError(t, err) + + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + atomic.AddUint64(&doneIters, 1) + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestSharedIterationsConfig()) + defer test.cancel() + + require.NoError(t, test.executor.Run(test.ctx, nil)) assert.Equal(t, uint64(100), doneIters) } @@ -77,31 +71,26 @@ func TestSharedIterationsRunVariableVU(t *testing.T) { result sync.Map slowVUID uint64 ) - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, _ := setupExecutor( - t, getTestSharedIterationsConfig(), es, - simpleRunner(func(ctx context.Context, state *lib.State) error { - time.Sleep(10 * time.Millisecond) // small wait to stabilize the test - // Pick one VU randomly and always slow it down. 
- sid := atomic.LoadUint64(&slowVUID) - if sid == uint64(0) { - atomic.StoreUint64(&slowVUID, state.VUID) - } - if sid == state.VUID { - time.Sleep(200 * time.Millisecond) - } - currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) - result.Store(state.VUID, currIter.(uint64)+1) - return nil - }), - ) - defer cancel() - err = executor.Run(ctx, nil) - require.NoError(t, err) + + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { + time.Sleep(10 * time.Millisecond) // small wait to stabilize the test + // Pick one VU randomly and always slow it down. + sid := atomic.LoadUint64(&slowVUID) + if sid == uint64(0) { + atomic.StoreUint64(&slowVUID, state.VUID) + } + if sid == state.VUID { + time.Sleep(200 * time.Millisecond) + } + currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) + result.Store(state.VUID, currIter.(uint64)+1) //nolint:forcetypeassert + return nil + }) + + test := setupExecutorTest(t, "", "", lib.Options{}, runner, getTestSharedIterationsConfig()) + defer test.cancel() + + require.NoError(t, test.executor.Run(test.ctx, nil)) var totalIters uint64 result.Range(func(key, value interface{}) bool { @@ -120,8 +109,12 @@ func TestSharedIterationsRunVariableVU(t *testing.T) { func TestSharedIterationsEmitDroppedIterations(t *testing.T) { t.Parallel() var count int64 - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) + + runner := simpleRunner(func(ctx context.Context, _ *lib.State) error { + atomic.AddInt64(&count, 1) + <-ctx.Done() + return nil + }) config := &SharedIterationsConfig{ VUs: null.IntFrom(5), @@ -129,22 +122,12 @@ func TestSharedIterationsEmitDroppedIterations(t *testing.T) { MaxDuration: types.NullDurationFrom(1 * time.Second), } - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 10, 50) - ctx, cancel, executor, logHook := setupExecutor( - t, config, es, - simpleRunner(func(ctx 
context.Context, _ *lib.State) error { - atomic.AddInt64(&count, 1) - <-ctx.Done() - return nil - }), - ) - defer cancel() + test := setupExecutorTest(t, "", "", lib.Options{}, runner, config) + defer test.cancel() + engineOut := make(chan metrics.SampleContainer, 1000) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) - assert.Empty(t, logHook.Drain()) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) + assert.Empty(t, test.logHook.Drain()) assert.Equal(t, int64(5), count) assert.Equal(t, float64(95), sumMetricValues(engineOut, metrics.DroppedIterationsName)) } @@ -171,32 +154,21 @@ func TestSharedIterationsGlobalIters(t *testing.T) { tc := tc t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { t.Parallel() - ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) - require.NoError(t, err) - seg, err := lib.NewExecutionSegmentFromString(tc.seg) - require.NoError(t, err) - et, err := lib.NewExecutionTuple(seg, &ess) - require.NoError(t, err) - registry := metrics.NewRegistry() - builtinMetrics := metrics.RegisterBuiltinMetrics(registry) - es := lib.NewExecutionState(lib.Options{}, et, builtinMetrics, 5, 5) - - runner := &minirunner.MiniRunner{} - ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) - defer cancel() gotIters := []uint64{} var mx sync.Mutex - runner.Fn = func(ctx context.Context, state *lib.State, _ chan<- metrics.SampleContainer) error { + runner := simpleRunner(func(ctx context.Context, state *lib.State) error { mx.Lock() gotIters = append(gotIters, state.GetScenarioGlobalVUIter()) mx.Unlock() return nil - } + }) + + test := setupExecutorTest(t, tc.seg, tc.seq, lib.Options{}, runner, config) + defer test.cancel() engineOut := make(chan metrics.SampleContainer, 100) - err = executor.Run(ctx, engineOut) - require.NoError(t, err) + require.NoError(t, test.executor.Run(test.ctx, engineOut)) sort.Slice(gotIters, func(i, j int) bool { return gotIters[i] < gotIters[j] }) assert.Equal(t, tc.expIters, 
gotIters) }) From 69a0b771db29f71176525675465262c2bfbdc133 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Mon, 1 Aug 2022 12:36:11 +0300 Subject: [PATCH 09/12] Simplify the MetricsEngine by just passing it the lib.ExecutionState --- core/engine.go | 4 +--- metrics/engine/engine.go | 31 +++++++++++-------------------- 2 files changed, 12 insertions(+), 23 deletions(-) diff --git a/core/engine.go b/core/engine.go index 183e79bd942..3e852422a4c 100644 --- a/core/engine.go +++ b/core/engine.go @@ -86,9 +86,7 @@ func NewEngine(testState *lib.TestRunState, ex lib.ExecutionScheduler, outputs [ logger: testState.Logger.WithField("component", "engine"), } - me, err := engine.NewMetricsEngine( - testState.Registry, ex.GetState(), testState.Options, testState.RuntimeOptions, testState.Logger, - ) + me, err := engine.NewMetricsEngine(ex.GetState()) if err != nil { return nil, err } diff --git a/metrics/engine/engine.go b/metrics/engine/engine.go index 080070c4d37..f7ac6d7403f 100644 --- a/metrics/engine/engine.go +++ b/metrics/engine/engine.go @@ -18,11 +18,8 @@ import ( // aggregated metric sample values. They are used to generate the end-of-test // summary and to evaluate the test thresholds. type MetricsEngine struct { - registry *metrics.Registry - executionState *lib.ExecutionState - options lib.Options - runtimeOptions lib.RuntimeOptions - logger logrus.FieldLogger + es *lib.ExecutionState + logger logrus.FieldLogger // These can be both top-level metrics or sub-metrics metricsWithThresholds []*metrics.Metric @@ -37,21 +34,15 @@ type MetricsEngine struct { } // NewMetricsEngine creates a new metrics Engine with the given parameters. 
-func NewMetricsEngine( - registry *metrics.Registry, executionState *lib.ExecutionState, - opts lib.Options, rtOpts lib.RuntimeOptions, logger logrus.FieldLogger, -) (*MetricsEngine, error) { +func NewMetricsEngine(es *lib.ExecutionState) (*MetricsEngine, error) { me := &MetricsEngine{ - registry: registry, - executionState: executionState, - options: opts, - runtimeOptions: rtOpts, - logger: logger.WithField("component", "metrics-engine"), + es: es, + logger: es.Test.Logger.WithField("component", "metrics-engine"), ObservedMetrics: make(map[string]*metrics.Metric), } - if !(me.runtimeOptions.NoSummary.Bool && me.runtimeOptions.NoThresholds.Bool) { + if !(me.es.Test.RuntimeOptions.NoSummary.Bool && me.es.Test.RuntimeOptions.NoThresholds.Bool) { err := me.initSubMetricsAndThresholds() if err != nil { return nil, err @@ -74,7 +65,7 @@ func (me *MetricsEngine) getThresholdMetricOrSubmetric(name string) (*metrics.Me // TODO: replace with strings.Cut after Go 1.18 nameParts := strings.SplitN(name, "{", 2) - metric := me.registry.Get(nameParts[0]) + metric := me.es.Test.Registry.Get(nameParts[0]) if metric == nil { return nil, fmt.Errorf("metric '%s' does not exist in the script", nameParts[0]) } @@ -136,10 +127,10 @@ func (me *MetricsEngine) markObserved(metric *metrics.Metric) { } func (me *MetricsEngine) initSubMetricsAndThresholds() error { - for metricName, thresholds := range me.options.Thresholds { + for metricName, thresholds := range me.es.Test.Options.Thresholds { metric, err := me.getThresholdMetricOrSubmetric(metricName) - if me.runtimeOptions.NoThresholds.Bool { + if me.es.Test.RuntimeOptions.NoThresholds.Bool { if err != nil { me.logger.WithError(err).Warnf("Invalid metric '%s' in threshold definitions", metricName) } @@ -164,7 +155,7 @@ func (me *MetricsEngine) initSubMetricsAndThresholds() error { // TODO: refactor out of here when https://github.com/grafana/k6/issues/1321 // lands and there is a better way to enable a metric with tag - if 
me.options.SystemTags.Has(metrics.TagExpectedResponse) { + if me.es.Test.Options.SystemTags.Has(metrics.TagExpectedResponse) { _, err := me.getThresholdMetricOrSubmetric("http_req_duration{expected_response:true}") if err != nil { return err // shouldn't happen, but ¯\_(ツ)_/¯ @@ -181,7 +172,7 @@ func (me *MetricsEngine) EvaluateThresholds(ignoreEmptySinks bool) (thresholdsTa me.MetricsLock.Lock() defer me.MetricsLock.Unlock() - t := me.executionState.GetCurrentTestRunDuration() + t := me.es.GetCurrentTestRunDuration() for _, m := range me.metricsWithThresholds { // If either the metric has no thresholds defined, or its sinks From fe9f70ab769b0caeb31b07b86d95dd2b372ef436 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Mon, 1 Aug 2022 14:37:44 +0300 Subject: [PATCH 10/12] Harden test helper type assertions --- js/bundle_test.go | 2 ++ js/console_test.go | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/js/bundle_test.go b/js/bundle_test.go index f645e0b719f..645b338f5cf 100644 --- a/js/bundle_test.go +++ b/js/bundle_test.go @@ -78,6 +78,8 @@ func getSimpleBundle(tb testing.TB, filename, data string, opts ...interface{}) rtOpts = &opt case *logrus.Logger: logger = opt + default: + tb.Fatalf("unknown test option %q", opt) } } diff --git a/js/console_test.go b/js/console_test.go index 4521079cacc..cdb5760a491 100644 --- a/js/console_test.go +++ b/js/console_test.go @@ -58,6 +58,8 @@ func getSimpleRunner(tb testing.TB, filename, data string, opts ...interface{}) rtOpts = opt case *logrus.Logger: logger = opt + default: + tb.Fatalf("unknown test option %q", opt) } } registry := metrics.NewRegistry() @@ -83,8 +85,9 @@ func extractLogger(fl logrus.FieldLogger) *logrus.Logger { return e.Logger case *logrus.Logger: return e + default: + panic(fmt.Sprintf("unknown logrus.FieldLogger option %q", fl)) } - return nil } func TestConsoleLogWithGojaNativeObject(t *testing.T) { From ec45155e8a195a1dd3e97f2a154768125d33574a Mon Sep 17 00:00:00 2001 From: 
Nedyalko Andreev Date: Tue, 2 Aug 2022 12:00:07 +0300 Subject: [PATCH 11/12] Simplify the ExecutionScheduler struct --- core/local/local.go | 40 ++++++++++++++++------------------------ 1 file changed, 16 insertions(+), 24 deletions(-) diff --git a/core/local/local.go b/core/local/local.go index 342ebe73be0..c4c01b17829 100644 --- a/core/local/local.go +++ b/core/local/local.go @@ -38,10 +38,6 @@ import ( // ExecutionScheduler is the local implementation of lib.ExecutionScheduler type ExecutionScheduler struct { - runner lib.Runner - options lib.Options - testRunState *lib.TestRunState - initProgress *pb.ProgressBar executorConfigs []lib.ExecutorConfig // sorted by (startTime, ID) executors []lib.Executor // sorted by (startTime, ID), excludes executors with no work @@ -103,10 +99,6 @@ func NewExecutionScheduler(trs *lib.TestRunState) (*ExecutionScheduler, error) { } return &ExecutionScheduler{ - runner: trs.Runner, - options: options, - testRunState: trs, - initProgress: pb.New(pb.WithConstLeft("Init")), executors: executors, executorConfigs: executorConfigs, @@ -121,8 +113,8 @@ func NewExecutionScheduler(trs *lib.TestRunState) (*ExecutionScheduler, error) { } // GetRunner returns the wrapped lib.Runner instance. 
-func (e *ExecutionScheduler) GetRunner() lib.Runner { - return e.runner +func (e *ExecutionScheduler) GetRunner() lib.Runner { // TODO: remove + return e.state.Test.Runner } // GetState returns a pointer to the execution state struct for the local @@ -168,7 +160,7 @@ func (e *ExecutionScheduler) initVU( // Get the VU IDs here, so that the VUs are (mostly) ordered by their // number in the channel buffer vuIDLocal, vuIDGlobal := e.state.GetUniqueVUIdentifiers() - vu, err := e.runner.NewVU(vuIDLocal, vuIDGlobal, samplesOut) + vu, err := e.state.Test.Runner.NewVU(vuIDLocal, vuIDGlobal, samplesOut) if err != nil { return nil, errext.WithHint(err, fmt.Sprintf("error while initializing VU #%d", vuIDGlobal)) } @@ -231,7 +223,7 @@ func (e *ExecutionScheduler) initVUsConcurrently( } func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- metrics.SampleContainer) { - e.testRunState.Logger.Debug("Starting emission of VUs and VUsMax metrics...") + e.state.Test.Logger.Debug("Starting emission of VUs and VUsMax metrics...") emitMetrics := func() { t := time.Now() @@ -241,15 +233,15 @@ func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- me Time: t, Metric: e.state.Test.BuiltinMetrics.VUs, Value: float64(e.state.GetCurrentlyActiveVUsCount()), - Tags: e.options.RunTags, + Tags: e.state.Test.Options.RunTags, }, { Time: t, Metric: e.state.Test.BuiltinMetrics.VUsMax, Value: float64(e.state.GetInitializedVUsCount()), - Tags: e.options.RunTags, + Tags: e.state.Test.Options.RunTags, }, }, - Tags: e.options.RunTags, + Tags: e.state.Test.Options.RunTags, Time: t, } metrics.PushIfNotDone(ctx, out, samples) @@ -259,7 +251,7 @@ func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- me go func() { defer func() { ticker.Stop() - e.testRunState.Logger.Debug("Metrics emission of VUs and VUsMax metrics stopped") + e.state.Test.Logger.Debug("Metrics emission of VUs and VUsMax metrics stopped") close(e.vusEmissionStopped) }() @@ 
-281,7 +273,7 @@ func (e *ExecutionScheduler) emitVUsAndVUsMax(ctx context.Context, out chan<- me func (e *ExecutionScheduler) Init(ctx context.Context, samplesOut chan<- metrics.SampleContainer) error { e.emitVUsAndVUsMax(ctx, samplesOut) - logger := e.testRunState.Logger.WithField("phase", "local-execution-scheduler-init") + logger := e.state.Test.Logger.WithField("phase", "local-execution-scheduler-init") vusToInitialize := lib.GetMaxPlannedVUs(e.executionPlan) logger.WithFields(logrus.Fields{ "neededVUs": vusToInitialize, @@ -348,7 +340,7 @@ func (e *ExecutionScheduler) runExecutor( ) { executorConfig := executor.GetConfig() executorStartTime := executorConfig.GetStartTime() - executorLogger := e.testRunState.Logger.WithFields(logrus.Fields{ + executorLogger := e.state.Test.Logger.WithFields(logrus.Fields{ "executor": executorConfig.GetName(), "type": executorConfig.GetType(), "startTime": executorStartTime, @@ -400,7 +392,7 @@ func (e *ExecutionScheduler) Run(globalCtx, runCtx context.Context, engineOut ch }() executorsCount := len(e.executors) - logger := e.testRunState.Logger.WithField("phase", "local-execution-scheduler-run") + logger := e.state.Test.Logger.WithField("phase", "local-execution-scheduler-run") e.initProgress.Modify(pb.WithConstLeft("Run")) var interrupted bool defer func() { @@ -434,11 +426,11 @@ func (e *ExecutionScheduler) Run(globalCtx, runCtx context.Context, engineOut ch defer cancel() // just in case, and to shut up go vet... 
// Run setup() before any executors, if it's not disabled - if !e.options.NoSetup.Bool { + if !e.state.Test.Options.NoSetup.Bool { logger.Debug("Running setup()") e.state.SetExecutionStatus(lib.ExecutionStatusSetup) e.initProgress.Modify(pb.WithConstProgress(1, "setup()")) - if err := e.runner.Setup(runSubCtx, engineOut); err != nil { + if err := e.state.Test.Runner.Setup(runSubCtx, engineOut); err != nil { logger.WithField("error", err).Debug("setup() aborted by error") return err } @@ -470,14 +462,14 @@ func (e *ExecutionScheduler) Run(globalCtx, runCtx context.Context, engineOut ch } // Run teardown() after all executors are done, if it's not disabled - if !e.options.NoTeardown.Bool { + if !e.state.Test.Options.NoTeardown.Bool { logger.Debug("Running teardown()") e.state.SetExecutionStatus(lib.ExecutionStatusTeardown) e.initProgress.Modify(pb.WithConstProgress(1, "teardown()")) // We run teardown() with the global context, so it isn't interrupted by // aborts caused by thresholds or even Ctrl+C (unless used twice). 
- if err := e.runner.Teardown(globalCtx, engineOut); err != nil { + if err := e.state.Test.Runner.Teardown(globalCtx, engineOut); err != nil { logger.WithField("error", err).Debug("teardown() aborted by error") return err } @@ -497,7 +489,7 @@ func (e *ExecutionScheduler) SetPaused(pause bool) error { if pause { return fmt.Errorf("execution is already paused") } - e.testRunState.Logger.Debug("Starting execution") + e.state.Test.Logger.Debug("Starting execution") return e.state.Resume() } From a75766b398ef3ed87628362e22f715a50d70a525 Mon Sep 17 00:00:00 2001 From: Nedyalko Andreev Date: Tue, 2 Aug 2022 12:26:39 +0300 Subject: [PATCH 12/12] Restrict TestPreInitState.KeyLogger back to just an io.Writer --- cmd/run.go | 4 ++-- cmd/test_load.go | 16 ++++++---------- lib/test_state.go | 2 +- 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index c3b21656273..ddaffd2e65d 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -266,8 +266,8 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) error { logger.Debug("Waiting for engine processes to finish...") engineWait() logger.Debug("Everything has finished, exiting k6!") - if testRunState.KeyLogger != nil { - if err := testRunState.KeyLogger.Close(); err != nil { + if test.keyLogger != nil { + if err := test.keyLogger.Close(); err != nil { logger.WithError(err).Warn("Error while closing the SSLKEYLOGFILE") } } diff --git a/cmd/test_load.go b/cmd/test_load.go index ccfd78aa0c7..0b2c2d3abb4 100644 --- a/cmd/test_load.go +++ b/cmd/test_load.go @@ -35,6 +35,7 @@ type loadedTest struct { fileSystems map[string]afero.Fs preInitState *lib.TestPreInitState initRunner lib.Runner // TODO: rename to something more appropriate + keyLogger io.Closer } func loadTest(gs *globalState, cmd *cobra.Command, args []string) (*loadedTest, error) { @@ -109,7 +110,8 @@ func (lt *loadedTest) initializeFirstRunner(gs *globalState) error { if err != nil { return fmt.Errorf("couldn't get absolute path for keylog file: 
%w", err) } - lt.preInitState.KeyLogger = &syncWriteCloser{w: f} + lt.keyLogger = f + lt.preInitState.KeyLogger = &syncWriter{w: f} } switch testType { case testTypeJS: @@ -255,19 +257,13 @@ func (lct *loadedAndConfiguredTest) buildTestRunState( }, nil } -type syncWriteCloser struct { - w io.WriteCloser +type syncWriter struct { + w io.Writer m sync.Mutex } -func (cw *syncWriteCloser) Write(b []byte) (int, error) { +func (cw *syncWriter) Write(b []byte) (int, error) { cw.m.Lock() defer cw.m.Unlock() return cw.w.Write(b) } - -func (cw *syncWriteCloser) Close() error { - cw.m.Lock() - defer cw.m.Unlock() - return cw.w.Close() -} diff --git a/lib/test_state.go b/lib/test_state.go index 57c110ef150..94fc56382f5 100644 --- a/lib/test_state.go +++ b/lib/test_state.go @@ -13,7 +13,7 @@ type TestPreInitState struct { RuntimeOptions RuntimeOptions Registry *metrics.Registry BuiltinMetrics *metrics.BuiltinMetrics - KeyLogger io.WriteCloser + KeyLogger io.Writer // TODO: replace with logrus.FieldLogger when all of the tests can be fixed Logger *logrus.Logger