From 052bdf19fa0f3096eb492c1bcb96837a58c26b04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Fri, 18 Jun 2021 13:12:04 +0200 Subject: [PATCH 01/22] Embed ScenarioState in execution context This will be used by the k6/execution module. --- lib/context.go | 15 ++++++ lib/executor/constant_arrival_rate.go | 56 +++++++++++--------- lib/executor/constant_vus.go | 8 +++ lib/executor/externally_controlled.go | 7 +++ lib/executor/per_vu_iterations.go | 8 +++ lib/executor/ramping_arrival_rate.go | 73 ++++++++++++++------------- lib/executor/ramping_vus.go | 6 +++ lib/executor/shared_iterations.go | 8 +++ lib/executor/vu_handle_test.go | 1 + lib/executors.go | 8 +++ 10 files changed, 131 insertions(+), 59 deletions(-) diff --git a/lib/context.go b/lib/context.go index a7544e4de4b..97681f1cb75 100644 --- a/lib/context.go +++ b/lib/context.go @@ -26,6 +26,7 @@ type ctxKey int const ( ctxKeyState ctxKey = iota + ctxKeyScenario ) func WithState(ctx context.Context, state *State) context.Context { @@ -39,3 +40,17 @@ func GetState(ctx context.Context) *State { } return v.(*State) } + +// WithScenarioState embeds a ScenarioState in ctx. +func WithScenarioState(ctx context.Context, s *ScenarioState) context.Context { + return context.WithValue(ctx, ctxKeyScenario, s) +} + +// GetScenarioState returns a ScenarioState from ctx. 
+func GetScenarioState(ctx context.Context) *ScenarioState { + v := ctx.Value(ctxKeyScenario) + if v == nil { + return nil + } + return v.(*ScenarioState) +} diff --git a/lib/executor/constant_arrival_rate.go b/lib/executor/constant_arrival_rate.go index 7acfd9db206..3dfaf65519c 100644 --- a/lib/executor/constant_arrival_rate.go +++ b/lib/executor/constant_arrival_rate.go @@ -243,11 +243,41 @@ func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- stats.S }() activeVUsCount := uint64(0) + vusFmt := pb.GetFixedLengthIntFormat(maxVUs) + progIters := fmt.Sprintf( + pb.GetFixedLengthFloatFormat(arrivalRatePerSec, 0)+" iters/s", arrivalRatePerSec) + progressFn := func() (float64, []string) { + spent := time.Since(startTime) + currActiveVUs := atomic.LoadUint64(&activeVUsCount) + progVUs := fmt.Sprintf(vusFmt+"/"+vusFmt+" VUs", + vusPool.Running(), currActiveVUs) + + right := []string{progVUs, duration.String(), progIters} + + if spent > duration { + return 1, right + } + + spentDuration := pb.GetFixedLengthDuration(spent, duration) + progDur := fmt.Sprintf("%s/%s", spentDuration, duration) + right[1] = progDur + + return math.Min(1, float64(spent)/float64(duration)), right + } + car.progress.Modify(pb.WithProgress(progressFn)) + go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, &car, progressFn) + + maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ + Name: car.config.Name, + Executor: car.config.Type, + StartTime: startTime, + ProgressFn: progressFn, + }) + returnVU := func(u lib.InitializedVU) { car.executionState.ReturnVU(u, true) activeVUsWg.Done() } - runIterationBasic := getIterationRunner(car.executionState, car.logger) activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { activeVUsWg.Add(1) activeVU := initVU.Activate(getVUActivationParams(maxDurationCtx, car.config.BaseConfig, returnVU)) @@ -284,30 +314,6 @@ func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- stats.S 
activateVU(initVU) } - vusFmt := pb.GetFixedLengthIntFormat(maxVUs) - progIters := fmt.Sprintf( - pb.GetFixedLengthFloatFormat(arrivalRatePerSec, 0)+" iters/s", arrivalRatePerSec) - progressFn := func() (float64, []string) { - spent := time.Since(startTime) - currActiveVUs := atomic.LoadUint64(&activeVUsCount) - progVUs := fmt.Sprintf(vusFmt+"/"+vusFmt+" VUs", - vusPool.Running(), currActiveVUs) - - right := []string{progVUs, duration.String(), progIters} - - if spent > duration { - return 1, right - } - - spentDuration := pb.GetFixedLengthDuration(spent, duration) - progDur := fmt.Sprintf("%s/%s", spentDuration, duration) - right[1] = progDur - - return math.Min(1, float64(spent)/float64(duration)), right - } - car.progress.Modify(pb.WithProgress(progressFn)) - go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, &car, progressFn) - start, offsets, _ := car.et.GetStripedOffsets() timer := time.NewTimer(time.Hour * 24) // here the we need the not scaled one diff --git a/lib/executor/constant_vus.go b/lib/executor/constant_vus.go index 33e872ff5ed..04c2ed3f0e4 100644 --- a/lib/executor/constant_vus.go +++ b/lib/executor/constant_vus.go @@ -176,10 +176,18 @@ func (clv ConstantVUs) Run(parentCtx context.Context, out chan<- stats.SampleCon regDurationDone := regDurationCtx.Done() runIteration := getIterationRunner(clv.executionState, clv.logger) + maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ + Name: clv.config.Name, + Executor: clv.config.Type, + StartTime: startTime, + ProgressFn: progressFn, + }) + returnVU := func(u lib.InitializedVU) { clv.executionState.ReturnVU(u, true) activeVUs.Done() } + handleVU := func(initVU lib.InitializedVU) { ctx, cancel := context.WithCancel(maxDurationCtx) defer cancel() diff --git a/lib/executor/externally_controlled.go b/lib/executor/externally_controlled.go index 270a16cc6a4..0383ade0193 100644 --- a/lib/executor/externally_controlled.go +++ b/lib/executor/externally_controlled.go @@ -536,6 
+536,13 @@ func (mex *ExternallyControlled) Run(parentCtx context.Context, out chan<- stats return err } + ctx = lib.WithScenarioState(ctx, &lib.ScenarioState{ + Name: mex.config.Name, + Executor: mex.config.Type, + StartTime: time.Now(), + ProgressFn: runState.progressFn, + }) + mex.progress.Modify(pb.WithProgress(runState.progressFn)) // Keep track of the progress go trackProgress(parentCtx, ctx, ctx, mex, runState.progressFn) diff --git a/lib/executor/per_vu_iterations.go b/lib/executor/per_vu_iterations.go index 26cb6bd14d8..dc0d733c67e 100644 --- a/lib/executor/per_vu_iterations.go +++ b/lib/executor/per_vu_iterations.go @@ -200,10 +200,18 @@ func (pvi PerVUIterations) Run(parentCtx context.Context, out chan<- stats.Sampl regDurationDone := regDurationCtx.Done() runIteration := getIterationRunner(pvi.executionState, pvi.logger) + maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ + Name: pvi.config.Name, + Executor: pvi.config.Type, + StartTime: startTime, + ProgressFn: progressFn, + }) + returnVU := func(u lib.InitializedVU) { pvi.executionState.ReturnVU(u, true) activeVUs.Done() } + handleVU := func(initVU lib.InitializedVU) { defer handleVUsWG.Done() ctx, cancel := context.WithCancel(maxDurationCtx) diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index fa4688825a9..ae5a70b16b8 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ -346,6 +346,45 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.S }() activeVUsCount := uint64(0) + tickerPeriod := int64(startTickerPeriod.Duration) + vusFmt := pb.GetFixedLengthIntFormat(maxVUs) + itersFmt := pb.GetFixedLengthFloatFormat(maxArrivalRatePerSec, 0) + " iters/s" + + progressFn := func() (float64, []string) { + currActiveVUs := atomic.LoadUint64(&activeVUsCount) + currentTickerPeriod := atomic.LoadInt64(&tickerPeriod) + progVUs := fmt.Sprintf(vusFmt+"/"+vusFmt+" VUs", + 
vusPool.Running(), currActiveVUs) + + itersPerSec := 0.0 + if currentTickerPeriod > 0 { + itersPerSec = float64(time.Second) / float64(currentTickerPeriod) + } + progIters := fmt.Sprintf(itersFmt, itersPerSec) + + right := []string{progVUs, duration.String(), progIters} + + spent := time.Since(startTime) + if spent > duration { + return 1, right + } + + spentDuration := pb.GetFixedLengthDuration(spent, duration) + progDur := fmt.Sprintf("%s/%s", spentDuration, duration) + right[1] = progDur + + return math.Min(1, float64(spent)/float64(duration)), right + } + + varr.progress.Modify(pb.WithProgress(progressFn)) + go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, varr, progressFn) + + maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ + Name: varr.config.Name, + Executor: varr.config.Type, + StartTime: startTime, + ProgressFn: progressFn, + }) returnVU := func(u lib.InitializedVU) { varr.executionState.ReturnVU(u, true) @@ -391,40 +430,6 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.S activateVU(initVU) } - tickerPeriod := int64(startTickerPeriod.Duration) - - vusFmt := pb.GetFixedLengthIntFormat(maxVUs) - itersFmt := pb.GetFixedLengthFloatFormat(maxArrivalRatePerSec, 0) + " iters/s" - - progressFn := func() (float64, []string) { - currActiveVUs := atomic.LoadUint64(&activeVUsCount) - currentTickerPeriod := atomic.LoadInt64(&tickerPeriod) - progVUs := fmt.Sprintf(vusFmt+"/"+vusFmt+" VUs", - vusPool.Running(), currActiveVUs) - - itersPerSec := 0.0 - if currentTickerPeriod > 0 { - itersPerSec = float64(time.Second) / float64(currentTickerPeriod) - } - progIters := fmt.Sprintf(itersFmt, itersPerSec) - - right := []string{progVUs, duration.String(), progIters} - - spent := time.Since(startTime) - if spent > duration { - return 1, right - } - - spentDuration := pb.GetFixedLengthDuration(spent, duration) - progDur := fmt.Sprintf("%s/%s", spentDuration, duration) - right[1] = progDur - - return 
math.Min(1, float64(spent)/float64(duration)), right - } - - varr.progress.Modify(pb.WithProgress(progressFn)) - go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, varr, progressFn) - regDurationDone := regDurationCtx.Done() timer := time.NewTimer(time.Hour) start := time.Now() diff --git a/lib/executor/ramping_vus.go b/lib/executor/ramping_vus.go index 52804ca539c..f3d1132015c 100644 --- a/lib/executor/ramping_vus.go +++ b/lib/executor/ramping_vus.go @@ -628,6 +628,12 @@ func (vlv RampingVUs) Run(parentCtx context.Context, out chan<- stats.SampleCont vlv.executionState.ModCurrentlyActiveVUsCount(-1) } + maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ + Name: vlv.config.Name, + Executor: vlv.config.Type, + StartTime: startTime, + ProgressFn: progressFn, + }) vuHandles := make([]*vuHandle, maxVUs) for i := uint64(0); i < maxVUs; i++ { vuHandle := newStoppedVUHandle( diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go index ee0720b39c9..7c60bd83121 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -232,10 +232,18 @@ func (si SharedIterations) Run(parentCtx context.Context, out chan<- stats.Sampl regDurationDone := regDurationCtx.Done() runIteration := getIterationRunner(si.executionState, si.logger) + maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ + Name: si.config.Name, + Executor: si.config.Type, + StartTime: startTime, + ProgressFn: progressFn, + }) + returnVU := func(u lib.InitializedVU) { si.executionState.ReturnVU(u, true) activeVUs.Done() } + handleVU := func(initVU lib.InitializedVU) { ctx, cancel := context.WithCancel(maxDurationCtx) defer cancel() diff --git a/lib/executor/vu_handle_test.go b/lib/executor/vu_handle_test.go index 7afa474a442..02fdff33f75 100644 --- a/lib/executor/vu_handle_test.go +++ b/lib/executor/vu_handle_test.go @@ -10,6 +10,7 @@ import ( "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" + "go.k6.io/k6/lib" "go.k6.io/k6/lib/testutils" "go.k6.io/k6/lib/testutils/minirunner" diff --git a/lib/executors.go b/lib/executors.go index 342e88e4208..e8ef95f49aa 100644 --- a/lib/executors.go +++ b/lib/executors.go @@ -109,6 +109,14 @@ type ExecutorConfig interface { HasWork(*ExecutionTuple) bool } +// ScenarioState holds runtime scenario information returned by the k6/execution +// JS module. +type ScenarioState struct { + Name, Executor string + StartTime time.Time + ProgressFn func() (float64, []string) +} + // InitVUFunc is just a shorthand so we don't have to type the function // signature every time. type InitVUFunc func(context.Context, *logrus.Entry) (InitializedVU, error) From 9adf101ccbfbe06bca76dc90386b5dd7f1c9d375 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Fri, 18 Jun 2021 13:18:15 +0200 Subject: [PATCH 02/22] Embed ExecutionState in context This will be used by the k6/execution module. --- core/local/local.go | 1 + lib/context.go | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/core/local/local.go b/core/local/local.go index e3f1980e368..44ac6366e82 100644 --- a/core/local/local.go +++ b/core/local/local.go @@ -362,6 +362,7 @@ func (e *ExecutionScheduler) Run(globalCtx, runCtx context.Context, engineOut ch runResults := make(chan error, executorsCount) // nil values are successful runs + runCtx = lib.WithExecutionState(runCtx, e.state) runSubCtx, cancel := context.WithCancel(runCtx) defer cancel() // just in case, and to shut up go vet... diff --git a/lib/context.go b/lib/context.go index 97681f1cb75..64df24f3942 100644 --- a/lib/context.go +++ b/lib/context.go @@ -26,13 +26,16 @@ type ctxKey int const ( ctxKeyState ctxKey = iota + ctxKeyExecState ctxKeyScenario ) +// WithState embeds a State in ctx. func WithState(ctx context.Context, state *State) context.Context { return context.WithValue(ctx, ctxKeyState, state) } +// GetState returns a State from ctx. 
func GetState(ctx context.Context) *State { v := ctx.Value(ctxKeyState) if v == nil { @@ -41,6 +44,20 @@ func GetState(ctx context.Context) *State { return v.(*State) } +// WithExecutionState embeds an ExecutionState in ctx. +func WithExecutionState(ctx context.Context, s *ExecutionState) context.Context { + return context.WithValue(ctx, ctxKeyExecState, s) +} + +// GetExecutionState returns an ExecutionState from ctx. +func GetExecutionState(ctx context.Context) *ExecutionState { + v := ctx.Value(ctxKeyExecState) + if v == nil { + return nil + } + return v.(*ExecutionState) +} + // WithScenarioState embeds a ScenarioState in ctx. func WithScenarioState(ctx context.Context, s *ScenarioState) context.Context { return context.WithValue(ctx, ctxKeyScenario, s) From 84c32d412a0491fe56c596abf797c4cc3fe0f0f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Mon, 22 Feb 2021 10:33:13 +0100 Subject: [PATCH 03/22] Use uint64 for VU IDs --- core/local/local.go | 2 +- js/bundle.go | 4 ++-- js/bundle_test.go | 2 +- js/runner.go | 25 +++++++++++------------ js/runner_test.go | 2 +- lib/executor/base_executor.go | 4 ++-- lib/executor/common_test.go | 2 +- lib/executor/per_vu_iterations_test.go | 2 +- lib/executor/ramping_arrival_rate_test.go | 4 ++-- lib/executor/shared_iterations_test.go | 8 ++++---- lib/executor/vu_handle_test.go | 6 +++--- lib/runner.go | 4 ++-- lib/state.go | 2 +- lib/testutils/minirunner/minirunner.go | 13 ++++++------ 14 files changed, 39 insertions(+), 41 deletions(-) diff --git a/core/local/local.go b/core/local/local.go index 44ac6366e82..85cb1e7a497 100644 --- a/core/local/local.go +++ b/core/local/local.go @@ -160,7 +160,7 @@ func (e *ExecutionScheduler) initVU( // Get the VU ID here, so that the VUs are (mostly) ordered by their // number in the channel buffer vuID := e.state.GetUniqueVUIdentifier() - vu, err := e.runner.NewVU(int64(vuID), samplesOut) + vu, err := e.runner.NewVU(vuID, samplesOut) if err != nil { return nil, 
errext.WithHint(err, fmt.Sprintf("error while initializing VU #%d", vuID)) } diff --git a/js/bundle.go b/js/bundle.go index 6f2f24d391c..a1d10ab160b 100644 --- a/js/bundle.go +++ b/js/bundle.go @@ -236,7 +236,7 @@ func (b *Bundle) getExports(logger logrus.FieldLogger, rt *goja.Runtime, options } // Instantiate creates a new runtime from this bundle. -func (b *Bundle) Instantiate(logger logrus.FieldLogger, vuID int64) (bi *BundleInstance, instErr error) { +func (b *Bundle) Instantiate(logger logrus.FieldLogger, vuID uint64) (bi *BundleInstance, instErr error) { // TODO: actually use a real context here, so that the instantiation can be killed // Placeholder for a real context. ctxPtr := new(context.Context) @@ -283,7 +283,7 @@ func (b *Bundle) Instantiate(logger logrus.FieldLogger, vuID int64) (bi *BundleI // Instantiates the bundle into an existing runtime. Not public because it also messes with a bunch // of other things, will potentially thrash data and makes a mess in it if the operation fails. 
-func (b *Bundle) instantiate(logger logrus.FieldLogger, rt *goja.Runtime, init *InitContext, vuID int64) error { +func (b *Bundle) instantiate(logger logrus.FieldLogger, rt *goja.Runtime, init *InitContext, vuID uint64) error { rt.SetParserOptions(parser.WithDisableSourceMaps) rt.SetFieldNameMapper(common.FieldNameMapper{}) rt.SetRandSource(common.NewRandSource()) diff --git a/js/bundle_test.go b/js/bundle_test.go index f98fec4416a..5b45633a1f3 100644 --- a/js/bundle_test.go +++ b/js/bundle_test.go @@ -910,7 +910,7 @@ func TestBundleNotSharable(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() for i := 0; i < vus; i++ { - bi, err := b.Instantiate(logger, int64(i)) + bi, err := b.Instantiate(logger, uint64(i)) require.NoError(t, err) for j := 0; j < iters; j++ { bi.Runtime.Set("__ITER", j) diff --git a/js/runner.go b/js/runner.go index 24e89f23ddd..7a04a0473d2 100644 --- a/js/runner.go +++ b/js/runner.go @@ -126,7 +126,7 @@ func (r *Runner) MakeArchive() *lib.Archive { } // NewVU returns a new initialized VU. -func (r *Runner) NewVU(id int64, samplesOut chan<- stats.SampleContainer) (lib.InitializedVU, error) { +func (r *Runner) NewVU(id uint64, samplesOut chan<- stats.SampleContainer) (lib.InitializedVU, error) { vu, err := r.newVU(id, samplesOut) if err != nil { return nil, err @@ -135,7 +135,7 @@ func (r *Runner) NewVU(id int64, samplesOut chan<- stats.SampleContainer) (lib.I } // nolint:funlen -func (r *Runner) newVU(id int64, samplesOut chan<- stats.SampleContainer) (*VU, error) { +func (r *Runner) newVU(id uint64, samplesOut chan<- stats.SampleContainer) (*VU, error) { // Instantiate a new bundle, make a VU out of it. 
bi, err := r.Bundle.Instantiate(r.Logger, id) if err != nil { @@ -526,13 +526,12 @@ func (r *Runner) getTimeoutFor(stage string) time.Duration { type VU struct { BundleInstance - Runner *Runner - Transport *http.Transport - Dialer *netext.Dialer - CookieJar *cookiejar.Jar - TLSConfig *tls.Config - ID int64 - Iteration int64 + Runner *Runner + Transport *http.Transport + Dialer *netext.Dialer + CookieJar *cookiejar.Jar + TLSConfig *tls.Config + ID, Iteration uint64 Console *console BPool *bpool.BufferPool @@ -558,7 +557,7 @@ type ActiveVU struct { } // GetID returns the unique VU ID. -func (u *VU) GetID() int64 { +func (u *VU) GetID() uint64 { return u.ID } @@ -587,10 +586,10 @@ func (u *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { u.state.Tags[k] = v } if opts.SystemTags.Has(stats.TagVU) { - u.state.Tags["vu"] = strconv.FormatInt(u.ID, 10) + u.state.Tags["vu"] = strconv.FormatUint(u.ID, 10) } if opts.SystemTags.Has(stats.TagIter) { - u.state.Tags["iter"] = strconv.FormatInt(u.Iteration, 10) + u.state.Tags["iter"] = strconv.FormatUint(u.Iteration, 10) } if opts.SystemTags.Has(stats.TagGroup) { u.state.Tags["group"] = u.state.Group.Path @@ -688,7 +687,7 @@ func (u *VU) runFn( opts := &u.Runner.Bundle.Options if opts.SystemTags.Has(stats.TagIter) { - u.state.Tags["iter"] = strconv.FormatInt(u.Iteration, 10) + u.state.Tags["iter"] = strconv.FormatUint(u.Iteration, 10) } // TODO: this seems like the wrong place for the iteration incrementation diff --git a/js/runner_test.go b/js/runner_test.go index cb9e5252ea4..84d9fff35ab 100644 --- a/js/runner_test.go +++ b/js/runner_test.go @@ -1732,7 +1732,7 @@ func TestSystemTags(t *testing.T) { InsecureSkipTLSVerify: null.BoolFrom(true), }))) - vu, err := r.NewVU(int64(num), samples) + vu, err := r.NewVU(uint64(num), samples) require.NoError(t, err) activeVU := vu.Activate(&lib.VUActivationParams{ RunContext: context.Background(), diff --git a/lib/executor/base_executor.go b/lib/executor/base_executor.go index 
a8f129953df..28b32c4caf0 100644 --- a/lib/executor/base_executor.go +++ b/lib/executor/base_executor.go @@ -78,13 +78,13 @@ func (bs BaseExecutor) GetProgress() *pb.ProgressBar { // getMetricTags returns a tag set that can be used to emit metrics by the // executor. The VU ID is optional. -func (bs BaseExecutor) getMetricTags(vuID *int64) *stats.SampleTags { +func (bs BaseExecutor) getMetricTags(vuID *uint64) *stats.SampleTags { tags := bs.executionState.Options.RunTags.CloneTags() if bs.executionState.Options.SystemTags.Has(stats.TagScenario) { tags["scenario"] = bs.config.GetName() } if vuID != nil && bs.executionState.Options.SystemTags.Has(stats.TagVU) { - tags["vu"] = strconv.FormatInt(*vuID, 10) + tags["vu"] = strconv.FormatUint(*vuID, 10) } return stats.IntoSampleTags(&tags) } diff --git a/lib/executor/common_test.go b/lib/executor/common_test.go index 1764e4dffa0..d4fbcba1575 100644 --- a/lib/executor/common_test.go +++ b/lib/executor/common_test.go @@ -55,7 +55,7 @@ func setupExecutor(t testing.TB, config lib.ExecutorConfig, es *lib.ExecutionSta logEntry := logrus.NewEntry(testLog) initVUFunc := func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { - return runner.NewVU(int64(es.GetUniqueVUIdentifier()), engineOut) + return runner.NewVU(es.GetUniqueVUIdentifier(), engineOut) } es.SetInitVUFunc(initVUFunc) diff --git a/lib/executor/per_vu_iterations_test.go b/lib/executor/per_vu_iterations_test.go index c5dcb324b03..06895035051 100644 --- a/lib/executor/per_vu_iterations_test.go +++ b/lib/executor/per_vu_iterations_test.go @@ -83,7 +83,7 @@ func TestPerVUIterationsRunVariableVU(t *testing.T) { t.Parallel() var ( result sync.Map - slowVUID = int64(1) + slowVUID = uint64(1) ) et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, err) diff --git a/lib/executor/ramping_arrival_rate_test.go b/lib/executor/ramping_arrival_rate_test.go index e23bfca716d..1c289938793 100644 --- a/lib/executor/ramping_arrival_rate_test.go +++ 
b/lib/executor/ramping_arrival_rate_test.go @@ -184,7 +184,7 @@ func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) { time.Sleep(time.Millisecond * 200) cur = atomic.LoadInt64(&count) require.NotEqual(t, cur, int64(2)) - return runner.NewVU(int64(es.GetUniqueVUIdentifier()), engineOut) + return runner.NewVU(es.GetUniqueVUIdentifier(), engineOut) }) err = executor.Run(ctx, engineOut) assert.NoError(t, err) @@ -234,7 +234,7 @@ func TestRampingArrivalRateRunCorrectRateWithSlowRate(t *testing.T) { cur = atomic.LoadInt64(&count) require.NotEqual(t, cur, int64(1)) - return runner.NewVU(int64(es.GetUniqueVUIdentifier()), engineOut) + return runner.NewVU(es.GetUniqueVUIdentifier(), engineOut) }) err = executor.Run(ctx, engineOut) assert.NoError(t, err) diff --git a/lib/executor/shared_iterations_test.go b/lib/executor/shared_iterations_test.go index 9f3714d9a6a..8628e96e4b4 100644 --- a/lib/executor/shared_iterations_test.go +++ b/lib/executor/shared_iterations_test.go @@ -71,7 +71,7 @@ func TestSharedIterationsRunVariableVU(t *testing.T) { t.Parallel() var ( result sync.Map - slowVUID int64 + slowVUID uint64 ) et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, err) @@ -82,9 +82,9 @@ func TestSharedIterationsRunVariableVU(t *testing.T) { time.Sleep(10 * time.Millisecond) // small wait to stabilize the test state := lib.GetState(ctx) // Pick one VU randomly and always slow it down. 
- sid := atomic.LoadInt64(&slowVUID) - if sid == int64(0) { - atomic.StoreInt64(&slowVUID, state.Vu) + sid := atomic.LoadUint64(&slowVUID) + if sid == uint64(0) { + atomic.StoreUint64(&slowVUID, state.Vu) } if sid == state.Vu { time.Sleep(200 * time.Millisecond) diff --git a/lib/executor/vu_handle_test.go b/lib/executor/vu_handle_test.go index 02fdff33f75..f4c0984bf64 100644 --- a/lib/executor/vu_handle_test.go +++ b/lib/executor/vu_handle_test.go @@ -122,14 +122,14 @@ func TestVUHandleStartStopRace(t *testing.T) { // testLog.Level = logrus.DebugLevel logEntry := logrus.NewEntry(testLog) - var vuID int64 = -1 + var vuID uint64 testIterations := 10000 returned := make(chan struct{}) getVU := func() (lib.InitializedVU, error) { returned = make(chan struct{}) return &minirunner.VU{ - ID: atomic.AddInt64(&vuID, 1), + ID: atomic.AddUint64(&vuID, 1), R: &minirunner.MiniRunner{ Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { // TODO: do something @@ -140,7 +140,7 @@ func TestVUHandleStartStopRace(t *testing.T) { } returnVU := func(v lib.InitializedVU) { - require.Equal(t, atomic.LoadInt64(&vuID), v.(*minirunner.VU).ID) + require.Equal(t, atomic.LoadUint64(&vuID), v.(*minirunner.VU).ID) close(returned) } var interruptedIter int64 diff --git a/lib/runner.go b/lib/runner.go index e5eb0b0154a..033e866f22b 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -45,7 +45,7 @@ type InitializedVU interface { Activate(*VUActivationParams) ActiveVU // GetID returns the unique VU ID - GetID() int64 + GetID() uint64 } // VUActivationParams are supplied by each executor when it retrieves a VU from @@ -73,7 +73,7 @@ type Runner interface { // Spawns a new VU. It's fine to make this function rather heavy, if it means a performance // improvement at runtime. Remember, this is called once per VU and normally only at the start // of a test - RunOnce() may be called hundreds of thousands of times, and must be fast. 
- NewVU(id int64, out chan<- stats.SampleContainer) (InitializedVU, error) + NewVU(id uint64, out chan<- stats.SampleContainer) (InitializedVU, error) // Runs pre-test setup, if applicable. Setup(ctx context.Context, out chan<- stats.SampleContainer) error diff --git a/lib/state.go b/lib/state.go index 7f1399b6e2c..b2cd256107e 100644 --- a/lib/state.go +++ b/lib/state.go @@ -67,7 +67,7 @@ type State struct { // TODO: maybe use https://golang.org/pkg/sync/#Pool ? BPool *bpool.BufferPool - Vu, Iteration int64 + Vu, Iteration uint64 Tags map[string]string } diff --git a/lib/testutils/minirunner/minirunner.go b/lib/testutils/minirunner/minirunner.go index 370d58f7712..a1ab34be3cf 100644 --- a/lib/testutils/minirunner/minirunner.go +++ b/lib/testutils/minirunner/minirunner.go @@ -46,7 +46,7 @@ type MiniRunner struct { SetupData []byte - NextVUID int64 + NextVUID uint64 Group *lib.Group Options lib.Options } @@ -58,7 +58,7 @@ func (r MiniRunner) MakeArchive() *lib.Archive { } // NewVU returns a new VU with an incremental ID. -func (r *MiniRunner) NewVU(id int64, out chan<- stats.SampleContainer) (lib.InitializedVU, error) { +func (r *MiniRunner) NewVU(id uint64, out chan<- stats.SampleContainer) (lib.InitializedVU, error) { return &VU{R: r, Out: out, ID: id}, nil } @@ -124,10 +124,9 @@ func (r *MiniRunner) HandleSummary(ctx context.Context, s *lib.Summary) (map[str // VU is a mock VU, spawned by a MiniRunner. type VU struct { - R *MiniRunner - Out chan<- stats.SampleContainer - ID int64 - Iteration int64 + R *MiniRunner + Out chan<- stats.SampleContainer + ID, Iteration uint64 } // ActiveVU holds a VU and its activation parameters @@ -138,7 +137,7 @@ type ActiveVU struct { } // GetID returns the unique VU ID. 
-func (vu *VU) GetID() int64 { +func (vu *VU) GetID() uint64 { return vu.ID } From 434e2a23a9f579166c8de6fc18a1b9d30e5fe998 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Mon, 22 Feb 2021 11:31:43 +0100 Subject: [PATCH 04/22] Add per scenario VU ID --- js/runner.go | 7 +++++++ lib/executor/base_executor.go | 9 +++++++++ lib/executor/constant_arrival_rate.go | 3 ++- lib/executor/constant_vus.go | 3 ++- lib/executor/externally_controlled.go | 3 ++- lib/executor/helpers.go | 3 ++- lib/executor/per_vu_iterations.go | 3 ++- lib/executor/ramping_arrival_rate.go | 3 ++- lib/executor/ramping_vus.go | 4 ++-- lib/executor/shared_iterations.go | 3 ++- lib/executor/vu_handle.go | 25 ++++++++++++++----------- lib/executor/vu_handle_test.go | 12 ++++++------ lib/runner.go | 1 + lib/state.go | 18 ++++++++++++++++++ 14 files changed, 71 insertions(+), 26 deletions(-) diff --git a/js/runner.go b/js/runner.go index 7a04a0473d2..ec1e613e5ee 100644 --- a/js/runner.go +++ b/js/runner.go @@ -234,6 +234,7 @@ func (r *Runner) newVU(id uint64, samplesOut chan<- stats.SampleContainer) (*VU, Tags: vu.Runner.Bundle.Options.RunTags.CloneTags(), Group: r.defaultGroup, } + vu.state.Init() vu.Runtime.Set("console", common.Bind(vu.Runtime, vu.Console, vu.Context)) // This is here mostly so if someone tries they get a nice message @@ -601,6 +602,12 @@ func (u *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { params.RunContext = common.WithRuntime(params.RunContext, u.Runtime) params.RunContext = lib.WithState(params.RunContext, u.state) *u.Context = params.RunContext + u.state.ScenarioName = params.Scenario + if params.GetScenarioVUID != nil { + if _, ok := u.state.GetScenarioVUID(); !ok { + u.state.SetScenarioVUID(params.GetScenarioVUID()) + } + } avu := &ActiveVU{ VU: u, diff --git a/lib/executor/base_executor.go b/lib/executor/base_executor.go index 28b32c4caf0..9e9b580bd1e 100644 --- a/lib/executor/base_executor.go +++ b/lib/executor/base_executor.go @@ -23,6 
+23,7 @@ package executor import ( "context" "strconv" + "sync/atomic" "github.com/sirupsen/logrus" @@ -38,6 +39,7 @@ import ( type BaseExecutor struct { config lib.ExecutorConfig executionState *lib.ExecutionState + localVUID *uint64 // counter for assigning executor-specific VU IDs logger *logrus.Entry progress *pb.ProgressBar } @@ -47,6 +49,7 @@ func NewBaseExecutor(config lib.ExecutorConfig, es *lib.ExecutionState, logger * return &BaseExecutor{ config: config, executionState: es, + localVUID: new(uint64), logger: logger, progress: pb.New( pb.WithLeft(config.GetName), @@ -66,6 +69,12 @@ func (bs BaseExecutor) GetConfig() lib.ExecutorConfig { return bs.config } +// GetNextLocalVUID increments and returns the next VU ID that's specific for +// this executor (i.e. not global like __VU). +func (bs BaseExecutor) GetNextLocalVUID() uint64 { + return atomic.AddUint64(bs.localVUID, 1) +} + // GetLogger returns the executor logger entry. func (bs BaseExecutor) GetLogger() *logrus.Entry { return bs.logger diff --git a/lib/executor/constant_arrival_rate.go b/lib/executor/constant_arrival_rate.go index 3dfaf65519c..72fd31bb182 100644 --- a/lib/executor/constant_arrival_rate.go +++ b/lib/executor/constant_arrival_rate.go @@ -280,7 +280,8 @@ func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- stats.S } activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { activeVUsWg.Add(1) - activeVU := initVU.Activate(getVUActivationParams(maxDurationCtx, car.config.BaseConfig, returnVU)) + activeVU := initVU.Activate(getVUActivationParams( + maxDurationCtx, car.config.BaseConfig, returnVU, car.GetNextLocalVUID)) car.executionState.ModCurrentlyActiveVUsCount(+1) atomic.AddUint64(&activeVUsCount, 1) vusPool.AddVU(maxDurationCtx, activeVU, runIterationBasic) diff --git a/lib/executor/constant_vus.go b/lib/executor/constant_vus.go index 04c2ed3f0e4..223025ead48 100644 --- a/lib/executor/constant_vus.go +++ b/lib/executor/constant_vus.go @@ -192,7 +192,8 @@ func (clv 
ConstantVUs) Run(parentCtx context.Context, out chan<- stats.SampleCon ctx, cancel := context.WithCancel(maxDurationCtx) defer cancel() - activeVU := initVU.Activate(getVUActivationParams(ctx, clv.config.BaseConfig, returnVU)) + activeVU := initVU.Activate( + getVUActivationParams(ctx, clv.config.BaseConfig, returnVU, clv.GetNextLocalVUID)) for { select { diff --git a/lib/executor/externally_controlled.go b/lib/executor/externally_controlled.go index 0383ade0193..97a6bfb3d9a 100644 --- a/lib/executor/externally_controlled.go +++ b/lib/executor/externally_controlled.go @@ -361,7 +361,8 @@ func (rs *externallyControlledRunState) newManualVUHandle( } ctx, cancel := context.WithCancel(rs.ctx) return &manualVUHandle{ - vuHandle: newStoppedVUHandle(ctx, getVU, returnVU, &rs.executor.config.BaseConfig, logger), + vuHandle: newStoppedVUHandle(ctx, getVU, returnVU, + rs.executor.GetNextLocalVUID, &rs.executor.config.BaseConfig, logger), initVU: initVU, wg: &wg, cancelVU: cancel, diff --git a/lib/executor/helpers.go b/lib/executor/helpers.go index 3971d9454bc..f27656fbf0e 100644 --- a/lib/executor/helpers.go +++ b/lib/executor/helpers.go @@ -223,7 +223,7 @@ func getArrivalRatePerSec(scaledArrivalRate *big.Rat) *big.Rat { } func getVUActivationParams( - ctx context.Context, conf BaseConfig, deactivateCallback func(lib.InitializedVU), + ctx context.Context, conf BaseConfig, deactivateCallback func(lib.InitializedVU), getScenarioVUID func() uint64, ) *lib.VUActivationParams { return &lib.VUActivationParams{ RunContext: ctx, @@ -232,5 +232,6 @@ func getVUActivationParams( Env: conf.GetEnv(), Tags: conf.GetTags(), DeactivateCallback: deactivateCallback, + GetScenarioVUID: getScenarioVUID, } } diff --git a/lib/executor/per_vu_iterations.go b/lib/executor/per_vu_iterations.go index dc0d733c67e..c2c974b4976 100644 --- a/lib/executor/per_vu_iterations.go +++ b/lib/executor/per_vu_iterations.go @@ -218,7 +218,8 @@ func (pvi PerVUIterations) Run(parentCtx context.Context, out chan<- 
stats.Sampl defer cancel() vuID := initVU.GetID() - activeVU := initVU.Activate(getVUActivationParams(ctx, pvi.config.BaseConfig, returnVU)) + activeVU := initVU.Activate( + getVUActivationParams(ctx, pvi.config.BaseConfig, returnVU, pvi.GetNextLocalVUID)) for i := int64(0); i < iterations; i++ { select { diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index ae5a70b16b8..1ade8467515 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ -394,7 +394,8 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.S runIterationBasic := getIterationRunner(varr.executionState, varr.logger) activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { activeVUsWg.Add(1) - activeVU := initVU.Activate(getVUActivationParams(maxDurationCtx, varr.config.BaseConfig, returnVU)) + activeVU := initVU.Activate( + getVUActivationParams(maxDurationCtx, varr.config.BaseConfig, returnVU, varr.GetNextLocalVUID)) varr.executionState.ModCurrentlyActiveVUsCount(+1) atomic.AddUint64(&activeVUsCount, 1) diff --git a/lib/executor/ramping_vus.go b/lib/executor/ramping_vus.go index f3d1132015c..b9c55cc1077 100644 --- a/lib/executor/ramping_vus.go +++ b/lib/executor/ramping_vus.go @@ -637,8 +637,8 @@ func (vlv RampingVUs) Run(parentCtx context.Context, out chan<- stats.SampleCont vuHandles := make([]*vuHandle, maxVUs) for i := uint64(0); i < maxVUs; i++ { vuHandle := newStoppedVUHandle( - maxDurationCtx, getVU, returnVU, &vlv.config.BaseConfig, - vlv.logger.WithField("vuNum", i)) + maxDurationCtx, getVU, returnVU, vlv.GetNextLocalVUID, + &vlv.config.BaseConfig, vlv.logger.WithField("vuNum", i)) go vuHandle.runLoopsIfPossible(runIteration) vuHandles[i] = vuHandle } diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go index 7c60bd83121..ab8aac64b11 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -248,7 +248,8 @@ func (si 
SharedIterations) Run(parentCtx context.Context, out chan<- stats.Sampl ctx, cancel := context.WithCancel(maxDurationCtx) defer cancel() - activeVU := initVU.Activate(getVUActivationParams(ctx, si.config.BaseConfig, returnVU)) + activeVU := initVU.Activate(getVUActivationParams( + ctx, si.config.BaseConfig, returnVU, si.GetNextLocalVUID)) for { select { diff --git a/lib/executor/vu_handle.go b/lib/executor/vu_handle.go index 34b1e58e6b7..775522d777a 100644 --- a/lib/executor/vu_handle.go +++ b/lib/executor/vu_handle.go @@ -88,11 +88,12 @@ short names for input: // - it's not required but preferable, if where possible to not reactivate VUs and to reuse context // as this speed ups the execution type vuHandle struct { - mutex *sync.Mutex - parentCtx context.Context - getVU func() (lib.InitializedVU, error) - returnVU func(lib.InitializedVU) - config *BaseConfig + mutex *sync.Mutex + parentCtx context.Context + getVU func() (lib.InitializedVU, error) + returnVU func(lib.InitializedVU) + getScenarioVUID func() uint64 + config *BaseConfig initVU lib.InitializedVU activeVU lib.ActiveVU @@ -108,15 +109,17 @@ type vuHandle struct { func newStoppedVUHandle( parentCtx context.Context, getVU func() (lib.InitializedVU, error), - returnVU func(lib.InitializedVU), config *BaseConfig, logger *logrus.Entry, + returnVU func(lib.InitializedVU), getScenarioVUID func() uint64, + config *BaseConfig, logger *logrus.Entry, ) *vuHandle { ctx, cancel := context.WithCancel(parentCtx) return &vuHandle{ - mutex: &sync.Mutex{}, - parentCtx: parentCtx, - getVU: getVU, - config: config, + mutex: &sync.Mutex{}, + parentCtx: parentCtx, + getVU: getVU, + getScenarioVUID: getScenarioVUID, + config: config, canStartIter: make(chan struct{}), state: stopped, @@ -146,7 +149,7 @@ func (vh *vuHandle) start() (err error) { return err } - vh.activeVU = vh.initVU.Activate(getVUActivationParams(vh.ctx, *vh.config, vh.returnVU)) + vh.activeVU = vh.initVU.Activate(getVUActivationParams(vh.ctx, *vh.config, 
vh.returnVU, vh.getScenarioVUID)) close(vh.canStartIter) vh.changeState(starting) } diff --git a/lib/executor/vu_handle_test.go b/lib/executor/vu_handle_test.go index f4c0984bf64..c4d8d1f692d 100644 --- a/lib/executor/vu_handle_test.go +++ b/lib/executor/vu_handle_test.go @@ -64,7 +64,7 @@ func TestVUHandleRace(t *testing.T) { } } - vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, &BaseConfig{}, logEntry) go vuHandle.runLoopsIfPossible(runIter) var wg sync.WaitGroup wg.Add(3) @@ -159,7 +159,7 @@ func TestVUHandleStartStopRace(t *testing.T) { } } - vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, &BaseConfig{}, logEntry) go vuHandle.runLoopsIfPossible(runIter) for i := 0; i < testIterations; i++ { err := vuHandle.start() @@ -244,7 +244,7 @@ func TestVUHandleSimple(t *testing.T) { test := new(handleVUTest) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { @@ -284,7 +284,7 @@ func TestVUHandleSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { @@ -325,7 +325,7 @@ func TestVUHandleSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, &BaseConfig{}, logEntry) var wg sync.WaitGroup 
wg.Add(1) go func() { @@ -408,7 +408,7 @@ func BenchmarkVUHandleIterations(b *testing.B) { reset() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { diff --git a/lib/runner.go b/lib/runner.go index 033e866f22b..b5b8a6854a0 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -55,6 +55,7 @@ type VUActivationParams struct { DeactivateCallback func(InitializedVU) Env, Tags map[string]string Exec, Scenario string + GetScenarioVUID func() uint64 } // A Runner is a factory for VUs. It should precompute as much as possible upon diff --git a/lib/state.go b/lib/state.go index b2cd256107e..e2b0c5e0328 100644 --- a/lib/state.go +++ b/lib/state.go @@ -69,6 +69,13 @@ type State struct { Vu, Iteration uint64 Tags map[string]string + ScenarioName string + scenarioVUID map[string]uint64 +} + +// Init initializes some private state fields. +func (s *State) Init() { + s.scenarioVUID = make(map[string]uint64) } // CloneTags makes a copy of the tags map and returns it. @@ -79,3 +86,14 @@ func (s *State) CloneTags() map[string]string { } return tags } + +// GetScenarioVUID returns the scenario-specific ID of this VU. +func (s *State) GetScenarioVUID() (uint64, bool) { + id, ok := s.scenarioVUID[s.ScenarioName] + return id, ok +} + +// SetScenarioVUID sets the scenario-specific ID for this VU. 
+func (s *State) SetScenarioVUID(id uint64) { + s.scenarioVUID[s.ScenarioName] = id +} From fc487cf4677bacf83eba0097f5c56cc659fe9713 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Mon, 22 Feb 2021 16:52:10 +0100 Subject: [PATCH 05/22] Add per scenario VU iteration --- js/runner.go | 2 ++ lib/state.go | 28 ++++++++++++++++++++++++---- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/js/runner.go b/js/runner.go index ec1e613e5ee..68101a7b7af 100644 --- a/js/runner.go +++ b/js/runner.go @@ -702,6 +702,8 @@ func (u *VU) runFn( // maybe move it to RunOnce ? u.Runtime.Set("__ITER", u.Iteration) u.Iteration++ + u.state.Iteration = u.Iteration + u.state.IncrScenarioVUIter() defer func() { if r := recover(); r != nil { diff --git a/lib/state.go b/lib/state.go index e2b0c5e0328..ab452ca1ee1 100644 --- a/lib/state.go +++ b/lib/state.go @@ -26,6 +26,7 @@ import ( "net" "net/http" "net/http/cookiejar" + "sync" "github.com/oxtoacart/bpool" "github.com/sirupsen/logrus" @@ -67,15 +68,18 @@ type State struct { // TODO: maybe use https://golang.org/pkg/sync/#Pool ? BPool *bpool.BufferPool - Vu, Iteration uint64 - Tags map[string]string - ScenarioName string - scenarioVUID map[string]uint64 + Vu, Iteration uint64 + Tags map[string]string + ScenarioName string + scenarioVUID map[string]uint64 + scIterMx sync.RWMutex + scenarioVUIter map[string]uint64 } // Init initializes some private state fields. func (s *State) Init() { s.scenarioVUID = make(map[string]uint64) + s.scenarioVUIter = make(map[string]uint64) } // CloneTags makes a copy of the tags map and returns it. @@ -97,3 +101,19 @@ func (s *State) GetScenarioVUID() (uint64, bool) { func (s *State) SetScenarioVUID(id uint64) { s.scenarioVUID[s.ScenarioName] = id } + +// GetScenarioVUIter returns the scenario-specific count of completed iterations +// for this VU. 
+func (s *State) GetScenarioVUIter() uint64 { + s.scIterMx.RLock() + defer s.scIterMx.RUnlock() + return s.scenarioVUIter[s.ScenarioName] +} + +// IncrScenarioVUIter increments the scenario-specific count of completed +// iterations for this VU. +func (s *State) IncrScenarioVUIter() { + s.scIterMx.Lock() + s.scenarioVUIter[s.ScenarioName]++ + s.scIterMx.Unlock() +} From edff038ada316a0fcb738416e7a1a9c0927ed58e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Mon, 22 Feb 2021 16:52:52 +0100 Subject: [PATCH 06/22] Fix race condition activating VUs --- js/runner.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/js/runner.go b/js/runner.go index 68101a7b7af..c429be30551 100644 --- a/js/runner.go +++ b/js/runner.go @@ -599,9 +599,10 @@ func (u *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { u.state.Tags["scenario"] = params.Scenario } - params.RunContext = common.WithRuntime(params.RunContext, u.Runtime) - params.RunContext = lib.WithState(params.RunContext, u.state) - *u.Context = params.RunContext + ctx := common.WithRuntime(params.RunContext, u.Runtime) + ctx = lib.WithState(ctx, u.state) + params.RunContext = ctx + *u.Context = ctx u.state.ScenarioName = params.Scenario if params.GetScenarioVUID != nil { if _, ok := u.state.GetScenarioVUID(); !ok { @@ -617,7 +618,7 @@ func (u *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { go func() { // Wait for the run context to be over - <-params.RunContext.Done() + <-ctx.Done() // Interrupt the JS runtime u.Runtime.Interrupt(context.Canceled) // Wait for the VU to stop running, if it was, and prevent it from From 6e52ab95e5dec8186046bdd37bfce1ecb3b4eb0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Fri, 18 Jun 2021 15:20:49 +0200 Subject: [PATCH 07/22] Add per scenario global VU iterations --- lib/executor/base_executor.go | 13 +++++++++++++ lib/executor/constant_arrival_rate.go | 3 +++ lib/executor/constant_vus.go | 3 ++- 
lib/executor/externally_controlled.go | 3 ++- lib/executor/helpers.go | 3 ++- lib/executor/per_vu_iterations.go | 3 ++- lib/executor/ramping_arrival_rate.go | 3 ++- lib/executor/ramping_vus.go | 3 ++- lib/executor/shared_iterations.go | 3 ++- lib/executors.go | 1 + 10 files changed, 31 insertions(+), 7 deletions(-) diff --git a/lib/executor/base_executor.go b/lib/executor/base_executor.go index 9e9b580bd1e..73ea026e891 100644 --- a/lib/executor/base_executor.go +++ b/lib/executor/base_executor.go @@ -40,6 +40,7 @@ type BaseExecutor struct { config lib.ExecutorConfig executionState *lib.ExecutionState localVUID *uint64 // counter for assigning executor-specific VU IDs + localIters *uint64 // counter for keeping track of all VU iterations completed by this executor logger *logrus.Entry progress *pb.ProgressBar } @@ -50,6 +51,7 @@ func NewBaseExecutor(config lib.ExecutorConfig, es *lib.ExecutionState, logger * config: config, executionState: es, localVUID: new(uint64), + localIters: new(uint64), logger: logger, progress: pb.New( pb.WithLeft(config.GetName), @@ -97,3 +99,14 @@ func (bs BaseExecutor) getMetricTags(vuID *uint64) *stats.SampleTags { } return stats.IntoSampleTags(&tags) } + +// getScenarioIter returns the completed iterations by all VUs for this executor. +func (bs *BaseExecutor) getScenarioIter() uint64 { + return atomic.LoadUint64(bs.localIters) +} + +// incrScenarioIter increments the counter of completed iterations by all VUs +// for this executor. 
+func (bs *BaseExecutor) incrScenarioIter() { + atomic.AddUint64(bs.localIters, 1) +} diff --git a/lib/executor/constant_arrival_rate.go b/lib/executor/constant_arrival_rate.go index 72fd31bb182..31a23e91811 100644 --- a/lib/executor/constant_arrival_rate.go +++ b/lib/executor/constant_arrival_rate.go @@ -272,12 +272,15 @@ func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- stats.S Executor: car.config.Type, StartTime: startTime, ProgressFn: progressFn, + GetIter: car.getScenarioIter, }) returnVU := func(u lib.InitializedVU) { car.executionState.ReturnVU(u, true) activeVUsWg.Done() } + + runIterationBasic := getIterationRunner(car.executionState, car.incrScenarioIter, car.logger) activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { activeVUsWg.Add(1) activeVU := initVU.Activate(getVUActivationParams( diff --git a/lib/executor/constant_vus.go b/lib/executor/constant_vus.go index 223025ead48..b038b6047e9 100644 --- a/lib/executor/constant_vus.go +++ b/lib/executor/constant_vus.go @@ -174,13 +174,14 @@ func (clv ConstantVUs) Run(parentCtx context.Context, out chan<- stats.SampleCon defer activeVUs.Wait() regDurationDone := regDurationCtx.Done() - runIteration := getIterationRunner(clv.executionState, clv.logger) + runIteration := getIterationRunner(clv.executionState, clv.incrScenarioIter, clv.logger) maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ Name: clv.config.Name, Executor: clv.config.Type, StartTime: startTime, ProgressFn: progressFn, + GetIter: clv.getScenarioIter, }) returnVU := func(u lib.InitializedVU) { diff --git a/lib/executor/externally_controlled.go b/lib/executor/externally_controlled.go index 97a6bfb3d9a..481c0f07160 100644 --- a/lib/executor/externally_controlled.go +++ b/lib/executor/externally_controlled.go @@ -530,7 +530,7 @@ func (mex *ExternallyControlled) Run(parentCtx context.Context, out chan<- stats currentlyPaused: false, activeVUsCount: new(int64), maxVUs: new(int64), - runIteration: 
getIterationRunner(mex.executionState, mex.logger), + runIteration: getIterationRunner(mex.executionState, mex.incrScenarioIter, mex.logger), } *runState.maxVUs = startMaxVUs if err = runState.retrieveStartMaxVUs(); err != nil { @@ -542,6 +542,7 @@ func (mex *ExternallyControlled) Run(parentCtx context.Context, out chan<- stats Executor: mex.config.Type, StartTime: time.Now(), ProgressFn: runState.progressFn, + GetIter: mex.getScenarioIter, }) mex.progress.Modify(pb.WithProgress(runState.progressFn)) // Keep track of the progress diff --git a/lib/executor/helpers.go b/lib/executor/helpers.go index f27656fbf0e..febc288080a 100644 --- a/lib/executor/helpers.go +++ b/lib/executor/helpers.go @@ -82,7 +82,7 @@ func validateStages(stages []Stage) []error { // // TODO: emit the end-of-test iteration metrics here (https://github.com/k6io/k6/issues/1250) func getIterationRunner( - executionState *lib.ExecutionState, logger *logrus.Entry, + executionState *lib.ExecutionState, incrScenarioIter func(), logger *logrus.Entry, ) func(context.Context, lib.ActiveVU) bool { return func(ctx context.Context, vu lib.ActiveVU) bool { err := vu.RunOnce() @@ -110,6 +110,7 @@ func getIterationRunner( // TODO: move emission of end-of-iteration metrics here? 
executionState.AddFullIterations(1) + incrScenarioIter() return true } } diff --git a/lib/executor/per_vu_iterations.go b/lib/executor/per_vu_iterations.go index c2c974b4976..c040689dc9d 100644 --- a/lib/executor/per_vu_iterations.go +++ b/lib/executor/per_vu_iterations.go @@ -198,13 +198,14 @@ func (pvi PerVUIterations) Run(parentCtx context.Context, out chan<- stats.Sampl defer activeVUs.Wait() regDurationDone := regDurationCtx.Done() - runIteration := getIterationRunner(pvi.executionState, pvi.logger) + runIteration := getIterationRunner(pvi.executionState, pvi.incrScenarioIter, pvi.logger) maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ Name: pvi.config.Name, Executor: pvi.config.Type, StartTime: startTime, ProgressFn: progressFn, + GetIter: pvi.getScenarioIter, }) returnVU := func(u lib.InitializedVU) { diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index 1ade8467515..27489880b03 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ -384,6 +384,7 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.S Executor: varr.config.Type, StartTime: startTime, ProgressFn: progressFn, + GetIter: varr.getScenarioIter, }) returnVU := func(u lib.InitializedVU) { @@ -391,7 +392,7 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.S activeVUsWg.Done() } - runIterationBasic := getIterationRunner(varr.executionState, varr.logger) + runIterationBasic := getIterationRunner(varr.executionState, varr.incrScenarioIter, varr.logger) activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { activeVUsWg.Add(1) activeVU := initVU.Activate( diff --git a/lib/executor/ramping_vus.go b/lib/executor/ramping_vus.go index b9c55cc1077..1228b4ecdff 100644 --- a/lib/executor/ramping_vus.go +++ b/lib/executor/ramping_vus.go @@ -608,7 +608,7 @@ func (vlv RampingVUs) Run(parentCtx context.Context, out chan<- stats.SampleCont // 
Actually schedule the VUs and iterations, likely the most complicated // executor among all of them... - runIteration := getIterationRunner(vlv.executionState, vlv.logger) + runIteration := getIterationRunner(vlv.executionState, vlv.incrScenarioIter, vlv.logger) getVU := func() (lib.InitializedVU, error) { initVU, err := vlv.executionState.GetPlannedVU(vlv.logger, false) if err != nil { @@ -633,6 +633,7 @@ func (vlv RampingVUs) Run(parentCtx context.Context, out chan<- stats.SampleCont Executor: vlv.config.Type, StartTime: startTime, ProgressFn: progressFn, + GetIter: vlv.getScenarioIter, }) vuHandles := make([]*vuHandle, maxVUs) for i := uint64(0); i < maxVUs; i++ { diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go index ab8aac64b11..f6cff6d310d 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -230,13 +230,14 @@ func (si SharedIterations) Run(parentCtx context.Context, out chan<- stats.Sampl }() regDurationDone := regDurationCtx.Done() - runIteration := getIterationRunner(si.executionState, si.logger) + runIteration := getIterationRunner(si.executionState, si.incrScenarioIter, si.logger) maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ Name: si.config.Name, Executor: si.config.Type, StartTime: startTime, ProgressFn: progressFn, + GetIter: si.getScenarioIter, }) returnVU := func(u lib.InitializedVU) { diff --git a/lib/executors.go b/lib/executors.go index e8ef95f49aa..9832159bc0e 100644 --- a/lib/executors.go +++ b/lib/executors.go @@ -115,6 +115,7 @@ type ScenarioState struct { Name, Executor string StartTime time.Time ProgressFn func() (float64, []string) + GetIter func() uint64 } // InitVUFunc is just a shorthand so we don't have to type the function From 636382968d8a248b932b01a802ac433d4107ba06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Fri, 26 Feb 2021 12:43:12 +0100 Subject: [PATCH 08/22] Move and export SegmentedIndex to lib --- 
lib/execution_segment.go | 96 +++++++++++++++ lib/execution_segment_test.go | 191 ++++++++++++++++++++++++++++++ lib/executor/ramping_vus.go | 85 ++------------ lib/executor/ramping_vus_test.go | 196 ------------------------------- 4 files changed, 297 insertions(+), 271 deletions(-) diff --git a/lib/execution_segment.go b/lib/execution_segment.go index 081ef985bfb..e7e15600712 100644 --- a/lib/execution_segment.go +++ b/lib/execution_segment.go @@ -26,6 +26,7 @@ import ( "math/big" "sort" "strings" + "sync" ) // ExecutionSegment represents a (start, end] partition of the total execution @@ -730,3 +731,98 @@ func (et *ExecutionTuple) GetNewExecutionTupleFromValue(value int64) (*Execution SegmentIndex: newIndex, }, nil } + +// SegmentedIndex is an iterator that returns both the scaled and the unscaled +// sequential values according to the given ExecutionTuple. It is not thread-safe, +// concurrent access has to be externally synchronized. +type SegmentedIndex struct { + start, lcd int64 + offsets []int64 + mx sync.RWMutex + scaled, unscaled int64 // for both the first element(vu) is 1 not 0 +} + +// NewSegmentedIndex returns a pointer to a new SegmentedIndex instance, +// given a starting index, LCD and offsets as returned by GetStripedOffsets(). +func NewSegmentedIndex(start, lcd int64, offsets []int64) *SegmentedIndex { + return &SegmentedIndex{start: start, lcd: lcd, offsets: offsets} +} + +// Next goes to the next scaled index and moves the unscaled one accordingly. 
+func (s *SegmentedIndex) Next() {
+	s.mx.Lock()
+	defer s.mx.Unlock()
+	if s.scaled == 0 { // the 1 element(VU) is at the start
+		s.unscaled += s.start + 1 // the first element of the start 0, but the here we need it to be 1 so we add 1
+	} else { // if we are not at the first element we need to go through the offsets, looping over them
+		s.unscaled += s.offsets[int(s.scaled-1)%len(s.offsets)] // slice's index start at 0 ours start at 1
+	}
+	s.scaled++
+}
+
+// Prev goes to the previous scaled value and sets the unscaled one accordingly.
+// Calling Prev when s.scaled == 0 is undefined.
+func (s *SegmentedIndex) Prev() {
+	s.mx.Lock()
+	defer s.mx.Unlock()
+	if s.scaled == 1 { // we are the first need to go to the 0th element which means we need to remove the start
+		s.unscaled -= s.start + 1 // this could've been just setting to 0
+	} else { // not at the first element - need to get the previously added offset so
+		s.unscaled -= s.offsets[int(s.scaled-2)%len(s.offsets)] // slice's index start 0 our start at 1
+	}
+	s.scaled--
+}
+
+// GoTo sets the scaled index to its biggest value for which the corresponding
+// unscaled index is smaller or equal to value.
+func (s *SegmentedIndex) GoTo(value int64) int64 { // TODO optimize
+	s.mx.Lock()
+	defer s.mx.Unlock()
+	var gi int64
+	// Because of the cyclical nature of the striping algorithm (with a cycle
+	// length of LCD, the least common denominator), when scaling large values
+	// (i.e. many multiples of the LCD), we can quickly calculate how many times
+	// the cycle repeats.
+	wholeCycles := (value / s.lcd)
+	// So we can set some approximate initial values quickly, since we also know
+	// precisely how many scaled values there are per cycle length.
+ s.scaled = wholeCycles * int64(len(s.offsets)) + s.unscaled = wholeCycles*s.lcd + s.start + 1 // our indexes are from 1 the start is from 0 + // Approach the final value using the slow algorithm with the step by step loop + // TODO: this can be optimized by another array with size offsets that instead of the offsets + // from the previous is the offset from either 0 or start + i := s.start + for ; i < value%s.lcd; gi, i = gi+1, i+s.offsets[gi] { + s.scaled++ + s.unscaled += s.offsets[gi] + } + + if gi > 0 { // there were more values after the wholecycles + // the last offset actually shouldn't have been added + s.unscaled -= s.offsets[gi-1] + } else if s.scaled > 0 { // we didn't actually have more values after the wholecycles but we still had some + // in this case the unscaled value needs to move back by the last offset as it would've been + // the one to get it from the value it needs to be to it's current one + s.unscaled -= s.offsets[len(s.offsets)-1] + } + + if s.scaled == 0 { + s.unscaled = 0 // we would've added the start and 1 + } + + return s.scaled +} + +// GetScaled returns the scaled value. +func (s *SegmentedIndex) GetScaled() int64 { + s.mx.RLock() + defer s.mx.RUnlock() + return s.scaled +} + +// GetUnscaled returns the unscaled value. +func (s *SegmentedIndex) GetUnscaled() int64 { + s.mx.RLock() + defer s.mx.RUnlock() + return s.unscaled +} diff --git a/lib/execution_segment_test.go b/lib/execution_segment_test.go index 202e34cc87e..a17665ee5e3 100644 --- a/lib/execution_segment_test.go +++ b/lib/execution_segment_test.go @@ -924,3 +924,194 @@ func BenchmarkExecutionSegmentScale(b *testing.B) { } // TODO: test with randomized things + +func TestSegmentedIndex(t *testing.T) { + // TODO ... more structure ? 
+ t.Run("full", func(t *testing.T) { + s := SegmentedIndex{start: 0, lcd: 1, offsets: []int64{1}} + + s.Next() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.Prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + + s.Next() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.Next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.Next() + assert.EqualValues(t, 3, s.unscaled) + assert.EqualValues(t, 3, s.scaled) + + s.Prev() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.Prev() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.Next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + }) + + t.Run("half", func(t *testing.T) { + s := SegmentedIndex{start: 0, lcd: 2, offsets: []int64{2}} + + s.Next() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.Prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + + s.Next() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.Next() + assert.EqualValues(t, 3, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.Next() + assert.EqualValues(t, 5, s.unscaled) + assert.EqualValues(t, 3, s.scaled) + + s.Prev() + assert.EqualValues(t, 3, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.Prev() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.Prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + + s.Next() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + }) + + t.Run("the other half", func(t *testing.T) { + s := SegmentedIndex{start: 1, lcd: 2, offsets: []int64{2}} + + s.Next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.Prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, 
s.scaled) + + s.Next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.Next() + assert.EqualValues(t, 4, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.Next() + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 3, s.scaled) + + s.Prev() + assert.EqualValues(t, 4, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.Prev() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.Prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + + s.Next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + }) + + t.Run("strange", func(t *testing.T) { + s := SegmentedIndex{start: 1, lcd: 7, offsets: []int64{4, 3}} + + s.Next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.Prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + + s.Next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.Next() + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.Next() + assert.EqualValues(t, 9, s.unscaled) + assert.EqualValues(t, 3, s.scaled) + + s.Prev() + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.Prev() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.Prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + + s.Next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.GoTo(6) + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.GoTo(5) + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.GoTo(7) + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.GoTo(8) + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.GoTo(9) + assert.EqualValues(t, 9, s.unscaled) + assert.EqualValues(t, 3, s.scaled) + + 
s.Prev() + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.Prev() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.Prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + }) +} diff --git a/lib/executor/ramping_vus.go b/lib/executor/ramping_vus.go index 1228b4ecdff..622d397de08 100644 --- a/lib/executor/ramping_vus.go +++ b/lib/executor/ramping_vus.go @@ -191,11 +191,11 @@ func (vlvc RampingVUsConfig) getRawExecutionSteps(et *lib.ExecutionTuple, zeroEn fromVUs = vlvc.StartVUs.Int64 start, offsets, lcd = et.GetStripedOffsets() steps = make([]lib.ExecutionStep, 0, vlvc.precalculateTheRequiredSteps(et, zeroEnd)) - index = segmentedIndex{start: start, lcd: lcd, offsets: offsets} + index = lib.NewSegmentedIndex(start, lcd, offsets) ) // Reserve the scaled StartVUs at the beginning - steps = append(steps, lib.ExecutionStep{TimeOffset: 0, PlannedVUs: uint64(index.goTo(fromVUs))}) + steps = append(steps, lib.ExecutionStep{TimeOffset: 0, PlannedVUs: uint64(index.GoTo(fromVUs))}) addStep := func(timeOffset time.Duration, plannedVUs uint64) { if steps[len(steps)-1].PlannedVUs != plannedVUs { steps = append(steps, lib.ExecutionStep{TimeOffset: timeOffset, PlannedVUs: plannedVUs}) @@ -212,30 +212,30 @@ func (vlvc RampingVUsConfig) getRawExecutionSteps(et *lib.ExecutionTuple, zeroEn continue } if stageDuration == 0 { - addStep(timeTillEnd, uint64(index.goTo(stageEndVUs))) + addStep(timeTillEnd, uint64(index.GoTo(stageEndVUs))) fromVUs = stageEndVUs continue } // VU reservation for gracefully ramping down is handled as a // separate method: reserveVUsForGracefulRampDowns() - if index.unscaled > stageEndVUs { // ramp down + if index.GetUnscaled() > stageEndVUs { // ramp down // here we don't want to emit for the equal to stageEndVUs as it doesn't go below it // it will just go to it - for ; index.unscaled > stageEndVUs; index.prev() { + for ; index.GetUnscaled() > stageEndVUs; 
index.Prev() { addStep( // this is the time that we should go up 1 if we are ramping up // but we are ramping down so we should go 1 down, but because we want to not // stop VUs immediately we stop it on the next unscaled VU's time - timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-index.unscaled+1)/stageVUDiff), - uint64(index.scaled-1), + timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-index.GetUnscaled()+1)/stageVUDiff), + uint64(index.GetScaled()-1), ) } } else { - for ; index.unscaled <= stageEndVUs; index.next() { + for ; index.GetUnscaled() <= stageEndVUs; index.Next() { addStep( - timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-index.unscaled)/stageVUDiff), - uint64(index.scaled), + timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-index.GetUnscaled())/stageVUDiff), + uint64(index.GetScaled()), ) } } @@ -249,71 +249,6 @@ func (vlvc RampingVUsConfig) getRawExecutionSteps(et *lib.ExecutionTuple, zeroEn return steps } -type segmentedIndex struct { // TODO: rename ... 
although this is probably the best name so far :D - start, lcd int64 - offsets []int64 - scaled, unscaled int64 // for both the first element(vu) is 1 not 0 -} - -// goes to the next scaled index and move the unscaled one accordingly -func (s *segmentedIndex) next() { - if s.scaled == 0 { // the 1 element(VU) is at the start - s.unscaled += s.start + 1 // the first element of the start 0, but the here we need it to be 1 so we add 1 - } else { // if we are not at the first element we need to go through the offsets, looping over them - s.unscaled += s.offsets[int(s.scaled-1)%len(s.offsets)] // slice's index start at 0 ours start at 1 - } - s.scaled++ -} - -// prev goest to the previous scaled value and sets the unscaled one accordingly -// calling prev when s.scaled == 0 is undefined -func (s *segmentedIndex) prev() { - if s.scaled == 1 { // we are the first need to go to the 0th element which means we need to remove the start - s.unscaled -= s.start + 1 // this could've been just settign to 0 - } else { // not at the first element - need to get the previously added offset so - s.unscaled -= s.offsets[int(s.scaled-2)%len(s.offsets)] // slice's index start 0 our start at 1 - } - s.scaled-- -} - -// goTo sets the scaled index to it's biggest value for which the corresponding unscaled index is -// is smaller or equal to value -func (s *segmentedIndex) goTo(value int64) int64 { // TODO optimize - var gi int64 - // Because of the cyclical nature of the striping algorithm (with a cycle - // length of LCD, the least common denominator), when scaling large values - // (i.e. many multiples of the LCD), we can quickly calculate how many times - // the cycle repeats. - wholeCycles := (value / s.lcd) - // So we can set some approximate initial values quickly, since we also know - // precisely how many scaled values there are per cycle length. 
- s.scaled = wholeCycles * int64(len(s.offsets)) - s.unscaled = wholeCycles*s.lcd + s.start + 1 // our indexes are from 1 the start is from 0 - // Approach the final value using the slow algorithm with the step by step loop - // TODO: this can be optimized by another array with size offsets that instead of the offsets - // from the previous is the offset from either 0 or start - i := s.start - for ; i < value%s.lcd; gi, i = gi+1, i+s.offsets[gi] { - s.scaled++ - s.unscaled += s.offsets[gi] - } - - if gi > 0 { // there were more values after the wholecycles - // the last offset actually shouldn't have been added - s.unscaled -= s.offsets[gi-1] - } else if s.scaled > 0 { // we didn't actually have more values after the wholecycles but we still had some - // in this case the unscaled value needs to move back by the last offset as it would've been - // the one to get it from the value it needs to be to it's current one - s.unscaled -= s.offsets[len(s.offsets)-1] - } - - if s.scaled == 0 { - s.unscaled = 0 // we would've added the start and 1 - } - - return s.scaled -} - func absInt64(a int64) int64 { if a < 0 { return -a diff --git a/lib/executor/ramping_vus_test.go b/lib/executor/ramping_vus_test.go index 3659efb50d9..28af6f1366c 100644 --- a/lib/executor/ramping_vus_test.go +++ b/lib/executor/ramping_vus_test.go @@ -1001,202 +1001,6 @@ func BenchmarkRampingVUsGetRawExecutionSteps(b *testing.B) { } } -func TestSegmentedIndex(t *testing.T) { - t.Parallel() - // TODO ... more structure ? 
- t.Run("full", func(t *testing.T) { - t.Parallel() - s := segmentedIndex{start: 0, lcd: 1, offsets: []int64{1}} - - s.next() - assert.EqualValues(t, 1, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.prev() - assert.EqualValues(t, 0, s.unscaled) - assert.EqualValues(t, 0, s.scaled) - - s.next() - assert.EqualValues(t, 1, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.next() - assert.EqualValues(t, 2, s.unscaled) - assert.EqualValues(t, 2, s.scaled) - - s.next() - assert.EqualValues(t, 3, s.unscaled) - assert.EqualValues(t, 3, s.scaled) - - s.prev() - assert.EqualValues(t, 2, s.unscaled) - assert.EqualValues(t, 2, s.scaled) - - s.prev() - assert.EqualValues(t, 1, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.next() - assert.EqualValues(t, 2, s.unscaled) - assert.EqualValues(t, 2, s.scaled) - }) - - t.Run("half", func(t *testing.T) { - t.Parallel() - s := segmentedIndex{start: 0, lcd: 2, offsets: []int64{2}} - - s.next() - assert.EqualValues(t, 1, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.prev() - assert.EqualValues(t, 0, s.unscaled) - assert.EqualValues(t, 0, s.scaled) - - s.next() - assert.EqualValues(t, 1, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.next() - assert.EqualValues(t, 3, s.unscaled) - assert.EqualValues(t, 2, s.scaled) - - s.next() - assert.EqualValues(t, 5, s.unscaled) - assert.EqualValues(t, 3, s.scaled) - - s.prev() - assert.EqualValues(t, 3, s.unscaled) - assert.EqualValues(t, 2, s.scaled) - - s.prev() - assert.EqualValues(t, 1, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.prev() - assert.EqualValues(t, 0, s.unscaled) - assert.EqualValues(t, 0, s.scaled) - - s.next() - assert.EqualValues(t, 1, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - }) - - t.Run("the other half", func(t *testing.T) { - t.Parallel() - s := segmentedIndex{start: 1, lcd: 2, offsets: []int64{2}} - - s.next() - assert.EqualValues(t, 2, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.prev() - 
assert.EqualValues(t, 0, s.unscaled) - assert.EqualValues(t, 0, s.scaled) - - s.next() - assert.EqualValues(t, 2, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.next() - assert.EqualValues(t, 4, s.unscaled) - assert.EqualValues(t, 2, s.scaled) - - s.next() - assert.EqualValues(t, 6, s.unscaled) - assert.EqualValues(t, 3, s.scaled) - - s.prev() - assert.EqualValues(t, 4, s.unscaled) - assert.EqualValues(t, 2, s.scaled) - - s.prev() - assert.EqualValues(t, 2, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.prev() - assert.EqualValues(t, 0, s.unscaled) - assert.EqualValues(t, 0, s.scaled) - - s.next() - assert.EqualValues(t, 2, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - }) - - t.Run("strange", func(t *testing.T) { - t.Parallel() - s := segmentedIndex{start: 1, lcd: 7, offsets: []int64{4, 3}} - - s.next() - assert.EqualValues(t, 2, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.prev() - assert.EqualValues(t, 0, s.unscaled) - assert.EqualValues(t, 0, s.scaled) - - s.next() - assert.EqualValues(t, 2, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.next() - assert.EqualValues(t, 6, s.unscaled) - assert.EqualValues(t, 2, s.scaled) - - s.next() - assert.EqualValues(t, 9, s.unscaled) - assert.EqualValues(t, 3, s.scaled) - - s.prev() - assert.EqualValues(t, 6, s.unscaled) - assert.EqualValues(t, 2, s.scaled) - - s.prev() - assert.EqualValues(t, 2, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.prev() - assert.EqualValues(t, 0, s.unscaled) - assert.EqualValues(t, 0, s.scaled) - - s.next() - assert.EqualValues(t, 2, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.goTo(6) - assert.EqualValues(t, 6, s.unscaled) - assert.EqualValues(t, 2, s.scaled) - - s.goTo(5) - assert.EqualValues(t, 2, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.goTo(7) - assert.EqualValues(t, 6, s.unscaled) - assert.EqualValues(t, 2, s.scaled) - - s.goTo(8) - assert.EqualValues(t, 6, s.unscaled) - assert.EqualValues(t, 2, s.scaled) - - s.goTo(9) - 
assert.EqualValues(t, 9, s.unscaled) - assert.EqualValues(t, 3, s.scaled) - - s.prev() - assert.EqualValues(t, 6, s.unscaled) - assert.EqualValues(t, 2, s.scaled) - - s.prev() - assert.EqualValues(t, 2, s.unscaled) - assert.EqualValues(t, 1, s.scaled) - - s.prev() - assert.EqualValues(t, 0, s.unscaled) - assert.EqualValues(t, 0, s.scaled) - }) -} - // TODO: delete in favor of lib.generateRandomSequence() after // https://github.com/k6io/k6/issues/1302 is done (can't import now due to // import loops...) From 58822d2b7b063cd209c45509b3ba04d753c31f4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Fri, 26 Feb 2021 12:56:58 +0100 Subject: [PATCH 09/22] Add global segmented iterations to SharedIterations --- lib/executor/helpers.go | 4 +- lib/executor/shared_iterations.go | 38 ++++++++++++++---- lib/executor/shared_iterations_test.go | 55 ++++++++++++++++++++++++++ lib/executors.go | 1 + 4 files changed, 88 insertions(+), 10 deletions(-) diff --git a/lib/executor/helpers.go b/lib/executor/helpers.go index febc288080a..11961eba8ba 100644 --- a/lib/executor/helpers.go +++ b/lib/executor/helpers.go @@ -82,7 +82,7 @@ func validateStages(stages []Stage) []error { // // TODO: emit the end-of-test iteration metrics here (https://github.com/k6io/k6/issues/1250) func getIterationRunner( - executionState *lib.ExecutionState, incrScenarioIter func(), logger *logrus.Entry, + executionState *lib.ExecutionState, incrIter func(), logger *logrus.Entry, ) func(context.Context, lib.ActiveVU) bool { return func(ctx context.Context, vu lib.ActiveVU) bool { err := vu.RunOnce() @@ -110,7 +110,7 @@ func getIterationRunner( // TODO: move emission of end-of-iteration metrics here? 
executionState.AddFullIterations(1) - incrScenarioIter() + incrIter() return true } } diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go index f6cff6d310d..9818fbe06c4 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -151,6 +151,7 @@ func (sic SharedIterationsConfig) NewExecutor( return &SharedIterations{ BaseExecutor: NewBaseExecutor(sic, es, logger), config: sic, + globalIter: new(uint64), }, nil } @@ -158,8 +159,10 @@ func (sic SharedIterationsConfig) NewExecutor( // all shared by the configured VUs. type SharedIterations struct { *BaseExecutor - config SharedIterationsConfig - et *lib.ExecutionTuple + config SharedIterationsConfig + et *lib.ExecutionTuple + segIdx *lib.SegmentedIndex + globalIter *uint64 } // Make sure we implement the lib.Executor interface. @@ -176,9 +179,24 @@ func (si *SharedIterations) Init(ctx context.Context) error { // with no work, as determined by their config's HasWork() method. et, err := si.BaseExecutor.executionState.ExecutionTuple.GetNewExecutionTupleFromValue(si.config.VUs.Int64) si.et = et + start, offsets, lcd := et.GetStripedOffsets() + si.segIdx = lib.NewSegmentedIndex(start, lcd, offsets) + return err } +// incrGlobalIter increments the global iteration count for this executor, +// taking into account the configured execution segment. +func (si *SharedIterations) incrGlobalIter() { + si.segIdx.Next() + atomic.StoreUint64(si.globalIter, uint64(si.segIdx.GetUnscaled())) +} + +// getGlobalIter returns the global iteration count for this executor. +func (si *SharedIterations) getGlobalIter() uint64 { + return atomic.LoadUint64(si.globalIter) +} + // Run executes a specific total number of iterations, which are all shared by // the configured VUs. 
// nolint:funlen @@ -230,14 +248,18 @@ func (si SharedIterations) Run(parentCtx context.Context, out chan<- stats.Sampl }() regDurationDone := regDurationCtx.Done() - runIteration := getIterationRunner(si.executionState, si.incrScenarioIter, si.logger) + runIteration := getIterationRunner(si.executionState, func() { + si.incrScenarioIter() + si.incrGlobalIter() + }, si.logger) maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ - Name: si.config.Name, - Executor: si.config.Type, - StartTime: startTime, - ProgressFn: progressFn, - GetIter: si.getScenarioIter, + Name: si.config.Name, + Executor: si.config.Type, + StartTime: startTime, + ProgressFn: progressFn, + GetIter: si.getScenarioIter, + GetGlobalIter: si.getGlobalIter, }) returnVU := func(u lib.InitializedVU) { diff --git a/lib/executor/shared_iterations_test.go b/lib/executor/shared_iterations_test.go index 8628e96e4b4..0ea001ea2b9 100644 --- a/lib/executor/shared_iterations_test.go +++ b/lib/executor/shared_iterations_test.go @@ -22,6 +22,7 @@ package executor import ( "context" + "fmt" "sync" "sync/atomic" "testing" @@ -33,6 +34,7 @@ import ( "go.k6.io/k6/lib" "go.k6.io/k6/lib/metrics" + "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/types" "go.k6.io/k6/stats" ) @@ -141,3 +143,56 @@ func TestSharedIterationsEmitDroppedIterations(t *testing.T) { assert.Equal(t, int64(5), count) assert.Equal(t, float64(95), sumMetricValues(engineOut, metrics.DroppedIterations.Name)) } + +func TestSharedIterationsGlobalIters(t *testing.T) { + t.Parallel() + + config := &SharedIterationsConfig{ + VUs: null.IntFrom(5), + Iterations: null.IntFrom(50), + MaxDuration: types.NullDurationFrom(1 * time.Second), + } + + testCases := []struct { + seq, seg string + expIters []uint64 + }{ + {"0,1/4,3/4,1", "0:1/4", []uint64{0, 2, 7, 12, 17, 22, 27, 32, 37, 42}}, + {"0,1/4,3/4,1", "1/4:3/4", []uint64{0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25, 26, 28, 30, 31, 33, 35, 36, 38, 40, 41, 43, 45, 46, 
48}}, + {"0,1/4,3/4,1", "3/4:1", []uint64{0, 4, 9, 14, 19, 24, 29, 34, 39, 44}}, + } + + for _, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { + ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) + require.NoError(t, err) + seg, err := lib.NewExecutionSegmentFromString(tc.seg) + require.NoError(t, err) + et, err := lib.NewExecutionTuple(seg, &ess) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 5, 5) + + runner := &minirunner.MiniRunner{} + ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) + defer cancel() + + gotIters := []uint64{} + var mx sync.Mutex + runner.Fn = func(ctx context.Context, _ chan<- stats.SampleContainer) error { + mx.Lock() + // Slight delay to ensure the lock is held long enough to + // minimize any chances of flakiness... :-/ + time.Sleep(10 * time.Millisecond) + gotIters = append(gotIters, executor.(*SharedIterations).getGlobalIter()) + mx.Unlock() + return nil + } + + engineOut := make(chan stats.SampleContainer, 100) + err = executor.Run(ctx, engineOut) + require.NoError(t, err) + assert.Equal(t, tc.expIters, gotIters) + }) + } +} diff --git a/lib/executors.go b/lib/executors.go index 9832159bc0e..7d8e56851d4 100644 --- a/lib/executors.go +++ b/lib/executors.go @@ -116,6 +116,7 @@ type ScenarioState struct { StartTime time.Time ProgressFn func() (float64, []string) GetIter func() uint64 + GetGlobalIter func() uint64 } // InitVUFunc is just a shorthand so we don't have to type the function From 13dd3f4f2dccc09f79c9d0a6597efd2218c1c27b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Fri, 26 Feb 2021 12:59:09 +0100 Subject: [PATCH 10/22] Add global segmented iterations to ConstantArrivalRate --- lib/executor/constant_arrival_rate.go | 33 ++++++++++--- lib/executor/constant_arrival_rate_test.go | 55 ++++++++++++++++++++++ 2 files changed, 81 insertions(+), 7 deletions(-) diff --git a/lib/executor/constant_arrival_rate.go 
b/lib/executor/constant_arrival_rate.go index 31a23e91811..e0201790360 100644 --- a/lib/executor/constant_arrival_rate.go +++ b/lib/executor/constant_arrival_rate.go @@ -171,6 +171,7 @@ func (carc ConstantArrivalRateConfig) NewExecutor( return &ConstantArrivalRate{ BaseExecutor: NewBaseExecutor(&carc, es, logger), config: carc, + globalIter: new(uint64), }, nil } @@ -183,8 +184,10 @@ func (carc ConstantArrivalRateConfig) HasWork(et *lib.ExecutionTuple) bool { // specific period. type ConstantArrivalRate struct { *BaseExecutor - config ConstantArrivalRateConfig - et *lib.ExecutionTuple + config ConstantArrivalRateConfig + et *lib.ExecutionTuple + segIdx *lib.SegmentedIndex + globalIter *uint64 } // Make sure we implement the lib.Executor interface. @@ -196,9 +199,24 @@ func (car *ConstantArrivalRate) Init(ctx context.Context) error { // with no work, as determined by their config's HasWork() method. et, err := car.BaseExecutor.executionState.ExecutionTuple.GetNewExecutionTupleFromValue(car.config.MaxVUs.Int64) car.et = et + start, offsets, lcd := et.GetStripedOffsets() + car.segIdx = lib.NewSegmentedIndex(start, lcd, offsets) + return err } +// incrGlobalIter increments the global iteration count for this executor, +// taking into account the configured execution segment. +func (car *ConstantArrivalRate) incrGlobalIter() { + car.segIdx.Next() + atomic.StoreUint64(car.globalIter, uint64(car.segIdx.GetUnscaled())) +} + +// getGlobalIter returns the global iteration count for this executor. +func (car *ConstantArrivalRate) getGlobalIter() uint64 { + return atomic.LoadUint64(car.globalIter) +} + // Run executes a constant number of iterations per second. 
// // TODO: Split this up and make an independent component that can be reused @@ -268,11 +286,12 @@ func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- stats.S go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, &car, progressFn) maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ - Name: car.config.Name, - Executor: car.config.Type, - StartTime: startTime, - ProgressFn: progressFn, - GetIter: car.getScenarioIter, + Name: car.config.Name, + Executor: car.config.Type, + StartTime: startTime, + ProgressFn: progressFn, + GetIter: car.getScenarioIter, + GetGlobalIter: car.getGlobalIter, }) returnVU := func(u lib.InitializedVU) { diff --git a/lib/executor/constant_arrival_rate_test.go b/lib/executor/constant_arrival_rate_test.go index 57d4c58b4d9..3cb164705cf 100644 --- a/lib/executor/constant_arrival_rate_test.go +++ b/lib/executor/constant_arrival_rate_test.go @@ -35,6 +35,7 @@ import ( "go.k6.io/k6/lib" "go.k6.io/k6/lib/metrics" + "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/types" "go.k6.io/k6/stats" ) @@ -335,3 +336,57 @@ func TestConstantArrivalRateDroppedIterations(t *testing.T) { assert.Equal(t, int64(5), count) assert.Equal(t, float64(5), sumMetricValues(engineOut, metrics.DroppedIterations.Name)) } + +func TestConstantArrivalRateGlobalIters(t *testing.T) { + t.Parallel() + + config := &ConstantArrivalRateConfig{ + BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(100 * time.Millisecond)}, + TimeUnit: types.NullDurationFrom(950 * time.Millisecond), + Rate: null.IntFrom(20), + Duration: types.NullDurationFrom(1 * time.Second), + PreAllocatedVUs: null.IntFrom(5), + MaxVUs: null.IntFrom(5), + } + + testCases := []struct { + seq, seg string + expIters []uint64 + }{ + // 19 is missing?? 
o.O + {"0,1/4,3/4,1", "0:1/4", []uint64{0, 2, 7, 12, 17}}, + {"0,1/4,3/4,1", "1/4:3/4", []uint64{0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20}}, + {"0,1/4,3/4,1", "3/4:1", []uint64{0, 4, 9, 14}}, + } + + for _, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { + ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) + require.NoError(t, err) + seg, err := lib.NewExecutionSegmentFromString(tc.seg) + require.NoError(t, err) + et, err := lib.NewExecutionTuple(seg, &ess) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 5, 5) + + runner := &minirunner.MiniRunner{} + ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) + defer cancel() + + gotIters := []uint64{} + var mx sync.Mutex + runner.Fn = func(ctx context.Context, _ chan<- stats.SampleContainer) error { + mx.Lock() + gotIters = append(gotIters, executor.(*ConstantArrivalRate).getGlobalIter()) + mx.Unlock() + return nil + } + + engineOut := make(chan stats.SampleContainer, 100) + err = executor.Run(ctx, engineOut) + require.NoError(t, err) + assert.Equal(t, tc.expIters, gotIters) + }) + } +} From d46a1bf27343d695d04487ed15f8233d56120b8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Fri, 26 Feb 2021 10:42:29 +0100 Subject: [PATCH 11/22] Add global segmented iterations to RampingArrivalRate --- lib/executor/ramping_arrival_rate.go | 53 +++++++++++++++---- lib/executor/ramping_arrival_rate_test.go | 63 +++++++++++++++++++++++ 2 files changed, 106 insertions(+), 10 deletions(-) diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index 27489880b03..c8fb0de42b3 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ -158,9 +158,10 @@ func (varc RampingArrivalRateConfig) GetExecutionRequirements(et *lib.ExecutionT func (varc RampingArrivalRateConfig) NewExecutor( es *lib.ExecutionState, logger *logrus.Entry, ) (lib.Executor, 
error) { - return RampingArrivalRate{ + return &RampingArrivalRate{ BaseExecutor: NewBaseExecutor(&varc, es, logger), config: varc, + globalIter: new(uint64), }, nil } @@ -174,12 +175,39 @@ func (varc RampingArrivalRateConfig) HasWork(et *lib.ExecutionTuple) bool { // TODO: combine with the ConstantArrivalRate? type RampingArrivalRate struct { *BaseExecutor - config RampingArrivalRateConfig + config RampingArrivalRateConfig + et *lib.ExecutionTuple + segIdx *lib.SegmentedIndex + globalIter *uint64 } // Make sure we implement the lib.Executor interface. var _ lib.Executor = &RampingArrivalRate{} +// Init values needed for the execution +func (varr *RampingArrivalRate) Init(ctx context.Context) error { + // err should always be nil, because Init() won't be called for executors + // with no work, as determined by their config's HasWork() method. + et, err := varr.BaseExecutor.executionState.ExecutionTuple.GetNewExecutionTupleFromValue(varr.config.MaxVUs.Int64) + varr.et = et + start, offsets, lcd := et.GetStripedOffsets() + varr.segIdx = lib.NewSegmentedIndex(start, lcd, offsets) + + return err +} + +// incrGlobalIter increments the global iteration count for this executor, +// taking into account the configured execution segment. +func (varr *RampingArrivalRate) incrGlobalIter() { + varr.segIdx.Next() + atomic.StoreUint64(varr.globalIter, uint64(varr.segIdx.GetUnscaled())) +} + +// getGlobalIter returns the global iteration count for this executor. +func (varr *RampingArrivalRate) getGlobalIter() uint64 { + return atomic.LoadUint64(varr.globalIter) +} + // cal calculates the transtitions between stages and gives the next full value produced by the // stages. In this explanation we are talking about events and in practice those events are starting // of an iteration, but could really be anything that needs to occur at a constant or linear rate. 
@@ -377,14 +405,15 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.S } varr.progress.Modify(pb.WithProgress(progressFn)) - go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, varr, progressFn) + go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, &varr, progressFn) maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ - Name: varr.config.Name, - Executor: varr.config.Type, - StartTime: startTime, - ProgressFn: progressFn, - GetIter: varr.getScenarioIter, + Name: varr.config.Name, + Executor: varr.config.Type, + StartTime: startTime, + ProgressFn: progressFn, + GetIter: varr.getScenarioIter, + GetGlobalIter: varr.getGlobalIter, }) returnVU := func(u lib.InitializedVU) { @@ -392,7 +421,11 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.S activeVUsWg.Done() } - runIterationBasic := getIterationRunner(varr.executionState, varr.incrScenarioIter, varr.logger) + runIterationBasic := getIterationRunner(varr.executionState, func() { + varr.incrScenarioIter() + varr.incrGlobalIter() + }, varr.logger) + activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { activeVUsWg.Add(1) activeVU := initVU.Activate( @@ -439,7 +472,7 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.S var prevTime time.Duration shownWarning := false metricTags := varr.getMetricTags(nil) - go varr.config.cal(varr.executionState.ExecutionTuple, ch) + go varr.config.cal(varr.et, ch) for nextTime := range ch { select { case <-regDurationDone: diff --git a/lib/executor/ramping_arrival_rate_test.go b/lib/executor/ramping_arrival_rate_test.go index 1c289938793..bacd880ac6a 100644 --- a/lib/executor/ramping_arrival_rate_test.go +++ b/lib/executor/ramping_arrival_rate_test.go @@ -36,6 +36,7 @@ import ( "go.k6.io/k6/lib" "go.k6.io/k6/lib/metrics" + "go.k6.io/k6/lib/testutils/minirunner" "go.k6.io/k6/lib/types" "go.k6.io/k6/stats" ) @@ -686,3 +687,65 @@ func (varc 
RampingArrivalRateConfig) calRat(et *lib.ExecutionTuple, ch chan<- ti base += time.Duration(stage.Duration.Duration) } } + +func TestRampingArrivalRateGlobalIters(t *testing.T) { + t.Parallel() + + config := &RampingArrivalRateConfig{ + BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(100 * time.Millisecond)}, + TimeUnit: types.NullDurationFrom(950 * time.Millisecond), + StartRate: null.IntFrom(0), + PreAllocatedVUs: null.IntFrom(2), + MaxVUs: null.IntFrom(5), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(20), + }, + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(0), + }, + }, + } + + testCases := []struct { + seq, seg string + expIters []uint64 + }{ + {"0,1/4,3/4,1", "0:1/4", []uint64{0, 2, 7, 12}}, + {"0,1/4,3/4,1", "1/4:3/4", []uint64{0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20}}, + {"0,1/4,3/4,1", "3/4:1", []uint64{0, 4, 9}}, + } + + for _, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { + ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) + require.NoError(t, err) + seg, err := lib.NewExecutionSegmentFromString(tc.seg) + require.NoError(t, err) + et, err := lib.NewExecutionTuple(seg, &ess) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 5, 5) + + runner := &minirunner.MiniRunner{} + ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) + defer cancel() + + gotIters := []uint64{} + var mx sync.Mutex + runner.Fn = func(ctx context.Context, _ chan<- stats.SampleContainer) error { + mx.Lock() + gotIters = append(gotIters, executor.(*RampingArrivalRate).getGlobalIter()) + mx.Unlock() + return nil + } + + engineOut := make(chan stats.SampleContainer, 100) + err = executor.Run(ctx, engineOut) + require.NoError(t, err) + assert.Equal(t, tc.expIters, gotIters) + }) + } +} From 8a58c0ce2bdad8520be6c3524456bb9649da966c Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Ivan=20Miri=C4=87?= Date: Mon, 12 Apr 2021 17:01:03 +0200 Subject: [PATCH 12/22] Fix and silence some linter issues --- lib/execution_segment_test.go | 5 +++++ lib/executor/common_test.go | 2 +- lib/executor/constant_arrival_rate_test.go | 3 ++- lib/executor/ramping_arrival_rate.go | 2 +- lib/executor/ramping_arrival_rate_test.go | 7 ++++--- lib/executor/shared_iterations_test.go | 3 ++- 6 files changed, 15 insertions(+), 7 deletions(-) diff --git a/lib/execution_segment_test.go b/lib/execution_segment_test.go index a17665ee5e3..07f490841e8 100644 --- a/lib/execution_segment_test.go +++ b/lib/execution_segment_test.go @@ -926,8 +926,10 @@ func BenchmarkExecutionSegmentScale(b *testing.B) { // TODO: test with randomized things func TestSegmentedIndex(t *testing.T) { + t.Parallel() // TODO ... more structure ? t.Run("full", func(t *testing.T) { + t.Parallel() s := SegmentedIndex{start: 0, lcd: 1, offsets: []int64{1}} s.Next() @@ -964,6 +966,7 @@ func TestSegmentedIndex(t *testing.T) { }) t.Run("half", func(t *testing.T) { + t.Parallel() s := SegmentedIndex{start: 0, lcd: 2, offsets: []int64{2}} s.Next() @@ -1004,6 +1007,7 @@ func TestSegmentedIndex(t *testing.T) { }) t.Run("the other half", func(t *testing.T) { + t.Parallel() s := SegmentedIndex{start: 1, lcd: 2, offsets: []int64{2}} s.Next() @@ -1044,6 +1048,7 @@ func TestSegmentedIndex(t *testing.T) { }) t.Run("strange", func(t *testing.T) { + t.Parallel() s := SegmentedIndex{start: 1, lcd: 7, offsets: []int64{4, 3}} s.Next() diff --git a/lib/executor/common_test.go b/lib/executor/common_test.go index d4fbcba1575..a2aba342460 100644 --- a/lib/executor/common_test.go +++ b/lib/executor/common_test.go @@ -55,7 +55,7 @@ func setupExecutor(t testing.TB, config lib.ExecutorConfig, es *lib.ExecutionSta logEntry := logrus.NewEntry(testLog) initVUFunc := func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { - return runner.NewVU(es.GetUniqueVUIdentifier(), engineOut) + return 
runner.NewVU(es.GetUniqueVUIdentifier(), engineOut) //nolint: wrapcheck } es.SetInitVUFunc(initVUFunc) diff --git a/lib/executor/constant_arrival_rate_test.go b/lib/executor/constant_arrival_rate_test.go index 3cb164705cf..347bd0fd25f 100644 --- a/lib/executor/constant_arrival_rate_test.go +++ b/lib/executor/constant_arrival_rate_test.go @@ -359,9 +359,10 @@ func TestConstantArrivalRateGlobalIters(t *testing.T) { {"0,1/4,3/4,1", "3/4:1", []uint64{0, 4, 9, 14}}, } - for _, tc := range testCases { + for _, tc := range testCases { //nolint: paralleltest // false positive: https://github.com/kunwardeep/paralleltest/issues/8 tc := tc t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { + t.Parallel() ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) require.NoError(t, err) seg, err := lib.NewExecutionSegmentFromString(tc.seg) diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index c8fb0de42b3..e3bd095650f 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ -193,7 +193,7 @@ func (varr *RampingArrivalRate) Init(ctx context.Context) error { start, offsets, lcd := et.GetStripedOffsets() varr.segIdx = lib.NewSegmentedIndex(start, lcd, offsets) - return err + return err //nolint: wrapcheck } // incrGlobalIter increments the global iteration count for this executor, diff --git a/lib/executor/ramping_arrival_rate_test.go b/lib/executor/ramping_arrival_rate_test.go index bacd880ac6a..be834b89339 100644 --- a/lib/executor/ramping_arrival_rate_test.go +++ b/lib/executor/ramping_arrival_rate_test.go @@ -185,7 +185,7 @@ func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) { time.Sleep(time.Millisecond * 200) cur = atomic.LoadInt64(&count) require.NotEqual(t, cur, int64(2)) - return runner.NewVU(es.GetUniqueVUIdentifier(), engineOut) + return runner.NewVU(es.GetUniqueVUIdentifier(), engineOut) //nolint: wrapcheck }) err = executor.Run(ctx, engineOut) assert.NoError(t, err) 
@@ -235,7 +235,7 @@ func TestRampingArrivalRateRunCorrectRateWithSlowRate(t *testing.T) { cur = atomic.LoadInt64(&count) require.NotEqual(t, cur, int64(1)) - return runner.NewVU(es.GetUniqueVUIdentifier(), engineOut) + return runner.NewVU(es.GetUniqueVUIdentifier(), engineOut) //nolint: wrapcheck }) err = executor.Run(ctx, engineOut) assert.NoError(t, err) @@ -718,9 +718,10 @@ func TestRampingArrivalRateGlobalIters(t *testing.T) { {"0,1/4,3/4,1", "3/4:1", []uint64{0, 4, 9}}, } - for _, tc := range testCases { + for _, tc := range testCases { //nolint: paralleltest // false positive: https://github.com/kunwardeep/paralleltest/issues/8 tc := tc t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { + t.Parallel() ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) require.NoError(t, err) seg, err := lib.NewExecutionSegmentFromString(tc.seg) diff --git a/lib/executor/shared_iterations_test.go b/lib/executor/shared_iterations_test.go index 0ea001ea2b9..1060cfe2f26 100644 --- a/lib/executor/shared_iterations_test.go +++ b/lib/executor/shared_iterations_test.go @@ -162,9 +162,10 @@ func TestSharedIterationsGlobalIters(t *testing.T) { {"0,1/4,3/4,1", "3/4:1", []uint64{0, 4, 9, 14, 19, 24, 29, 34, 39, 44}}, } - for _, tc := range testCases { + for _, tc := range testCases { //nolint: paralleltest // false positive: https://github.com/kunwardeep/paralleltest/issues/8 tc := tc t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { + t.Parallel() ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) require.NoError(t, err) seg, err := lib.NewExecutionSegmentFromString(tc.seg) From bb9761c4b16d961a12be3b07f6c163160324a03c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Fri, 14 May 2021 15:35:34 +0200 Subject: [PATCH 13/22] Remove redundant nolint comments This is now disabled globally in .golangci.yml. 
--- lib/executor/constant_arrival_rate_test.go | 2 +- lib/executor/ramping_arrival_rate_test.go | 2 +- lib/executor/shared_iterations_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/executor/constant_arrival_rate_test.go b/lib/executor/constant_arrival_rate_test.go index 347bd0fd25f..2d6f02c2e05 100644 --- a/lib/executor/constant_arrival_rate_test.go +++ b/lib/executor/constant_arrival_rate_test.go @@ -359,7 +359,7 @@ func TestConstantArrivalRateGlobalIters(t *testing.T) { {"0,1/4,3/4,1", "3/4:1", []uint64{0, 4, 9, 14}}, } - for _, tc := range testCases { //nolint: paralleltest // false positive: https://github.com/kunwardeep/paralleltest/issues/8 + for _, tc := range testCases { tc := tc t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { t.Parallel() diff --git a/lib/executor/ramping_arrival_rate_test.go b/lib/executor/ramping_arrival_rate_test.go index be834b89339..873b8d5c890 100644 --- a/lib/executor/ramping_arrival_rate_test.go +++ b/lib/executor/ramping_arrival_rate_test.go @@ -718,7 +718,7 @@ func TestRampingArrivalRateGlobalIters(t *testing.T) { {"0,1/4,3/4,1", "3/4:1", []uint64{0, 4, 9}}, } - for _, tc := range testCases { //nolint: paralleltest // false positive: https://github.com/kunwardeep/paralleltest/issues/8 + for _, tc := range testCases { tc := tc t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t *testing.T) { t.Parallel() diff --git a/lib/executor/shared_iterations_test.go b/lib/executor/shared_iterations_test.go index 1060cfe2f26..580f192405a 100644 --- a/lib/executor/shared_iterations_test.go +++ b/lib/executor/shared_iterations_test.go @@ -162,7 +162,7 @@ func TestSharedIterationsGlobalIters(t *testing.T) { {"0,1/4,3/4,1", "3/4:1", []uint64{0, 4, 9, 14, 19, 24, 29, 34, 39, 44}}, } - for _, tc := range testCases { //nolint: paralleltest // false positive: https://github.com/kunwardeep/paralleltest/issues/8 + for _, tc := range testCases { tc := tc t.Run(fmt.Sprintf("%s_%s", tc.seq, tc.seg), func(t 
*testing.T) { t.Parallel() From 0fdb1c2a35184431c89813dc2da5cc10c47bbe85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Fri, 28 May 2021 11:36:21 +0200 Subject: [PATCH 14/22] Make scenario iteration local to VUs, fix iteration increment, start at 0 This moves the iteration increment and setting of __ITER outside of runFn() so it will no longer be defined in setup() and teardown(). Apologies for the large commit. I worked simultaneously on these changes and it would be difficult to split. --- js/runner.go | 33 +++++------ lib/executor/base_executor.go | 15 ++--- lib/executor/constant_arrival_rate.go | 32 +++++----- lib/executor/constant_arrival_rate_test.go | 11 ++-- lib/executor/constant_vus.go | 6 +- lib/executor/externally_controlled.go | 3 +- lib/executor/helpers.go | 9 ++- lib/executor/per_vu_iterations.go | 6 +- lib/executor/ramping_arrival_rate.go | 37 ++++++------ lib/executor/ramping_arrival_rate_test.go | 10 ++-- lib/executor/ramping_vus.go | 3 +- lib/executor/shared_iterations.go | 36 ++++++------ lib/executor/shared_iterations_test.go | 19 +++--- lib/executor/vu_handle.go | 2 +- lib/executor/vu_handle_test.go | 65 ++++++++------------- lib/executors.go | 2 - lib/netext/httpext/request.go | 2 +- lib/runner.go | 2 + lib/state.go | 68 +++++++++++++++++----- lib/testutils/minirunner/minirunner.go | 23 ++++---- 20 files changed, 209 insertions(+), 175 deletions(-) diff --git a/js/runner.go b/js/runner.go index c429be30551..0b12820a262 100644 --- a/js/runner.go +++ b/js/runner.go @@ -230,7 +230,6 @@ func (r *Runner) newVU(id uint64, samplesOut chan<- stats.SampleContainer) (*VU, BPool: vu.BPool, Vu: vu.ID, Samples: vu.Samples, - Iteration: vu.Iteration, Tags: vu.Runner.Bundle.Options.RunTags.CloneTags(), Group: r.defaultGroup, } @@ -527,12 +526,12 @@ func (r *Runner) getTimeoutFor(stage string) time.Duration { type VU struct { BundleInstance - Runner *Runner - Transport *http.Transport - Dialer *netext.Dialer - CookieJar *cookiejar.Jar - 
TLSConfig *tls.Config - ID, Iteration uint64 + Runner *Runner + Transport *http.Transport + Dialer *netext.Dialer + CookieJar *cookiejar.Jar + TLSConfig *tls.Config + ID uint64 Console *console BPool *bpool.BufferPool @@ -590,7 +589,7 @@ func (u *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { u.state.Tags["vu"] = strconv.FormatUint(u.ID, 10) } if opts.SystemTags.Has(stats.TagIter) { - u.state.Tags["iter"] = strconv.FormatUint(u.Iteration, 10) + u.state.Tags["iter"] = strconv.FormatInt(u.state.GetIteration(), 10) } if opts.SystemTags.Has(stats.TagGroup) { u.state.Tags["group"] = u.state.Group.Path @@ -610,6 +609,9 @@ func (u *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { } } + u.state.IncrScIter = params.IncrScIter + u.state.IncrScIterGlobal = params.IncrScIterGlobal + avu := &ActiveVU{ VU: u, VUActivationParams: params, @@ -665,6 +667,11 @@ func (u *ActiveVU) RunOnce() error { panic(fmt.Sprintf("function '%s' not found in exports", u.Exec)) } + u.state.IncrIteration() + if err := u.Runtime.Set("__ITER", u.state.GetIteration()); err != nil { + panic(fmt.Errorf("error setting __ITER in goja runtime: %w", err)) + } + // Call the exported function. _, isFullIteration, totalTime, err := u.runFn(u.RunContext, true, fn, u.setupData) @@ -695,17 +702,9 @@ func (u *VU) runFn( opts := &u.Runner.Bundle.Options if opts.SystemTags.Has(stats.TagIter) { - u.state.Tags["iter"] = strconv.FormatUint(u.Iteration, 10) + u.state.Tags["iter"] = strconv.FormatInt(u.state.GetIteration(), 10) } - // TODO: this seems like the wrong place for the iteration incrementation - // also this means that teardown and setup have __ITER defined - // maybe move it to RunOnce ? 
- u.Runtime.Set("__ITER", u.Iteration) - u.Iteration++ - u.state.Iteration = u.Iteration - u.state.IncrScenarioVUIter() - defer func() { if r := recover(); r != nil { gojaStack := u.Runtime.CaptureCallStack(20, nil) diff --git a/lib/executor/base_executor.go b/lib/executor/base_executor.go index 73ea026e891..b43e8f3e5a5 100644 --- a/lib/executor/base_executor.go +++ b/lib/executor/base_executor.go @@ -40,18 +40,20 @@ type BaseExecutor struct { config lib.ExecutorConfig executionState *lib.ExecutionState localVUID *uint64 // counter for assigning executor-specific VU IDs - localIters *uint64 // counter for keeping track of all VU iterations completed by this executor + localIter *int64 // counter for keeping track of all VU iterations completed by this executor logger *logrus.Entry progress *pb.ProgressBar } // NewBaseExecutor returns an initialized BaseExecutor func NewBaseExecutor(config lib.ExecutorConfig, es *lib.ExecutionState, logger *logrus.Entry) *BaseExecutor { + // Start at -1 so that the first iteration can be 0 + startLocalIter := int64(-1) return &BaseExecutor{ config: config, executionState: es, localVUID: new(uint64), - localIters: new(uint64), + localIter: &startLocalIter, logger: logger, progress: pb.New( pb.WithLeft(config.GetName), @@ -100,13 +102,8 @@ func (bs BaseExecutor) getMetricTags(vuID *uint64) *stats.SampleTags { return stats.IntoSampleTags(&tags) } -// getScenarioIter returns the completed iterations by all VUs for this executor. -func (bs *BaseExecutor) getScenarioIter() uint64 { - return atomic.LoadUint64(bs.localIters) -} - // incrScenarioIter increments the counter of completed iterations by all VUs // for this executor. 
-func (bs *BaseExecutor) incrScenarioIter() { - atomic.AddUint64(bs.localIters, 1) +func (bs *BaseExecutor) incrScenarioIter() int64 { + return atomic.AddInt64(bs.localIter, 1) } diff --git a/lib/executor/constant_arrival_rate.go b/lib/executor/constant_arrival_rate.go index e0201790360..40eaf374693 100644 --- a/lib/executor/constant_arrival_rate.go +++ b/lib/executor/constant_arrival_rate.go @@ -168,10 +168,11 @@ func (carc ConstantArrivalRateConfig) GetExecutionRequirements(et *lib.Execution func (carc ConstantArrivalRateConfig) NewExecutor( es *lib.ExecutionState, logger *logrus.Entry, ) (lib.Executor, error) { + startGlobalIter := int64(-1) return &ConstantArrivalRate{ BaseExecutor: NewBaseExecutor(&carc, es, logger), config: carc, - globalIter: new(uint64), + globalIter: &startGlobalIter, }, nil } @@ -187,7 +188,8 @@ type ConstantArrivalRate struct { config ConstantArrivalRateConfig et *lib.ExecutionTuple segIdx *lib.SegmentedIndex - globalIter *uint64 + iterMx sync.Mutex + globalIter *int64 } // Make sure we implement the lib.Executor interface. @@ -207,14 +209,17 @@ func (car *ConstantArrivalRate) Init(ctx context.Context) error { // incrGlobalIter increments the global iteration count for this executor, // taking into account the configured execution segment. -func (car *ConstantArrivalRate) incrGlobalIter() { +func (car *ConstantArrivalRate) incrGlobalIter() int64 { + car.iterMx.Lock() + defer car.iterMx.Unlock() car.segIdx.Next() - atomic.StoreUint64(car.globalIter, uint64(car.segIdx.GetUnscaled())) + atomic.StoreInt64(car.globalIter, car.segIdx.GetUnscaled()-1) + return atomic.LoadInt64(car.globalIter) } // getGlobalIter returns the global iteration count for this executor. -func (car *ConstantArrivalRate) getGlobalIter() uint64 { - return atomic.LoadUint64(car.globalIter) +func (car *ConstantArrivalRate) getGlobalIter() int64 { + return atomic.LoadInt64(car.globalIter) } // Run executes a constant number of iterations per second. 
@@ -286,12 +291,10 @@ func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- stats.S go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, &car, progressFn) maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ - Name: car.config.Name, - Executor: car.config.Type, - StartTime: startTime, - ProgressFn: progressFn, - GetIter: car.getScenarioIter, - GetGlobalIter: car.getGlobalIter, + Name: car.config.Name, + Executor: car.config.Type, + StartTime: startTime, + ProgressFn: progressFn, }) returnVU := func(u lib.InitializedVU) { @@ -299,11 +302,12 @@ func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- stats.S activeVUsWg.Done() } - runIterationBasic := getIterationRunner(car.executionState, car.incrScenarioIter, car.logger) + runIterationBasic := getIterationRunner(car.executionState, car.logger) activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { activeVUsWg.Add(1) activeVU := initVU.Activate(getVUActivationParams( - maxDurationCtx, car.config.BaseConfig, returnVU, car.GetNextLocalVUID)) + maxDurationCtx, car.config.BaseConfig, returnVU, + car.GetNextLocalVUID, car.incrScenarioIter, car.incrGlobalIter)) car.executionState.ModCurrentlyActiveVUsCount(+1) atomic.AddUint64(&activeVUsCount, 1) vusPool.AddVU(maxDurationCtx, activeVU, runIterationBasic) diff --git a/lib/executor/constant_arrival_rate_test.go b/lib/executor/constant_arrival_rate_test.go index 2d6f02c2e05..741b0a158ac 100644 --- a/lib/executor/constant_arrival_rate_test.go +++ b/lib/executor/constant_arrival_rate_test.go @@ -351,12 +351,11 @@ func TestConstantArrivalRateGlobalIters(t *testing.T) { testCases := []struct { seq, seg string - expIters []uint64 + expIters []int64 }{ - // 19 is missing?? 
o.O - {"0,1/4,3/4,1", "0:1/4", []uint64{0, 2, 7, 12, 17}}, - {"0,1/4,3/4,1", "1/4:3/4", []uint64{0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20}}, - {"0,1/4,3/4,1", "3/4:1", []uint64{0, 4, 9, 14}}, + {"0,1/4,3/4,1", "0:1/4", []int64{1, 6, 11, 16, 21}}, + {"0,1/4,3/4,1", "1/4:3/4", []int64{0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20}}, + {"0,1/4,3/4,1", "3/4:1", []int64{3, 8, 13, 18}}, } for _, tc := range testCases { @@ -375,7 +374,7 @@ func TestConstantArrivalRateGlobalIters(t *testing.T) { ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) defer cancel() - gotIters := []uint64{} + gotIters := []int64{} var mx sync.Mutex runner.Fn = func(ctx context.Context, _ chan<- stats.SampleContainer) error { mx.Lock() diff --git a/lib/executor/constant_vus.go b/lib/executor/constant_vus.go index b038b6047e9..43f666c66c3 100644 --- a/lib/executor/constant_vus.go +++ b/lib/executor/constant_vus.go @@ -174,14 +174,13 @@ func (clv ConstantVUs) Run(parentCtx context.Context, out chan<- stats.SampleCon defer activeVUs.Wait() regDurationDone := regDurationCtx.Done() - runIteration := getIterationRunner(clv.executionState, clv.incrScenarioIter, clv.logger) + runIteration := getIterationRunner(clv.executionState, clv.logger) maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ Name: clv.config.Name, Executor: clv.config.Type, StartTime: startTime, ProgressFn: progressFn, - GetIter: clv.getScenarioIter, }) returnVU := func(u lib.InitializedVU) { @@ -194,7 +193,8 @@ func (clv ConstantVUs) Run(parentCtx context.Context, out chan<- stats.SampleCon defer cancel() activeVU := initVU.Activate( - getVUActivationParams(ctx, clv.config.BaseConfig, returnVU, clv.GetNextLocalVUID)) + getVUActivationParams(ctx, clv.config.BaseConfig, returnVU, + clv.GetNextLocalVUID, clv.incrScenarioIter, nil)) for { select { diff --git a/lib/executor/externally_controlled.go b/lib/executor/externally_controlled.go index 481c0f07160..97a6bfb3d9a 100644 --- 
a/lib/executor/externally_controlled.go +++ b/lib/executor/externally_controlled.go @@ -530,7 +530,7 @@ func (mex *ExternallyControlled) Run(parentCtx context.Context, out chan<- stats currentlyPaused: false, activeVUsCount: new(int64), maxVUs: new(int64), - runIteration: getIterationRunner(mex.executionState, mex.incrScenarioIter, mex.logger), + runIteration: getIterationRunner(mex.executionState, mex.logger), } *runState.maxVUs = startMaxVUs if err = runState.retrieveStartMaxVUs(); err != nil { @@ -542,7 +542,6 @@ func (mex *ExternallyControlled) Run(parentCtx context.Context, out chan<- stats Executor: mex.config.Type, StartTime: time.Now(), ProgressFn: runState.progressFn, - GetIter: mex.getScenarioIter, }) mex.progress.Modify(pb.WithProgress(runState.progressFn)) // Keep track of the progress diff --git a/lib/executor/helpers.go b/lib/executor/helpers.go index 11961eba8ba..81ec1777e6e 100644 --- a/lib/executor/helpers.go +++ b/lib/executor/helpers.go @@ -82,7 +82,7 @@ func validateStages(stages []Stage) []error { // // TODO: emit the end-of-test iteration metrics here (https://github.com/k6io/k6/issues/1250) func getIterationRunner( - executionState *lib.ExecutionState, incrIter func(), logger *logrus.Entry, + executionState *lib.ExecutionState, logger *logrus.Entry, ) func(context.Context, lib.ActiveVU) bool { return func(ctx context.Context, vu lib.ActiveVU) bool { err := vu.RunOnce() @@ -110,7 +110,6 @@ func getIterationRunner( // TODO: move emission of end-of-iteration metrics here? executionState.AddFullIterations(1) - incrIter() return true } } @@ -223,8 +222,10 @@ func getArrivalRatePerSec(scaledArrivalRate *big.Rat) *big.Rat { return perSecRate.Mul(perSecRate, scaledArrivalRate) } +// TODO: Refactor this, maybe move all scenario things to an embedded struct? 
func getVUActivationParams( - ctx context.Context, conf BaseConfig, deactivateCallback func(lib.InitializedVU), getScenarioVUID func() uint64, + ctx context.Context, conf BaseConfig, deactivateCallback func(lib.InitializedVU), + getScenarioVUID func() uint64, incrScIter func() int64, incrScIterGlobal func() int64, ) *lib.VUActivationParams { return &lib.VUActivationParams{ RunContext: ctx, @@ -234,5 +235,7 @@ func getVUActivationParams( Tags: conf.GetTags(), DeactivateCallback: deactivateCallback, GetScenarioVUID: getScenarioVUID, + IncrScIter: incrScIter, + IncrScIterGlobal: incrScIterGlobal, } } diff --git a/lib/executor/per_vu_iterations.go b/lib/executor/per_vu_iterations.go index c040689dc9d..f019478d5b7 100644 --- a/lib/executor/per_vu_iterations.go +++ b/lib/executor/per_vu_iterations.go @@ -198,14 +198,13 @@ func (pvi PerVUIterations) Run(parentCtx context.Context, out chan<- stats.Sampl defer activeVUs.Wait() regDurationDone := regDurationCtx.Done() - runIteration := getIterationRunner(pvi.executionState, pvi.incrScenarioIter, pvi.logger) + runIteration := getIterationRunner(pvi.executionState, pvi.logger) maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ Name: pvi.config.Name, Executor: pvi.config.Type, StartTime: startTime, ProgressFn: progressFn, - GetIter: pvi.getScenarioIter, }) returnVU := func(u lib.InitializedVU) { @@ -220,7 +219,8 @@ func (pvi PerVUIterations) Run(parentCtx context.Context, out chan<- stats.Sampl vuID := initVU.GetID() activeVU := initVU.Activate( - getVUActivationParams(ctx, pvi.config.BaseConfig, returnVU, pvi.GetNextLocalVUID)) + getVUActivationParams(ctx, pvi.config.BaseConfig, returnVU, + pvi.GetNextLocalVUID, pvi.incrScenarioIter, nil)) for i := int64(0); i < iterations; i++ { select { diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index e3bd095650f..7226c807945 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ 
-158,10 +158,11 @@ func (varc RampingArrivalRateConfig) GetExecutionRequirements(et *lib.ExecutionT func (varc RampingArrivalRateConfig) NewExecutor( es *lib.ExecutionState, logger *logrus.Entry, ) (lib.Executor, error) { + startGlobalIter := int64(-1) return &RampingArrivalRate{ BaseExecutor: NewBaseExecutor(&varc, es, logger), config: varc, - globalIter: new(uint64), + globalIter: &startGlobalIter, }, nil } @@ -178,7 +179,8 @@ type RampingArrivalRate struct { config RampingArrivalRateConfig et *lib.ExecutionTuple segIdx *lib.SegmentedIndex - globalIter *uint64 + iterMx sync.Mutex + globalIter *int64 } // Make sure we implement the lib.Executor interface. @@ -198,14 +200,17 @@ func (varr *RampingArrivalRate) Init(ctx context.Context) error { // incrGlobalIter increments the global iteration count for this executor, // taking into account the configured execution segment. -func (varr *RampingArrivalRate) incrGlobalIter() { +func (varr *RampingArrivalRate) incrGlobalIter() int64 { + varr.iterMx.Lock() + defer varr.iterMx.Unlock() varr.segIdx.Next() - atomic.StoreUint64(varr.globalIter, uint64(varr.segIdx.GetUnscaled())) + atomic.StoreInt64(varr.globalIter, varr.segIdx.GetUnscaled()-1) + return atomic.LoadInt64(varr.globalIter) } // getGlobalIter returns the global iteration count for this executor. 
-func (varr *RampingArrivalRate) getGlobalIter() uint64 { - return atomic.LoadUint64(varr.globalIter) +func (varr *RampingArrivalRate) getGlobalIter() int64 { + return atomic.LoadInt64(varr.globalIter) } // cal calculates the transtitions between stages and gives the next full value produced by the @@ -408,12 +413,10 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.S go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, &varr, progressFn) maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ - Name: varr.config.Name, - Executor: varr.config.Type, - StartTime: startTime, - ProgressFn: progressFn, - GetIter: varr.getScenarioIter, - GetGlobalIter: varr.getGlobalIter, + Name: varr.config.Name, + Executor: varr.config.Type, + StartTime: startTime, + ProgressFn: progressFn, }) returnVU := func(u lib.InitializedVU) { @@ -421,15 +424,15 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.S activeVUsWg.Done() } - runIterationBasic := getIterationRunner(varr.executionState, func() { - varr.incrScenarioIter() - varr.incrGlobalIter() - }, varr.logger) + runIterationBasic := getIterationRunner(varr.executionState, varr.logger) activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { activeVUsWg.Add(1) activeVU := initVU.Activate( - getVUActivationParams(maxDurationCtx, varr.config.BaseConfig, returnVU, varr.GetNextLocalVUID)) + getVUActivationParams( + maxDurationCtx, varr.config.BaseConfig, returnVU, + varr.GetNextLocalVUID, varr.incrScenarioIter, + varr.incrGlobalIter)) varr.executionState.ModCurrentlyActiveVUsCount(+1) atomic.AddUint64(&activeVUsCount, 1) diff --git a/lib/executor/ramping_arrival_rate_test.go b/lib/executor/ramping_arrival_rate_test.go index 873b8d5c890..b2b5f6bf9c8 100644 --- a/lib/executor/ramping_arrival_rate_test.go +++ b/lib/executor/ramping_arrival_rate_test.go @@ -711,11 +711,11 @@ func TestRampingArrivalRateGlobalIters(t *testing.T) { testCases := []struct { 
seq, seg string - expIters []uint64 + expIters []int64 }{ - {"0,1/4,3/4,1", "0:1/4", []uint64{0, 2, 7, 12}}, - {"0,1/4,3/4,1", "1/4:3/4", []uint64{0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20}}, - {"0,1/4,3/4,1", "3/4:1", []uint64{0, 4, 9}}, + {"0,1/4,3/4,1", "0:1/4", []int64{1, 6, 11, 16}}, + {"0,1/4,3/4,1", "1/4:3/4", []int64{0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20}}, + {"0,1/4,3/4,1", "3/4:1", []int64{3, 8, 13}}, } for _, tc := range testCases { @@ -734,7 +734,7 @@ func TestRampingArrivalRateGlobalIters(t *testing.T) { ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) defer cancel() - gotIters := []uint64{} + gotIters := []int64{} var mx sync.Mutex runner.Fn = func(ctx context.Context, _ chan<- stats.SampleContainer) error { mx.Lock() diff --git a/lib/executor/ramping_vus.go b/lib/executor/ramping_vus.go index 622d397de08..e3359aa1162 100644 --- a/lib/executor/ramping_vus.go +++ b/lib/executor/ramping_vus.go @@ -543,7 +543,7 @@ func (vlv RampingVUs) Run(parentCtx context.Context, out chan<- stats.SampleCont // Actually schedule the VUs and iterations, likely the most complicated // executor among all of them... 
- runIteration := getIterationRunner(vlv.executionState, vlv.incrScenarioIter, vlv.logger) + runIteration := getIterationRunner(vlv.executionState, vlv.logger) getVU := func() (lib.InitializedVU, error) { initVU, err := vlv.executionState.GetPlannedVU(vlv.logger, false) if err != nil { @@ -568,7 +568,6 @@ func (vlv RampingVUs) Run(parentCtx context.Context, out chan<- stats.SampleCont Executor: vlv.config.Type, StartTime: startTime, ProgressFn: progressFn, - GetIter: vlv.getScenarioIter, }) vuHandles := make([]*vuHandle, maxVUs) for i := uint64(0); i < maxVUs; i++ { diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go index 9818fbe06c4..60bd645ab52 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -148,10 +148,11 @@ func (sic SharedIterationsConfig) GetExecutionRequirements(et *lib.ExecutionTupl func (sic SharedIterationsConfig) NewExecutor( es *lib.ExecutionState, logger *logrus.Entry, ) (lib.Executor, error) { + startGlobalIter := int64(-1) return &SharedIterations{ BaseExecutor: NewBaseExecutor(sic, es, logger), config: sic, - globalIter: new(uint64), + globalIter: &startGlobalIter, }, nil } @@ -162,7 +163,8 @@ type SharedIterations struct { config SharedIterationsConfig et *lib.ExecutionTuple segIdx *lib.SegmentedIndex - globalIter *uint64 + iterMx sync.Mutex + globalIter *int64 } // Make sure we implement the lib.Executor interface. @@ -187,14 +189,17 @@ func (si *SharedIterations) Init(ctx context.Context) error { // incrGlobalIter increments the global iteration count for this executor, // taking into account the configured execution segment. 
-func (si *SharedIterations) incrGlobalIter() { +func (si *SharedIterations) incrGlobalIter() int64 { + si.iterMx.Lock() + defer si.iterMx.Unlock() si.segIdx.Next() - atomic.StoreUint64(si.globalIter, uint64(si.segIdx.GetUnscaled())) + atomic.StoreInt64(si.globalIter, si.segIdx.GetUnscaled()-1) + return atomic.LoadInt64(si.globalIter) } // getGlobalIter returns the global iteration count for this executor. -func (si *SharedIterations) getGlobalIter() uint64 { - return atomic.LoadUint64(si.globalIter) +func (si *SharedIterations) getGlobalIter() int64 { + return atomic.LoadInt64(si.globalIter) } // Run executes a specific total number of iterations, which are all shared by @@ -248,18 +253,13 @@ func (si SharedIterations) Run(parentCtx context.Context, out chan<- stats.Sampl }() regDurationDone := regDurationCtx.Done() - runIteration := getIterationRunner(si.executionState, func() { - si.incrScenarioIter() - si.incrGlobalIter() - }, si.logger) + runIteration := getIterationRunner(si.executionState, si.logger) maxDurationCtx = lib.WithScenarioState(maxDurationCtx, &lib.ScenarioState{ - Name: si.config.Name, - Executor: si.config.Type, - StartTime: startTime, - ProgressFn: progressFn, - GetIter: si.getScenarioIter, - GetGlobalIter: si.getGlobalIter, + Name: si.config.Name, + Executor: si.config.Type, + StartTime: startTime, + ProgressFn: progressFn, }) returnVU := func(u lib.InitializedVU) { @@ -272,7 +272,9 @@ func (si SharedIterations) Run(parentCtx context.Context, out chan<- stats.Sampl defer cancel() activeVU := initVU.Activate(getVUActivationParams( - ctx, si.config.BaseConfig, returnVU, si.GetNextLocalVUID)) + ctx, si.config.BaseConfig, returnVU, si.GetNextLocalVUID, + si.incrScenarioIter, si.incrGlobalIter, + )) for { select { diff --git a/lib/executor/shared_iterations_test.go b/lib/executor/shared_iterations_test.go index 580f192405a..7d92fe17a60 100644 --- a/lib/executor/shared_iterations_test.go +++ b/lib/executor/shared_iterations_test.go @@ -155,11 
+155,14 @@ func TestSharedIterationsGlobalIters(t *testing.T) { testCases := []struct { seq, seg string - expIters []uint64 + expIters []int64 }{ - {"0,1/4,3/4,1", "0:1/4", []uint64{0, 2, 7, 12, 17, 22, 27, 32, 37, 42}}, - {"0,1/4,3/4,1", "1/4:3/4", []uint64{0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25, 26, 28, 30, 31, 33, 35, 36, 38, 40, 41, 43, 45, 46, 48}}, - {"0,1/4,3/4,1", "3/4:1", []uint64{0, 4, 9, 14, 19, 24, 29, 34, 39, 44}}, + {"0,1/4,3/4,1", "0:1/4", []int64{1, 6, 11, 16, 21, 26, 31, 36, 41, 46}}, + {"0,1/4,3/4,1", "1/4:3/4", []int64{0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40, 42, 44, 45, 47, 49}}, + // FIXME: The skewed values are because of the time.Sleep() in the + // VU function below. + // {"0,1/4,3/4,1", "1/4:3/4", []int64{4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40, 42, 44, 45, 47, 49, 49, 49}}, + {"0,1/4,3/4,1", "3/4:1", []int64{3, 8, 13, 18, 23, 28, 33, 38, 43, 48}}, } for _, tc := range testCases { @@ -178,14 +181,14 @@ func TestSharedIterationsGlobalIters(t *testing.T) { ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) defer cancel() - gotIters := []uint64{} + gotIters := []int64{} var mx sync.Mutex runner.Fn = func(ctx context.Context, _ chan<- stats.SampleContainer) error { mx.Lock() - // Slight delay to ensure the lock is held long enough to - // minimize any chances of flakiness... :-/ - time.Sleep(10 * time.Millisecond) gotIters = append(gotIters, executor.(*SharedIterations).getGlobalIter()) + // FIXME: This delay minimizes chances of flakiness, but + // produces skewed values. 
:-/ + // time.Sleep(10 * time.Millisecond) mx.Unlock() return nil } diff --git a/lib/executor/vu_handle.go b/lib/executor/vu_handle.go index 775522d777a..e17e443291e 100644 --- a/lib/executor/vu_handle.go +++ b/lib/executor/vu_handle.go @@ -149,7 +149,7 @@ func (vh *vuHandle) start() (err error) { return err } - vh.activeVU = vh.initVU.Activate(getVUActivationParams(vh.ctx, *vh.config, vh.returnVU, vh.getScenarioVUID)) + vh.activeVU = vh.initVU.Activate(getVUActivationParams(vh.ctx, *vh.config, vh.returnVU, vh.getScenarioVUID, nil, nil)) close(vh.canStartIter) vh.changeState(starting) } diff --git a/lib/executor/vu_handle_test.go b/lib/executor/vu_handle_test.go index c4d8d1f692d..b16dd6f1f83 100644 --- a/lib/executor/vu_handle_test.go +++ b/lib/executor/vu_handle_test.go @@ -30,18 +30,15 @@ func TestVUHandleRace(t *testing.T) { // testLog.Level = logrus.DebugLevel logEntry := logrus.NewEntry(testLog) + runner := &minirunner.MiniRunner{} + runner.Fn = func(ctx context.Context, out chan<- stats.SampleContainer) error { + return nil + } + var getVUCount int64 var returnVUCount int64 getVU := func() (lib.InitializedVU, error) { - atomic.AddInt64(&getVUCount, 1) - return &minirunner.VU{ - R: &minirunner.MiniRunner{ - Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { - // TODO: do something - return nil - }, - }, - }, nil + return runner.NewVU(uint64(atomic.AddInt64(&getVUCount, 1)), nil) } returnVU := func(_ lib.InitializedVU) { @@ -122,21 +119,18 @@ func TestVUHandleStartStopRace(t *testing.T) { // testLog.Level = logrus.DebugLevel logEntry := logrus.NewEntry(testLog) - var vuID uint64 + runner := &minirunner.MiniRunner{} + runner.Fn = func(ctx context.Context, out chan<- stats.SampleContainer) error { + return nil + } + var vuID uint64 testIterations := 10000 returned := make(chan struct{}) + getVU := func() (lib.InitializedVU, error) { returned = make(chan struct{}) - return &minirunner.VU{ - ID: atomic.AddUint64(&vuID, 1), - R: 
&minirunner.MiniRunner{ - Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { - // TODO: do something - return nil - }, - }, - }, nil + return runner.NewVU(atomic.AddUint64(&vuID, 1), nil) } returnVU := func(v lib.InitializedVU) { @@ -190,6 +184,7 @@ func TestVUHandleStartStopRace(t *testing.T) { } type handleVUTest struct { + runner *minirunner.MiniRunner getVUCount uint32 returnVUCount uint32 interruptedIter int64 @@ -197,16 +192,7 @@ type handleVUTest struct { } func (h *handleVUTest) getVU() (lib.InitializedVU, error) { - atomic.AddUint32(&h.getVUCount, 1) - - return &minirunner.VU{ - R: &minirunner.MiniRunner{ - Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { - // TODO: do something - return nil - }, - }, - }, nil + return h.runner.NewVU(uint64(atomic.AddUint32(&h.getVUCount, 1)), nil) } func (h *handleVUTest) returnVU(_ lib.InitializedVU) { @@ -241,7 +227,7 @@ func TestVUHandleSimple(t *testing.T) { testLog.SetOutput(testutils.NewTestOutput(t)) // testLog.Level = logrus.DebugLevel logEntry := logrus.NewEntry(testLog) - test := new(handleVUTest) + test := &handleVUTest{runner: &minirunner.MiniRunner{}} ctx, cancel := context.WithCancel(context.Background()) defer cancel() vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, &BaseConfig{}, logEntry) @@ -280,7 +266,7 @@ func TestVUHandleSimple(t *testing.T) { testLog.SetOutput(testutils.NewTestOutput(t)) // testLog.Level = logrus.DebugLevel logEntry := logrus.NewEntry(testLog) - test := new(handleVUTest) + test := &handleVUTest{runner: &minirunner.MiniRunner{}} ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -321,7 +307,7 @@ func TestVUHandleSimple(t *testing.T) { testLog.SetOutput(testutils.NewTestOutput(t)) // testLog.Level = logrus.DebugLevel logEntry := logrus.NewEntry(testLog) - test := new(handleVUTest) + test := &handleVUTest{runner: &minirunner.MiniRunner{}} ctx, cancel := context.WithCancel(context.Background()) defer 
cancel() @@ -375,17 +361,12 @@ func BenchmarkVUHandleIterations(b *testing.B) { fullIterations = 0 } + runner := &minirunner.MiniRunner{} + runner.Fn = func(ctx context.Context, out chan<- stats.SampleContainer) error { + return nil + } getVU := func() (lib.InitializedVU, error) { - atomic.AddUint32(&getVUCount, 1) - - return &minirunner.VU{ - R: &minirunner.MiniRunner{ - Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { - // TODO: do something - return nil - }, - }, - }, nil + return runner.NewVU(uint64(atomic.AddUint32(&getVUCount, 1)), nil) } returnVU := func(_ lib.InitializedVU) { diff --git a/lib/executors.go b/lib/executors.go index 7d8e56851d4..e8ef95f49aa 100644 --- a/lib/executors.go +++ b/lib/executors.go @@ -115,8 +115,6 @@ type ScenarioState struct { Name, Executor string StartTime time.Time ProgressFn func() (float64, []string) - GetIter func() uint64 - GetGlobalIter func() uint64 } // InitVUFunc is just a shorthand so we don't have to type the function diff --git a/lib/netext/httpext/request.go b/lib/netext/httpext/request.go index b48cd45a17b..553203a9e62 100644 --- a/lib/netext/httpext/request.go +++ b/lib/netext/httpext/request.go @@ -210,7 +210,7 @@ func MakeRequest(ctx context.Context, preq *ParsedHTTPRequest) (*Response, error var transport http.RoundTripper = tracerTransport // Combine tags with common log fields - combinedLogFields := map[string]interface{}{"source": "http-debug", "vu": state.Vu, "iter": state.Iteration} + combinedLogFields := map[string]interface{}{"source": "http-debug", "vu": state.Vu, "iter": state.GetIteration()} for k, v := range tags { if _, present := combinedLogFields[k]; !present { combinedLogFields[k] = v diff --git a/lib/runner.go b/lib/runner.go index b5b8a6854a0..0667623e759 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -56,6 +56,8 @@ type VUActivationParams struct { Env, Tags map[string]string Exec, Scenario string GetScenarioVUID func() uint64 + IncrScIter func() int64 + 
IncrScIterGlobal func() int64 } // A Runner is a factory for VUs. It should precompute as much as possible upon diff --git a/lib/state.go b/lib/state.go index ab452ca1ee1..a8f64b5f284 100644 --- a/lib/state.go +++ b/lib/state.go @@ -68,18 +68,23 @@ type State struct { // TODO: maybe use https://golang.org/pkg/sync/#Pool ? BPool *bpool.BufferPool - Vu, Iteration uint64 - Tags map[string]string - ScenarioName string - scenarioVUID map[string]uint64 - scIterMx sync.RWMutex - scenarioVUIter map[string]uint64 + Vu uint64 + iteration int64 + Tags map[string]string + ScenarioName string + scenarioVUID map[string]uint64 + IncrScIter func() int64 + IncrScIterGlobal func() int64 + scIterMx sync.RWMutex + scenarioVUIter map[string]int64 + scIter, scIterGlobal int64 } // Init initializes some private state fields. func (s *State) Init() { s.scenarioVUID = make(map[string]uint64) - s.scenarioVUIter = make(map[string]uint64) + s.scenarioVUIter = make(map[string]int64) + s.iteration, s.scIter, s.scIterGlobal = -1, -1, -1 } // CloneTags makes a copy of the tags map and returns it. @@ -104,16 +109,53 @@ func (s *State) SetScenarioVUID(id uint64) { // GetScenarioVUIter returns the scenario-specific count of completed iterations // for this VU. -func (s *State) GetScenarioVUIter() uint64 { +func (s *State) GetScenarioVUIter() int64 { s.scIterMx.RLock() defer s.scIterMx.RUnlock() return s.scenarioVUIter[s.ScenarioName] } -// IncrScenarioVUIter increments the scenario-specific count of completed -// iterations for this VU. -func (s *State) IncrScenarioVUIter() { +// IncrIteration increments all iteration counters for the specific VU with this +// State. 
+func (s *State) IncrIteration() { s.scIterMx.Lock() - s.scenarioVUIter[s.ScenarioName]++ - s.scIterMx.Unlock() + defer s.scIterMx.Unlock() + + s.iteration++ + if _, ok := s.scenarioVUIter[s.ScenarioName]; ok { + s.scenarioVUIter[s.ScenarioName]++ + } else { + s.scenarioVUIter[s.ScenarioName] = 0 + } + if s.IncrScIter != nil { + s.scIter = s.IncrScIter() + } + if s.IncrScIterGlobal != nil { + s.scIterGlobal = s.IncrScIterGlobal() + } +} + +// GetScenarioLocalVUIter returns the iteration local to the scenario currently +// executing the VU with this State. +func (s *State) GetScenarioLocalVUIter() int64 { + s.scIterMx.RLock() + defer s.scIterMx.RUnlock() + return s.scIter +} + +// GetScenarioGlobalVUIter returns the global iteration of the scenario +// currently executing the VU with this State, or -1 if the executor doesn't +// keep track of global iterations. +func (s *State) GetScenarioGlobalVUIter() int64 { + s.scIterMx.RLock() + defer s.scIterMx.RUnlock() + if s.IncrScIterGlobal == nil { + return -1 + } + return s.scIterGlobal +} + +// GetIteration returns the iteration local to the VU with this State. +func (s *State) GetIteration() int64 { + return s.iteration } diff --git a/lib/testutils/minirunner/minirunner.go b/lib/testutils/minirunner/minirunner.go index a1ab34be3cf..f7d4e391108 100644 --- a/lib/testutils/minirunner/minirunner.go +++ b/lib/testutils/minirunner/minirunner.go @@ -59,7 +59,9 @@ func (r MiniRunner) MakeArchive() *lib.Archive { // NewVU returns a new VU with an incremental ID. func (r *MiniRunner) NewVU(id uint64, out chan<- stats.SampleContainer) (lib.InitializedVU, error) { - return &VU{R: r, Out: out, ID: id}, nil + state := &lib.State{Vu: id} + state.Init() + return &VU{R: r, Out: out, ID: id, state: state}, nil } // Setup calls the supplied mock setup() function, if present. 
@@ -127,6 +129,7 @@ type VU struct { R *MiniRunner Out chan<- stats.SampleContainer ID, Iteration uint64 + state *lib.State } // ActiveVU holds a VU and its activation parameters @@ -143,6 +146,11 @@ func (vu *VU) GetID() uint64 { // Activate the VU so it will be able to run code. func (vu *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { + vu.state.IncrScIter = params.IncrScIter + vu.state.IncrScIterGlobal = params.IncrScIterGlobal + + ctx := lib.WithState(params.RunContext, vu.state) + avu := &ActiveVU{ VU: vu, VUActivationParams: params, @@ -150,7 +158,7 @@ func (vu *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { } go func() { - <-params.RunContext.Done() + <-ctx.Done() // Wait for the VU to stop running, if it was, and prevent it from // running again for this activation @@ -180,13 +188,8 @@ func (vu *ActiveVU) RunOnce() error { <-vu.busy // unlock deactivation again }() - state := &lib.State{ - Vu: vu.ID, - Iteration: vu.Iteration, - } - newctx := lib.WithState(vu.RunContext, state) - - vu.Iteration++ + ctx := lib.WithState(vu.RunContext, vu.state) + vu.state.IncrIteration() - return vu.R.Fn(newctx, vu.Out) + return vu.R.Fn(ctx, vu.Out) } From 467759ecb8c502b6fe07d5130855b68fc38df873 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Wed, 2 Jun 2021 12:16:03 +0200 Subject: [PATCH 15/22] Fix sync of scenario iteration increments This moves the iteration state from lib.State to js.VU and js.ActiveVU, and adds synchronization between scenario iteration increments to ensure the information returned by k6/execution within a single VU iteration is stable. 
Resolves https://github.com/k6io/k6/pull/1863#discussion_r642850439 --- js/runner.go | 86 +++++++++++++++---- lib/executor/base_executor.go | 34 ++++---- lib/executor/constant_arrival_rate.go | 35 ++++---- lib/executor/constant_arrival_rate_test.go | 3 +- lib/executor/constant_vus.go | 2 +- lib/executor/externally_controlled.go | 8 +- lib/executor/helpers.go | 22 ++--- lib/executor/per_vu_iterations.go | 2 +- lib/executor/ramping_arrival_rate.go | 35 ++++---- lib/executor/ramping_arrival_rate_test.go | 3 +- lib/executor/ramping_vus.go | 8 +- lib/executor/shared_iterations.go | 35 ++++---- lib/executor/shared_iterations_test.go | 9 +- lib/executor/vu_handle.go | 31 ++++--- lib/executor/vu_handle_test.go | 18 ++-- lib/netext/httpext/request.go | 2 +- lib/runner.go | 15 ++-- lib/state.go | 96 ++++------------------ lib/testutils/minirunner/minirunner.go | 91 ++++++++++++++++---- 19 files changed, 303 insertions(+), 232 deletions(-) diff --git a/js/runner.go b/js/runner.go index 0b12820a262..417915d7088 100644 --- a/js/runner.go +++ b/js/runner.go @@ -208,6 +208,7 @@ func (r *Runner) newVU(id uint64, samplesOut chan<- stats.SampleContainer) (*VU, vu := &VU{ ID: id, + iteration: int64(-1), BundleInstance: *bi, Runner: r, Transport: transport, @@ -217,6 +218,8 @@ func (r *Runner) newVU(id uint64, samplesOut chan<- stats.SampleContainer) (*VU, Console: r.console, BPool: bpool.NewBufferPool(100), Samples: samplesOut, + scenarioID: make(map[string]uint64), + scenarioIter: make(map[string]int64), } vu.state = &lib.State{ @@ -233,7 +236,6 @@ func (r *Runner) newVU(id uint64, samplesOut chan<- stats.SampleContainer) (*VU, Tags: vu.Runner.Bundle.Options.RunTags.CloneTags(), Group: r.defaultGroup, } - vu.state.Init() vu.Runtime.Set("console", common.Bind(vu.Runtime, vu.Console, vu.Context)) // This is here mostly so if someone tries they get a nice message @@ -532,6 +534,7 @@ type VU struct { CookieJar *cookiejar.Jar TLSConfig *tls.Config ID uint64 + iteration int64 Console 
*console BPool *bpool.BufferPool @@ -541,6 +544,10 @@ type VU struct { setupData goja.Value state *lib.State + // ID of this VU in each scenario + scenarioID map[string]uint64 + // count of iterations executed by this VU in each scenario + scenarioIter map[string]int64 } // Verify that interfaces are implemented @@ -554,6 +561,18 @@ type ActiveVU struct { *VU *lib.VUActivationParams busy chan struct{} + + scenarioName string + // Used to synchronize iteration increments for scenarios between VUs. + iterSync chan struct{} + // Returns the iteration number across all VUs in the current scenario + // unique to this single k6 instance. + getNextScLocalIter func() int64 + // Returns the iteration number across all VUs in the current scenario + // unique globally across k6 instances (taking into account execution + // segments). + getNextScGlobalIter func() int64 + scIterLocal, scIterGlobal int64 } // GetID returns the unique VU ID. @@ -589,7 +608,7 @@ func (u *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { u.state.Tags["vu"] = strconv.FormatUint(u.ID, 10) } if opts.SystemTags.Has(stats.TagIter) { - u.state.Tags["iter"] = strconv.FormatInt(u.state.GetIteration(), 10) + u.state.Tags["iter"] = strconv.FormatInt(u.iteration, 10) } if opts.SystemTags.Has(stats.TagGroup) { u.state.Tags["group"] = u.state.Group.Path @@ -602,20 +621,34 @@ ctx = lib.WithState(ctx, u.state) params.RunContext = ctx *u.Context = ctx - u.state.ScenarioName = params.Scenario - if params.GetScenarioVUID != nil { - if _, ok := u.state.GetScenarioVUID(); !ok { - u.state.SetScenarioVUID(params.GetScenarioVUID()) + if params.GetNextScVUID != nil { + if _, ok := u.scenarioID[params.Scenario]; !ok { + u.state.VUIDScenario = params.GetNextScVUID() + u.scenarioID[params.Scenario] = u.state.VUIDScenario } } - u.state.IncrScIter = params.IncrScIter - u.state.IncrScIterGlobal = params.IncrScIterGlobal + u.state.GetScenarioVUIter 
= func() int64 { + return u.scenarioIter[params.Scenario] + } avu := &ActiveVU{ - VU: u, - VUActivationParams: params, - busy: make(chan struct{}, 1), + VU: u, + VUActivationParams: params, + busy: make(chan struct{}, 1), + scenarioName: params.Scenario, + iterSync: params.IterSync, + scIterLocal: int64(-1), + scIterGlobal: int64(-1), + getNextScLocalIter: params.GetNextScLocalIter, + getNextScGlobalIter: params.GetNextScGlobalIter, + } + + u.state.GetScenarioLocalVUIter = func() int64 { + return avu.scIterLocal + } + u.state.GetScenarioGlobalVUIter = func() int64 { + return avu.scIterGlobal } go func() { @@ -667,8 +700,8 @@ func (u *ActiveVU) RunOnce() error { panic(fmt.Sprintf("function '%s' not found in exports", u.Exec)) } - u.state.IncrIteration() - if err := u.Runtime.Set("__ITER", u.state.GetIteration()); err != nil { + u.incrIteration() + if err := u.Runtime.Set("__ITER", u.iteration); err != nil { panic(fmt.Errorf("error setting __ITER in goja runtime: %w", err)) } @@ -702,7 +735,7 @@ func (u *VU) runFn( opts := &u.Runner.Bundle.Options if opts.SystemTags.Has(stats.TagIter) { - u.state.Tags["iter"] = strconv.FormatInt(u.state.GetIteration(), 10) + u.state.Tags["iter"] = strconv.FormatInt(u.state.Iteration, 10) } defer func() { @@ -745,6 +778,31 @@ func (u *VU) runFn( return v, isFullIteration, endTime.Sub(startTime), err } +func (u *ActiveVU) incrIteration() { + u.iteration++ + u.state.Iteration = u.iteration + + if u.iterSync != nil { + // block other VUs from incrementing scenario iterations + u.iterSync <- struct{}{} + defer func() { + <-u.iterSync // unlock + }() + } + + if _, ok := u.scenarioIter[u.scenarioName]; ok { + u.scenarioIter[u.scenarioName]++ + } else { + u.scenarioIter[u.scenarioName] = 0 + } + if u.getNextScLocalIter != nil { + u.scIterLocal = u.getNextScLocalIter() + } + if u.getNextScGlobalIter != nil { + u.scIterGlobal = u.getNextScGlobalIter() + } +} + type scriptException struct { inner *goja.Exception } diff --git 
a/lib/executor/base_executor.go b/lib/executor/base_executor.go index b43e8f3e5a5..74455170da0 100644 --- a/lib/executor/base_executor.go +++ b/lib/executor/base_executor.go @@ -39,21 +39,23 @@ import ( type BaseExecutor struct { config lib.ExecutorConfig executionState *lib.ExecutionState - localVUID *uint64 // counter for assigning executor-specific VU IDs - localIter *int64 // counter for keeping track of all VU iterations completed by this executor - logger *logrus.Entry - progress *pb.ProgressBar + VUIDLocal *uint64 // counter for assigning executor-specific VU IDs + // Counter for keeping track of all VU iterations completed by this executor + // in the current (local) k6 instance. + iterLocal *int64 + logger *logrus.Entry + progress *pb.ProgressBar } // NewBaseExecutor returns an initialized BaseExecutor func NewBaseExecutor(config lib.ExecutorConfig, es *lib.ExecutionState, logger *logrus.Entry) *BaseExecutor { // Start at -1 so that the first iteration can be 0 - startLocalIter := int64(-1) + startIterLocal := int64(-1) return &BaseExecutor{ config: config, executionState: es, - localVUID: new(uint64), - localIter: &startLocalIter, + VUIDLocal: new(uint64), + iterLocal: &startIterLocal, logger: logger, progress: pb.New( pb.WithLeft(config.GetName), @@ -73,10 +75,16 @@ func (bs BaseExecutor) GetConfig() lib.ExecutorConfig { return bs.config } -// GetNextLocalVUID increments and returns the next VU ID that's specific for +// getNextLocalVUID increments and returns the next VU ID that's specific for // this executor (i.e. not global like __VU). -func (bs BaseExecutor) GetNextLocalVUID() uint64 { - return atomic.AddUint64(bs.localVUID, 1) +func (bs BaseExecutor) getNextLocalVUID() uint64 { + return atomic.AddUint64(bs.VUIDLocal, 1) +} + +// getNextLocalIter increments and returns the next local iteration number, for +// keeping track of total iterations executed by this scenario/executor. 
+func (bs *BaseExecutor) getNextLocalIter() int64 { + return atomic.AddInt64(bs.iterLocal, 1) } // GetLogger returns the executor logger entry. @@ -101,9 +109,3 @@ func (bs BaseExecutor) getMetricTags(vuID *uint64) *stats.SampleTags { } return stats.IntoSampleTags(&tags) } - -// incrScenarioIter increments the counter of completed iterations by all VUs -// for this executor. -func (bs *BaseExecutor) incrScenarioIter() int64 { - return atomic.AddInt64(bs.localIter, 1) -} diff --git a/lib/executor/constant_arrival_rate.go b/lib/executor/constant_arrival_rate.go index 40eaf374693..847faf2319f 100644 --- a/lib/executor/constant_arrival_rate.go +++ b/lib/executor/constant_arrival_rate.go @@ -168,11 +168,10 @@ func (carc ConstantArrivalRateConfig) GetExecutionRequirements(et *lib.Execution func (carc ConstantArrivalRateConfig) NewExecutor( es *lib.ExecutionState, logger *logrus.Entry, ) (lib.Executor, error) { - startGlobalIter := int64(-1) return &ConstantArrivalRate{ BaseExecutor: NewBaseExecutor(&carc, es, logger), config: carc, - globalIter: &startGlobalIter, + iterMx: &sync.Mutex{}, }, nil } @@ -185,11 +184,10 @@ func (carc ConstantArrivalRateConfig) HasWork(et *lib.ExecutionTuple) bool { // specific period. type ConstantArrivalRate struct { *BaseExecutor - config ConstantArrivalRateConfig - et *lib.ExecutionTuple - segIdx *lib.SegmentedIndex - iterMx sync.Mutex - globalIter *int64 + config ConstantArrivalRateConfig + et *lib.ExecutionTuple + iterMx *sync.Mutex + segIdx *lib.SegmentedIndex } // Make sure we implement the lib.Executor interface. @@ -207,19 +205,16 @@ func (car *ConstantArrivalRate) Init(ctx context.Context) error { return err } -// incrGlobalIter increments the global iteration count for this executor, -// taking into account the configured execution segment. 
-func (car *ConstantArrivalRate) incrGlobalIter() int64 { +// getNextGlobalIter advances and returns the next global iteration number for +// this executor, taking into account the configured execution segment. +// Unlike the local iteration number returned by getNextLocalIter(), this +// iteration number will be unique across k6 instances. +func (car *ConstantArrivalRate) getNextGlobalIter() int64 { car.iterMx.Lock() defer car.iterMx.Unlock() car.segIdx.Next() - atomic.StoreInt64(car.globalIter, car.segIdx.GetUnscaled()-1) - return atomic.LoadInt64(car.globalIter) -} - -// getGlobalIter returns the global iteration count for this executor. -func (car *ConstantArrivalRate) getGlobalIter() int64 { - return atomic.LoadInt64(car.globalIter) + // iterations are 0-based + return car.segIdx.GetUnscaled() - 1 } // Run executes a constant number of iterations per second. @@ -302,12 +297,16 @@ func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- stats.S activeVUsWg.Done() } + // Channel for synchronizing scenario-specific iteration increments + iterSync := make(chan struct{}, 1) runIterationBasic := getIterationRunner(car.executionState, car.logger) activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { activeVUsWg.Add(1) activeVU := initVU.Activate(getVUActivationParams( maxDurationCtx, car.config.BaseConfig, returnVU, - car.GetNextLocalVUID, car.incrScenarioIter, car.incrGlobalIter)) + car.getNextLocalVUID, car.getNextLocalIter, car.getNextGlobalIter, + iterSync, + )) car.executionState.ModCurrentlyActiveVUsCount(+1) atomic.AddUint64(&activeVUsCount, 1) vusPool.AddVU(maxDurationCtx, activeVU, runIterationBasic) diff --git a/lib/executor/constant_arrival_rate_test.go b/lib/executor/constant_arrival_rate_test.go index 741b0a158ac..dd0b06168fd 100644 --- a/lib/executor/constant_arrival_rate_test.go +++ b/lib/executor/constant_arrival_rate_test.go @@ -377,8 +377,9 @@ func TestConstantArrivalRateGlobalIters(t *testing.T) { gotIters := []int64{} var mx 
sync.Mutex runner.Fn = func(ctx context.Context, _ chan<- stats.SampleContainer) error { + state := lib.GetState(ctx) mx.Lock() - gotIters = append(gotIters, executor.(*ConstantArrivalRate).getGlobalIter()) + gotIters = append(gotIters, state.GetScenarioGlobalVUIter()) mx.Unlock() return nil } diff --git a/lib/executor/constant_vus.go b/lib/executor/constant_vus.go index 43f666c66c3..8fc20ae9b78 100644 --- a/lib/executor/constant_vus.go +++ b/lib/executor/constant_vus.go @@ -194,7 +194,7 @@ func (clv ConstantVUs) Run(parentCtx context.Context, out chan<- stats.SampleCon activeVU := initVU.Activate( getVUActivationParams(ctx, clv.config.BaseConfig, returnVU, - clv.GetNextLocalVUID, clv.incrScenarioIter, nil)) + clv.getNextLocalVUID, clv.getNextLocalIter, nil, iterSync)) for { select { diff --git a/lib/executor/externally_controlled.go b/lib/executor/externally_controlled.go index 97a6bfb3d9a..006d2caac2f 100644 --- a/lib/executor/externally_controlled.go +++ b/lib/executor/externally_controlled.go @@ -362,7 +362,8 @@ func (rs *externallyControlledRunState) newManualVUHandle( ctx, cancel := context.WithCancel(rs.ctx) return &manualVUHandle{ vuHandle: newStoppedVUHandle(ctx, getVU, returnVU, - rs.executor.GetNextLocalVUID, &rs.executor.config.BaseConfig, logger), + rs.executor.getNextLocalVUID, rs.executor.getNextLocalIter, + rs.iterSync, &rs.executor.config.BaseConfig, logger), initVU: initVU, wg: &wg, cancelVU: cancel, @@ -383,6 +384,8 @@ type externallyControlledRunState struct { currentlyPaused bool // whether the executor is currently paused runIteration func(context.Context, lib.ActiveVU) bool // a helper closure function that runs a single iteration + // channel for synchronizing scenario-specific iteration increments + iterSync chan struct{} } // retrieveStartMaxVUs gets and initializes the (scaled) number of MaxVUs @@ -521,6 +524,8 @@ func (mex *ExternallyControlled) Run(parentCtx context.Context, out chan<- stats ).Debug("Starting executor run...") 
startMaxVUs := mex.executionState.Options.ExecutionSegment.Scale(mex.config.MaxVUs.Int64) + // Channel for synchronizing scenario-specific iteration increments + iterSync := make(chan struct{}, 1) runState := &externallyControlledRunState{ ctx: ctx, executor: mex, @@ -531,6 +536,7 @@ func (mex *ExternallyControlled) Run(parentCtx context.Context, out chan<- stats activeVUsCount: new(int64), maxVUs: new(int64), runIteration: getIterationRunner(mex.executionState, mex.logger), + iterSync: iterSync, } *runState.maxVUs = startMaxVUs if err = runState.retrieveStartMaxVUs(); err != nil { diff --git a/lib/executor/helpers.go b/lib/executor/helpers.go index 81ec1777e6e..4bb2c76f3bf 100644 --- a/lib/executor/helpers.go +++ b/lib/executor/helpers.go @@ -225,17 +225,19 @@ func getArrivalRatePerSec(scaledArrivalRate *big.Rat) *big.Rat { // TODO: Refactor this, maybe move all scenario things to an embedded struct? func getVUActivationParams( ctx context.Context, conf BaseConfig, deactivateCallback func(lib.InitializedVU), - getScenarioVUID func() uint64, incrScIter func() int64, incrScIterGlobal func() int64, + getNextScVUID func() uint64, getNextScLocalIter, getNextScGlobalIter func() int64, + iterSync chan struct{}, ) *lib.VUActivationParams { return &lib.VUActivationParams{ - RunContext: ctx, - Scenario: conf.Name, - Exec: conf.GetExec(), - Env: conf.GetEnv(), - Tags: conf.GetTags(), - DeactivateCallback: deactivateCallback, - GetScenarioVUID: getScenarioVUID, - IncrScIter: incrScIter, - IncrScIterGlobal: incrScIterGlobal, + RunContext: ctx, + Scenario: conf.Name, + Exec: conf.GetExec(), + Env: conf.GetEnv(), + Tags: conf.GetTags(), + DeactivateCallback: deactivateCallback, + GetNextScVUID: getNextScVUID, + IterSync: iterSync, + GetNextScLocalIter: getNextScLocalIter, + GetNextScGlobalIter: getNextScGlobalIter, } } diff --git a/lib/executor/per_vu_iterations.go b/lib/executor/per_vu_iterations.go index f019478d5b7..3ffc5c04e4e 100644 --- a/lib/executor/per_vu_iterations.go 
+++ b/lib/executor/per_vu_iterations.go @@ -220,7 +220,7 @@ func (pvi PerVUIterations) Run(parentCtx context.Context, out chan<- stats.Sampl vuID := initVU.GetID() activeVU := initVU.Activate( getVUActivationParams(ctx, pvi.config.BaseConfig, returnVU, - pvi.GetNextLocalVUID, pvi.incrScenarioIter, nil)) + pvi.getNextLocalVUID, pvi.getNextLocalIter, nil, iterSync)) for i := int64(0); i < iterations; i++ { select { diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index 7226c807945..623a7b91b13 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ -158,11 +158,10 @@ func (varc RampingArrivalRateConfig) GetExecutionRequirements(et *lib.ExecutionT func (varc RampingArrivalRateConfig) NewExecutor( es *lib.ExecutionState, logger *logrus.Entry, ) (lib.Executor, error) { - startGlobalIter := int64(-1) return &RampingArrivalRate{ BaseExecutor: NewBaseExecutor(&varc, es, logger), config: varc, - globalIter: &startGlobalIter, + iterMx: &sync.Mutex{}, }, nil } @@ -176,11 +175,10 @@ func (varc RampingArrivalRateConfig) HasWork(et *lib.ExecutionTuple) bool { // TODO: combine with the ConstantArrivalRate? type RampingArrivalRate struct { *BaseExecutor - config RampingArrivalRateConfig - et *lib.ExecutionTuple - segIdx *lib.SegmentedIndex - iterMx sync.Mutex - globalIter *int64 + config RampingArrivalRateConfig + et *lib.ExecutionTuple + iterMx *sync.Mutex + segIdx *lib.SegmentedIndex } // Make sure we implement the lib.Executor interface. @@ -198,19 +196,16 @@ func (varr *RampingArrivalRate) Init(ctx context.Context) error { return err //nolint: wrapcheck } -// incrGlobalIter increments the global iteration count for this executor, -// taking into account the configured execution segment. -func (varr *RampingArrivalRate) incrGlobalIter() int64 { +// getNextGlobalIter advances and returns the next global iteration number for +// this executor, taking into account the configured execution segment. 
+// Unlike the local iteration number returned by getNextLocalIter(), this +// iteration number will be unique across k6 instances. +func (varr *RampingArrivalRate) getNextGlobalIter() int64 { varr.iterMx.Lock() defer varr.iterMx.Unlock() varr.segIdx.Next() - atomic.StoreInt64(varr.globalIter, varr.segIdx.GetUnscaled()-1) - return atomic.LoadInt64(varr.globalIter) -} - -// getGlobalIter returns the global iteration count for this executor. -func (varr *RampingArrivalRate) getGlobalIter() int64 { - return atomic.LoadInt64(varr.globalIter) + // iterations are 0-based + return varr.segIdx.GetUnscaled() - 1 } // cal calculates the transtitions between stages and gives the next full value produced by the @@ -426,13 +421,15 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.S runIterationBasic := getIterationRunner(varr.executionState, varr.logger) + // Channel for synchronizing scenario-specific iteration increments + iterSync := make(chan struct{}, 1) activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { activeVUsWg.Add(1) activeVU := initVU.Activate( getVUActivationParams( maxDurationCtx, varr.config.BaseConfig, returnVU, - varr.GetNextLocalVUID, varr.incrScenarioIter, - varr.incrGlobalIter)) + varr.getNextLocalVUID, varr.getNextLocalIter, + varr.getNextGlobalIter, iterSync)) varr.executionState.ModCurrentlyActiveVUsCount(+1) atomic.AddUint64(&activeVUsCount, 1) diff --git a/lib/executor/ramping_arrival_rate_test.go b/lib/executor/ramping_arrival_rate_test.go index b2b5f6bf9c8..2177fa77671 100644 --- a/lib/executor/ramping_arrival_rate_test.go +++ b/lib/executor/ramping_arrival_rate_test.go @@ -737,8 +737,9 @@ func TestRampingArrivalRateGlobalIters(t *testing.T) { gotIters := []int64{} var mx sync.Mutex runner.Fn = func(ctx context.Context, _ chan<- stats.SampleContainer) error { + state := lib.GetState(ctx) mx.Lock() - gotIters = append(gotIters, executor.(*RampingArrivalRate).getGlobalIter()) + gotIters = append(gotIters, 
state.GetScenarioGlobalVUIter()) mx.Unlock() return nil } diff --git a/lib/executor/ramping_vus.go b/lib/executor/ramping_vus.go index e3359aa1162..9e5da1272e1 100644 --- a/lib/executor/ramping_vus.go +++ b/lib/executor/ramping_vus.go @@ -569,11 +569,15 @@ func (vlv RampingVUs) Run(parentCtx context.Context, out chan<- stats.SampleCont StartTime: startTime, ProgressFn: progressFn, }) + + // Channel for synchronizing scenario-specific iteration increments + iterSync := make(chan struct{}, 1) vuHandles := make([]*vuHandle, maxVUs) for i := uint64(0); i < maxVUs; i++ { vuHandle := newStoppedVUHandle( - maxDurationCtx, getVU, returnVU, vlv.GetNextLocalVUID, - &vlv.config.BaseConfig, vlv.logger.WithField("vuNum", i)) + maxDurationCtx, getVU, returnVU, vlv.getNextLocalVUID, + vlv.getNextLocalIter, iterSync, &vlv.config.BaseConfig, + vlv.logger.WithField("vuNum", i)) go vuHandle.runLoopsIfPossible(runIteration) vuHandles[i] = vuHandle } diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go index 60bd645ab52..a5591cb83fd 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -148,11 +148,10 @@ func (sic SharedIterationsConfig) GetExecutionRequirements(et *lib.ExecutionTupl func (sic SharedIterationsConfig) NewExecutor( es *lib.ExecutionState, logger *logrus.Entry, ) (lib.Executor, error) { - startGlobalIter := int64(-1) return &SharedIterations{ BaseExecutor: NewBaseExecutor(sic, es, logger), config: sic, - globalIter: &startGlobalIter, + iterMx: &sync.Mutex{}, }, nil } @@ -160,11 +159,10 @@ func (sic SharedIterationsConfig) NewExecutor( // all shared by the configured VUs. type SharedIterations struct { *BaseExecutor - config SharedIterationsConfig - et *lib.ExecutionTuple - segIdx *lib.SegmentedIndex - iterMx sync.Mutex - globalIter *int64 + config SharedIterationsConfig + et *lib.ExecutionTuple + iterMx *sync.Mutex + segIdx *lib.SegmentedIndex } // Make sure we implement the lib.Executor interface. 
@@ -187,19 +185,16 @@ func (si *SharedIterations) Init(ctx context.Context) error { return err } -// incrGlobalIter increments the global iteration count for this executor, -// taking into account the configured execution segment. -func (si *SharedIterations) incrGlobalIter() int64 { +// getNextGlobalIter advances and returns the next global iteration number for +// this executor, taking into account the configured execution segment. +// Unlike the local iteration number returned by getNextLocalIter(), this +// iteration number will be unique across k6 instances. +func (si *SharedIterations) getNextGlobalIter() int64 { si.iterMx.Lock() defer si.iterMx.Unlock() si.segIdx.Next() - atomic.StoreInt64(si.globalIter, si.segIdx.GetUnscaled()-1) - return atomic.LoadInt64(si.globalIter) -} - -// getGlobalIter returns the global iteration count for this executor. -func (si *SharedIterations) getGlobalIter() int64 { - return atomic.LoadInt64(si.globalIter) + // iterations are 0-based + return si.segIdx.GetUnscaled() - 1 } // Run executes a specific total number of iterations, which are all shared by @@ -267,13 +262,15 @@ func (si SharedIterations) Run(parentCtx context.Context, out chan<- stats.Sampl activeVUs.Done() } + // Channel for synchronizing scenario-specific iteration increments + iterSync := make(chan struct{}, 1) handleVU := func(initVU lib.InitializedVU) { ctx, cancel := context.WithCancel(maxDurationCtx) defer cancel() activeVU := initVU.Activate(getVUActivationParams( - ctx, si.config.BaseConfig, returnVU, si.GetNextLocalVUID, - si.incrScenarioIter, si.incrGlobalIter, + ctx, si.config.BaseConfig, returnVU, si.getNextLocalVUID, + si.getNextLocalIter, si.getNextGlobalIter, iterSync, )) for { diff --git a/lib/executor/shared_iterations_test.go b/lib/executor/shared_iterations_test.go index 7d92fe17a60..3e0fbb4f8b9 100644 --- a/lib/executor/shared_iterations_test.go +++ b/lib/executor/shared_iterations_test.go @@ -159,9 +159,6 @@ func 
TestSharedIterationsGlobalIters(t *testing.T) { }{ {"0,1/4,3/4,1", "0:1/4", []int64{1, 6, 11, 16, 21, 26, 31, 36, 41, 46}}, {"0,1/4,3/4,1", "1/4:3/4", []int64{0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40, 42, 44, 45, 47, 49}}, - // FIXME: The skewed values are because of the time.Sleep() in the - // VU function below. - // {"0,1/4,3/4,1", "1/4:3/4", []int64{4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40, 42, 44, 45, 47, 49, 49, 49}}, {"0,1/4,3/4,1", "3/4:1", []int64{3, 8, 13, 18, 23, 28, 33, 38, 43, 48}}, } @@ -184,11 +181,9 @@ func TestSharedIterationsGlobalIters(t *testing.T) { gotIters := []int64{} var mx sync.Mutex runner.Fn = func(ctx context.Context, _ chan<- stats.SampleContainer) error { + state := lib.GetState(ctx) mx.Lock() - gotIters = append(gotIters, executor.(*SharedIterations).getGlobalIter()) - // FIXME: This delay minimizes chances of flakiness, but - // produces skewed values. :-/ - // time.Sleep(10 * time.Millisecond) + gotIters = append(gotIters, state.GetScenarioGlobalVUIter()) mx.Unlock() return nil } diff --git a/lib/executor/vu_handle.go b/lib/executor/vu_handle.go index e17e443291e..c93f0098fc8 100644 --- a/lib/executor/vu_handle.go +++ b/lib/executor/vu_handle.go @@ -88,12 +88,14 @@ short names for input: // - it's not required but preferable, if where possible to not reactivate VUs and to reuse context // as this speed ups the execution type vuHandle struct { - mutex *sync.Mutex - parentCtx context.Context - getVU func() (lib.InitializedVU, error) - returnVU func(lib.InitializedVU) - getScenarioVUID func() uint64 - config *BaseConfig + mutex *sync.Mutex + parentCtx context.Context + getVU func() (lib.InitializedVU, error) + returnVU func(lib.InitializedVU) + getScenarioVUID func() uint64 + getScenarioLocalIter func() int64 + iterSync chan struct{} + config *BaseConfig initVU lib.InitializedVU activeVU lib.ActiveVU @@ -110,19 +112,22 @@ type vuHandle 
struct { func newStoppedVUHandle( parentCtx context.Context, getVU func() (lib.InitializedVU, error), returnVU func(lib.InitializedVU), getScenarioVUID func() uint64, + getScenarioLocalIter func() int64, iterSync chan struct{}, config *BaseConfig, logger *logrus.Entry, ) *vuHandle { ctx, cancel := context.WithCancel(parentCtx) return &vuHandle{ - mutex: &sync.Mutex{}, - parentCtx: parentCtx, - getVU: getVU, - getScenarioVUID: getScenarioVUID, - config: config, + mutex: &sync.Mutex{}, + parentCtx: parentCtx, + getVU: getVU, + getScenarioVUID: getScenarioVUID, + getScenarioLocalIter: getScenarioLocalIter, + config: config, canStartIter: make(chan struct{}), state: stopped, + iterSync: iterSync, ctx: ctx, cancel: cancel, @@ -149,7 +154,9 @@ func (vh *vuHandle) start() (err error) { return err } - vh.activeVU = vh.initVU.Activate(getVUActivationParams(vh.ctx, *vh.config, vh.returnVU, vh.getScenarioVUID, nil, nil)) + vh.activeVU = vh.initVU.Activate(getVUActivationParams( + vh.ctx, *vh.config, vh.returnVU, vh.getScenarioVUID, + vh.getScenarioLocalIter, nil, vh.iterSync)) close(vh.canStartIter) vh.changeState(starting) } diff --git a/lib/executor/vu_handle_test.go b/lib/executor/vu_handle_test.go index b16dd6f1f83..e53d7fa34c6 100644 --- a/lib/executor/vu_handle_test.go +++ b/lib/executor/vu_handle_test.go @@ -61,7 +61,8 @@ func TestVUHandleRace(t *testing.T) { } } - vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, &BaseConfig{}, logEntry) + iterSync := make(chan struct{}, 1) + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, nil, iterSync, &BaseConfig{}, logEntry) go vuHandle.runLoopsIfPossible(runIter) var wg sync.WaitGroup wg.Add(3) @@ -153,7 +154,8 @@ func TestVUHandleStartStopRace(t *testing.T) { } } - vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, &BaseConfig{}, logEntry) + iterSync := make(chan struct{}, 1) + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, nil, iterSync, &BaseConfig{}, logEntry) go 
vuHandle.runLoopsIfPossible(runIter) for i := 0; i < testIterations; i++ { err := vuHandle.start() @@ -230,7 +232,8 @@ func TestVUHandleSimple(t *testing.T) { test := &handleVUTest{runner: &minirunner.MiniRunner{}} ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, &BaseConfig{}, logEntry) + iterSync := make(chan struct{}, 1) + vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, nil, iterSync, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { @@ -270,7 +273,8 @@ func TestVUHandleSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, &BaseConfig{}, logEntry) + iterSync := make(chan struct{}, 1) + vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, nil, iterSync, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { @@ -311,7 +315,8 @@ func TestVUHandleSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, &BaseConfig{}, logEntry) + iterSync := make(chan struct{}, 1) + vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, nil, iterSync, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { @@ -389,7 +394,8 @@ func BenchmarkVUHandleIterations(b *testing.B) { reset() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, &BaseConfig{}, logEntry) + iterSync := make(chan struct{}, 1) + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, nil, iterSync, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { diff --git a/lib/netext/httpext/request.go b/lib/netext/httpext/request.go index 553203a9e62..b48cd45a17b 100644 --- a/lib/netext/httpext/request.go +++ 
b/lib/netext/httpext/request.go @@ -210,7 +210,7 @@ func MakeRequest(ctx context.Context, preq *ParsedHTTPRequest) (*Response, error var transport http.RoundTripper = tracerTransport // Combine tags with common log fields - combinedLogFields := map[string]interface{}{"source": "http-debug", "vu": state.Vu, "iter": state.GetIteration()} + combinedLogFields := map[string]interface{}{"source": "http-debug", "vu": state.Vu, "iter": state.Iteration} for k, v := range tags { if _, present := combinedLogFields[k]; !present { combinedLogFields[k] = v diff --git a/lib/runner.go b/lib/runner.go index 0667623e759..ff43c5b8c42 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -51,13 +51,14 @@ type InitializedVU interface { // VUActivationParams are supplied by each executor when it retrieves a VU from // the buffer pool and activates it for use. type VUActivationParams struct { - RunContext context.Context - DeactivateCallback func(InitializedVU) - Env, Tags map[string]string - Exec, Scenario string - GetScenarioVUID func() uint64 - IncrScIter func() int64 - IncrScIterGlobal func() int64 + RunContext context.Context + DeactivateCallback func(InitializedVU) + Env, Tags map[string]string + Exec, Scenario string + GetNextScVUID func() uint64 + IterSync chan struct{} + GetNextScLocalIter func() int64 + GetNextScGlobalIter func() int64 } // A Runner is a factory for VUs. It should precompute as much as possible upon diff --git a/lib/state.go b/lib/state.go index a8f64b5f284..ec286f121b1 100644 --- a/lib/state.go +++ b/lib/state.go @@ -26,7 +26,6 @@ import ( "net" "net/http" "net/http/cookiejar" - "sync" "github.com/oxtoacart/bpool" "github.com/sirupsen/logrus" @@ -68,23 +67,20 @@ type State struct { // TODO: maybe use https://golang.org/pkg/sync/#Pool ? 
BPool *bpool.BufferPool - Vu uint64 - iteration int64 - Tags map[string]string - ScenarioName string - scenarioVUID map[string]uint64 - IncrScIter func() int64 - IncrScIterGlobal func() int64 - scIterMx sync.RWMutex - scenarioVUIter map[string]int64 - scIter, scIterGlobal int64 -} - -// Init initializes some private state fields. -func (s *State) Init() { - s.scenarioVUID = make(map[string]uint64) - s.scenarioVUIter = make(map[string]int64) - s.iteration, s.scIter, s.scIterGlobal = -1, -1, -1 + Vu, VUIDScenario uint64 + Iteration int64 + Tags map[string]string + // These will be assigned on VU activation. + // Returns the iteration number of this VU in the current scenario. + GetScenarioVUIter func() uint64 + // Returns the iteration number across all VUs in the current scenario + // unique to this single k6 instance. + // TODO: Maybe this doesn't belong here but in ScenarioState? + GetScenarioLocalVUIter func() uint64 + // Returns the iteration number across all VUs in the current scenario + // unique globally across k6 instances (taking into account execution + // segments). + GetScenarioGlobalVUIter func() uint64 } // CloneTags makes a copy of the tags map and returns it. @@ -95,67 +91,3 @@ func (s *State) CloneTags() map[string]string { } return tags } - -// GetScenarioVUID returns the scenario-specific ID of this VU. -func (s *State) GetScenarioVUID() (uint64, bool) { - id, ok := s.scenarioVUID[s.ScenarioName] - return id, ok -} - -// SetScenarioVUID sets the scenario-specific ID for this VU. -func (s *State) SetScenarioVUID(id uint64) { - s.scenarioVUID[s.ScenarioName] = id -} - -// GetScenarioVUIter returns the scenario-specific count of completed iterations -// for this VU. -func (s *State) GetScenarioVUIter() int64 { - s.scIterMx.RLock() - defer s.scIterMx.RUnlock() - return s.scenarioVUIter[s.ScenarioName] -} - -// IncrIteration increments all iteration counters for the specific VU with this -// State. 
-func (s *State) IncrIteration() { - s.scIterMx.Lock() - defer s.scIterMx.Unlock() - - s.iteration++ - if _, ok := s.scenarioVUIter[s.ScenarioName]; ok { - s.scenarioVUIter[s.ScenarioName]++ - } else { - s.scenarioVUIter[s.ScenarioName] = 0 - } - if s.IncrScIter != nil { - s.scIter = s.IncrScIter() - } - if s.IncrScIterGlobal != nil { - s.scIterGlobal = s.IncrScIterGlobal() - } -} - -// GetScenarioLocalVUIter returns the iteration local to the scenario currently -// executing the VU with this State. -func (s *State) GetScenarioLocalVUIter() int64 { - s.scIterMx.RLock() - defer s.scIterMx.RUnlock() - return s.scIter -} - -// GetScenarioGlobalVUIter returns the global iteration of the scenario -// currently executing the VU with this State, or -1 if the executor doesn't -// keep track of global iterations. -func (s *State) GetScenarioGlobalVUIter() int64 { - s.scIterMx.RLock() - defer s.scIterMx.RUnlock() - if s.IncrScIterGlobal == nil { - return -1 - } - return s.scIterGlobal -} - -// GetIteration returns the iteration local to the VU with this State. -func (s *State) GetIteration() int64 { - return s.iteration -} diff --git a/lib/testutils/minirunner/minirunner.go b/lib/testutils/minirunner/minirunner.go index f7d4e391108..54a405a36fd 100644 --- a/lib/testutils/minirunner/minirunner.go +++ b/lib/testutils/minirunner/minirunner.go @@ -59,9 +59,15 @@ func (r MiniRunner) MakeArchive() *lib.Archive { // NewVU returns a new VU with an incremental ID. func (r *MiniRunner) NewVU(id uint64, out chan<- stats.SampleContainer) (lib.InitializedVU, error) { - state := &lib.State{Vu: id} - state.Init() - return &VU{R: r, Out: out, ID: id, state: state}, nil + state := &lib.State{Vu: id, Iteration: int64(-1)} + return &VU{ + R: r, + Out: out, + ID: id, + state: state, + scenarioID: make(map[string]uint64), + scenarioIter: make(map[string]int64), + }, nil } // Setup calls the supplied mock setup() function, if present. 
@@ -126,10 +132,15 @@ func (r *MiniRunner) HandleSummary(ctx context.Context, s *lib.Summary) (map[str // VU is a mock VU, spawned by a MiniRunner. type VU struct { - R *MiniRunner - Out chan<- stats.SampleContainer - ID, Iteration uint64 - state *lib.State + R *MiniRunner + Out chan<- stats.SampleContainer + ID uint64 + Iteration int64 + state *lib.State + // ID of this VU in each scenario + scenarioID map[string]uint64 + // count of iterations executed by this VU in each scenario + scenarioIter map[string]int64 } // ActiveVU holds a VU and its activation parameters @@ -137,6 +148,12 @@ type ActiveVU struct { *VU *lib.VUActivationParams busy chan struct{} + + scenarioName string + iterSync chan struct{} + getNextScLocalIter func() int64 + getNextScGlobalIter func() int64 + scIterLocal, scIterGlobal int64 } // GetID returns the unique VU ID. @@ -146,15 +163,36 @@ func (vu *VU) GetID() uint64 { // Activate the VU so it will be able to run code. func (vu *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { - vu.state.IncrScIter = params.IncrScIter - vu.state.IncrScIterGlobal = params.IncrScIterGlobal - ctx := lib.WithState(params.RunContext, vu.state) + if params.GetNextScVUID != nil { + if _, ok := vu.scenarioID[params.Scenario]; !ok { + vu.state.VUIDScenario = params.GetNextScVUID() + vu.scenarioID[params.Scenario] = vu.state.VUIDScenario + } + } + + vu.state.GetScenarioVUIter = func() int64 { + return vu.scenarioIter[params.Scenario] + } + avu := &ActiveVU{ - VU: vu, - VUActivationParams: params, - busy: make(chan struct{}, 1), + VU: vu, + VUActivationParams: params, + busy: make(chan struct{}, 1), + scenarioName: params.Scenario, + iterSync: params.IterSync, + scIterLocal: int64(-1), + scIterGlobal: int64(-1), + getNextScLocalIter: params.GetNextScLocalIter, + getNextScGlobalIter: params.GetNextScGlobalIter, + } + + vu.state.GetScenarioLocalVUIter = func() int64 { + return avu.scIterLocal + } + vu.state.GetScenarioGlobalVUIter = func() int64 { + return 
avu.scIterGlobal } go func() { @@ -172,6 +210,31 @@ func (vu *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { return avu } +func (vu *ActiveVU) incrIteration() { + vu.Iteration++ + vu.state.Iteration = vu.Iteration + + if vu.iterSync != nil { + // block other VUs from incrementing scenario iterations + vu.iterSync <- struct{}{} + defer func() { + <-vu.iterSync // unlock + }() + } + + if _, ok := vu.scenarioIter[vu.scenarioName]; ok { + vu.scenarioIter[vu.scenarioName]++ + } else { + vu.scenarioIter[vu.scenarioName] = 0 + } + if vu.getNextScLocalIter != nil { + vu.scIterLocal = vu.getNextScLocalIter() + } + if vu.getNextScGlobalIter != nil { + vu.scIterGlobal = vu.getNextScGlobalIter() + } +} + // RunOnce runs the mock default function once, incrementing its iteration. func (vu *ActiveVU) RunOnce() error { if vu.R.Fn == nil { @@ -189,7 +252,7 @@ func (vu *ActiveVU) RunOnce() error { }() ctx := lib.WithState(vu.RunContext, vu.state) - vu.state.IncrIteration() + vu.incrIteration() return vu.R.Fn(ctx, vu.Out) } From 85ae41b0bdfb4856edeed5887f18d3c1e036fc20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Thu, 3 Jun 2021 15:47:46 +0200 Subject: [PATCH 16/22] Use uint64 for iterations Resolves https://github.com/k6io/k6/pull/1863#discussion_r641465216 --- js/runner.go | 18 ++++++++++-------- lib/executor/base_executor.go | 10 +++++----- lib/executor/constant_arrival_rate.go | 4 ++-- lib/executor/constant_arrival_rate_test.go | 10 +++++----- lib/executor/helpers.go | 2 +- lib/executor/ramping_arrival_rate.go | 4 ++-- lib/executor/ramping_arrival_rate_test.go | 10 +++++----- lib/executor/shared_iterations.go | 4 ++-- lib/executor/shared_iterations_test.go | 10 +++++----- lib/executor/vu_handle.go | 4 ++-- lib/runner.go | 4 ++-- lib/testutils/minirunner/minirunner.go | 20 ++++++++++---------- 12 files changed, 51 insertions(+), 49 deletions(-) diff --git a/js/runner.go b/js/runner.go index 417915d7088..05a747d8e9c 100644 --- a/js/runner.go +++ 
b/js/runner.go @@ -219,7 +219,7 @@ func (r *Runner) newVU(id uint64, samplesOut chan<- stats.SampleContainer) (*VU, BPool: bpool.NewBufferPool(100), Samples: samplesOut, scenarioID: make(map[string]uint64), - scenarioIter: make(map[string]int64), + scenarioIter: make(map[string]uint64), } vu.state = &lib.State{ @@ -547,7 +547,7 @@ type VU struct { // ID of this VU in each scenario scenarioID map[string]uint64 // count of iterations executed by this VU in each scenario - scenarioIter map[string]int64 + scenarioIter map[string]uint64 } // Verify that interfaces are implemented @@ -628,7 +628,7 @@ func (u *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { } } - u.state.GetScenarioVUIter = func() int64 { + u.state.GetScenarioVUIter = func() uint64 { return u.scenarioIter[params.Scenario] } @@ -638,17 +638,19 @@ func (u *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { busy: make(chan struct{}, 1), scenarioName: params.Scenario, iterSync: params.IterSync, - scIterLocal: int64(-1), - scIterGlobal: int64(-1), + scIterLocal: ^uint64(0), + scIterGlobal: ^uint64(0), getNextScLocalIter: params.GetNextScLocalIter, getNextScGlobalIter: params.GetNextScGlobalIter, } - u.state.GetScenarioLocalVUIter = func() int64 { + u.state.GetScenarioLocalVUIter = func() uint64 { return avu.scIterLocal } - u.state.GetScenarioGlobalVUIter = func() int64 { - return avu.scIterGlobal + if params.GetNextScGlobalIter != nil { + u.state.GetScenarioGlobalVUIter = func() uint64 { + return avu.scIterGlobal + } } go func() { diff --git a/lib/executor/base_executor.go b/lib/executor/base_executor.go index 74455170da0..5807e437152 100644 --- a/lib/executor/base_executor.go +++ b/lib/executor/base_executor.go @@ -42,15 +42,15 @@ type BaseExecutor struct { VUIDLocal *uint64 // counter for assigning executor-specific VU IDs // Counter for keeping track of all VU iterations completed by this executor // in the current (local) k6 instance. 
- iterLocal *int64 + iterLocal *uint64 logger *logrus.Entry progress *pb.ProgressBar } // NewBaseExecutor returns an initialized BaseExecutor func NewBaseExecutor(config lib.ExecutorConfig, es *lib.ExecutionState, logger *logrus.Entry) *BaseExecutor { - // Start at -1 so that the first iteration can be 0 - startIterLocal := int64(-1) + // Start at max uint64 so that the first iteration can be 0 + startIterLocal := ^uint64(0) return &BaseExecutor{ config: config, executionState: es, @@ -83,8 +83,8 @@ func (bs BaseExecutor) getNextLocalVUID() uint64 { // getNextLocalIter increments and returns the next local iteration number, for // keeping track of total iterations executed by this scenario/executor. -func (bs *BaseExecutor) getNextLocalIter() int64 { - return atomic.AddInt64(bs.iterLocal, 1) +func (bs *BaseExecutor) getNextLocalIter() uint64 { + return atomic.AddUint64(bs.iterLocal, 1) } // GetLogger returns the executor logger entry. diff --git a/lib/executor/constant_arrival_rate.go b/lib/executor/constant_arrival_rate.go index 847faf2319f..d34e3d36a21 100644 --- a/lib/executor/constant_arrival_rate.go +++ b/lib/executor/constant_arrival_rate.go @@ -209,12 +209,12 @@ func (car *ConstantArrivalRate) Init(ctx context.Context) error { // this executor, taking into account the configured execution segment. // Unlike the local iteration number returned by getNextLocalIter(), this // iteration number will be unique across k6 instances. -func (car *ConstantArrivalRate) getNextGlobalIter() int64 { +func (car *ConstantArrivalRate) getNextGlobalIter() uint64 { car.iterMx.Lock() defer car.iterMx.Unlock() car.segIdx.Next() // iterations are 0-based - return car.segIdx.GetUnscaled() - 1 + return uint64(car.segIdx.GetUnscaled() - 1) } // Run executes a constant number of iterations per second. 
diff --git a/lib/executor/constant_arrival_rate_test.go b/lib/executor/constant_arrival_rate_test.go index dd0b06168fd..d690605be41 100644 --- a/lib/executor/constant_arrival_rate_test.go +++ b/lib/executor/constant_arrival_rate_test.go @@ -351,11 +351,11 @@ func TestConstantArrivalRateGlobalIters(t *testing.T) { testCases := []struct { seq, seg string - expIters []int64 + expIters []uint64 }{ - {"0,1/4,3/4,1", "0:1/4", []int64{1, 6, 11, 16, 21}}, - {"0,1/4,3/4,1", "1/4:3/4", []int64{0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20}}, - {"0,1/4,3/4,1", "3/4:1", []int64{3, 8, 13, 18}}, + {"0,1/4,3/4,1", "0:1/4", []uint64{1, 6, 11, 16, 21}}, + {"0,1/4,3/4,1", "1/4:3/4", []uint64{0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20}}, + {"0,1/4,3/4,1", "3/4:1", []uint64{3, 8, 13, 18}}, } for _, tc := range testCases { @@ -374,7 +374,7 @@ func TestConstantArrivalRateGlobalIters(t *testing.T) { ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) defer cancel() - gotIters := []int64{} + gotIters := []uint64{} var mx sync.Mutex runner.Fn = func(ctx context.Context, _ chan<- stats.SampleContainer) error { state := lib.GetState(ctx) diff --git a/lib/executor/helpers.go b/lib/executor/helpers.go index 4bb2c76f3bf..73c301d9afa 100644 --- a/lib/executor/helpers.go +++ b/lib/executor/helpers.go @@ -225,7 +225,7 @@ func getArrivalRatePerSec(scaledArrivalRate *big.Rat) *big.Rat { // TODO: Refactor this, maybe move all scenario things to an embedded struct? 
func getVUActivationParams( ctx context.Context, conf BaseConfig, deactivateCallback func(lib.InitializedVU), - getNextScVUID func() uint64, getNextScLocalIter, getNextScGlobalIter func() int64, + getNextScVUID func() uint64, getNextScLocalIter, getNextScGlobalIter func() uint64, iterSync chan struct{}, ) *lib.VUActivationParams { return &lib.VUActivationParams{ diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index 623a7b91b13..13f00dd304e 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ -200,12 +200,12 @@ func (varr *RampingArrivalRate) Init(ctx context.Context) error { // this executor, taking into account the configured execution segment. // Unlike the local iteration number returned by getNextLocalIter(), this // iteration number will be unique across k6 instances. -func (varr *RampingArrivalRate) getNextGlobalIter() int64 { +func (varr *RampingArrivalRate) getNextGlobalIter() uint64 { varr.iterMx.Lock() defer varr.iterMx.Unlock() varr.segIdx.Next() // iterations are 0-based - return varr.segIdx.GetUnscaled() - 1 + return uint64(varr.segIdx.GetUnscaled() - 1) } // cal calculates the transtitions between stages and gives the next full value produced by the diff --git a/lib/executor/ramping_arrival_rate_test.go b/lib/executor/ramping_arrival_rate_test.go index 2177fa77671..15572607a6a 100644 --- a/lib/executor/ramping_arrival_rate_test.go +++ b/lib/executor/ramping_arrival_rate_test.go @@ -711,11 +711,11 @@ func TestRampingArrivalRateGlobalIters(t *testing.T) { testCases := []struct { seq, seg string - expIters []int64 + expIters []uint64 }{ - {"0,1/4,3/4,1", "0:1/4", []int64{1, 6, 11, 16}}, - {"0,1/4,3/4,1", "1/4:3/4", []int64{0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20}}, - {"0,1/4,3/4,1", "3/4:1", []int64{3, 8, 13}}, + {"0,1/4,3/4,1", "0:1/4", []uint64{1, 6, 11, 16}}, + {"0,1/4,3/4,1", "1/4:3/4", []uint64{0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20}}, + {"0,1/4,3/4,1", 
"3/4:1", []uint64{3, 8, 13}}, } for _, tc := range testCases { @@ -734,7 +734,7 @@ func TestRampingArrivalRateGlobalIters(t *testing.T) { ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) defer cancel() - gotIters := []int64{} + gotIters := []uint64{} var mx sync.Mutex runner.Fn = func(ctx context.Context, _ chan<- stats.SampleContainer) error { state := lib.GetState(ctx) diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go index a5591cb83fd..b886e6010d1 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -189,12 +189,12 @@ func (si *SharedIterations) Init(ctx context.Context) error { // this executor, taking into account the configured execution segment. // Unlike the local iteration number returned by getNextLocalIter(), this // iteration number will be unique across k6 instances. -func (si *SharedIterations) getNextGlobalIter() int64 { +func (si *SharedIterations) getNextGlobalIter() uint64 { si.iterMx.Lock() defer si.iterMx.Unlock() si.segIdx.Next() // iterations are 0-based - return si.segIdx.GetUnscaled() - 1 + return uint64(si.segIdx.GetUnscaled() - 1) } // Run executes a specific total number of iterations, which are all shared by diff --git a/lib/executor/shared_iterations_test.go b/lib/executor/shared_iterations_test.go index 3e0fbb4f8b9..bc2e3292abc 100644 --- a/lib/executor/shared_iterations_test.go +++ b/lib/executor/shared_iterations_test.go @@ -155,11 +155,11 @@ func TestSharedIterationsGlobalIters(t *testing.T) { testCases := []struct { seq, seg string - expIters []int64 + expIters []uint64 }{ - {"0,1/4,3/4,1", "0:1/4", []int64{1, 6, 11, 16, 21, 26, 31, 36, 41, 46}}, - {"0,1/4,3/4,1", "1/4:3/4", []int64{0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40, 42, 44, 45, 47, 49}}, - {"0,1/4,3/4,1", "3/4:1", []int64{3, 8, 13, 18, 23, 28, 33, 38, 43, 48}}, + {"0,1/4,3/4,1", "0:1/4", []uint64{1, 6, 11, 16, 21, 26, 31, 36, 41, 46}}, + 
{"0,1/4,3/4,1", "1/4:3/4", []uint64{0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40, 42, 44, 45, 47, 49}}, + {"0,1/4,3/4,1", "3/4:1", []uint64{3, 8, 13, 18, 23, 28, 33, 38, 43, 48}}, } for _, tc := range testCases { @@ -178,7 +178,7 @@ func TestSharedIterationsGlobalIters(t *testing.T) { ctx, cancel, executor, _ := setupExecutor(t, config, es, runner) defer cancel() - gotIters := []int64{} + gotIters := []uint64{} var mx sync.Mutex runner.Fn = func(ctx context.Context, _ chan<- stats.SampleContainer) error { state := lib.GetState(ctx) diff --git a/lib/executor/vu_handle.go b/lib/executor/vu_handle.go index c93f0098fc8..64812dc8313 100644 --- a/lib/executor/vu_handle.go +++ b/lib/executor/vu_handle.go @@ -93,7 +93,7 @@ type vuHandle struct { getVU func() (lib.InitializedVU, error) returnVU func(lib.InitializedVU) getScenarioVUID func() uint64 - getScenarioLocalIter func() int64 + getScenarioLocalIter func() uint64 iterSync chan struct{} config *BaseConfig @@ -112,7 +112,7 @@ type vuHandle struct { func newStoppedVUHandle( parentCtx context.Context, getVU func() (lib.InitializedVU, error), returnVU func(lib.InitializedVU), getScenarioVUID func() uint64, - getScenarioLocalIter func() int64, iterSync chan struct{}, + getScenarioLocalIter func() uint64, iterSync chan struct{}, config *BaseConfig, logger *logrus.Entry, ) *vuHandle { ctx, cancel := context.WithCancel(parentCtx) diff --git a/lib/runner.go b/lib/runner.go index ff43c5b8c42..488d2b81d84 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -57,8 +57,8 @@ type VUActivationParams struct { Exec, Scenario string GetNextScVUID func() uint64 IterSync chan struct{} - GetNextScLocalIter func() int64 - GetNextScGlobalIter func() int64 + GetNextScLocalIter func() uint64 + GetNextScGlobalIter func() uint64 } // A Runner is a factory for VUs. 
It should precompute as much as possible upon diff --git a/lib/testutils/minirunner/minirunner.go b/lib/testutils/minirunner/minirunner.go index 54a405a36fd..f2863b15d46 100644 --- a/lib/testutils/minirunner/minirunner.go +++ b/lib/testutils/minirunner/minirunner.go @@ -66,7 +66,7 @@ func (r *MiniRunner) NewVU(id uint64, out chan<- stats.SampleContainer) (lib.Ini ID: id, state: state, scenarioID: make(map[string]uint64), - scenarioIter: make(map[string]int64), + scenarioIter: make(map[string]uint64), }, nil } @@ -140,7 +140,7 @@ type VU struct { // ID of this VU in each scenario scenarioID map[string]uint64 // count of iterations executed by this VU in each scenario - scenarioIter map[string]int64 + scenarioIter map[string]uint64 } // ActiveVU holds a VU and its activation parameters @@ -151,9 +151,9 @@ type ActiveVU struct { scenarioName string iterSync chan struct{} - getNextScLocalIter func() int64 - getNextScGlobalIter func() int64 - scIterLocal, scIterGlobal int64 + getNextScLocalIter func() uint64 + getNextScGlobalIter func() uint64 + scIterLocal, scIterGlobal uint64 } // GetID returns the unique VU ID. 
@@ -172,7 +172,7 @@ func (vu *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { } } - vu.state.GetScenarioVUIter = func() int64 { + vu.state.GetScenarioVUIter = func() uint64 { return vu.scenarioIter[params.Scenario] } @@ -182,16 +182,16 @@ func (vu *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { busy: make(chan struct{}, 1), scenarioName: params.Scenario, iterSync: params.IterSync, - scIterLocal: int64(-1), - scIterGlobal: int64(-1), + scIterLocal: ^uint64(0), + scIterGlobal: ^uint64(0), getNextScLocalIter: params.GetNextScLocalIter, getNextScGlobalIter: params.GetNextScGlobalIter, } - vu.state.GetScenarioLocalVUIter = func() int64 { + vu.state.GetScenarioLocalVUIter = func() uint64 { return avu.scIterLocal } - vu.state.GetScenarioGlobalVUIter = func() int64 { + vu.state.GetScenarioGlobalVUIter = func() uint64 { return avu.scIterGlobal } From 6de5a40be23f4fd5a314dd380178e9b9abc55c8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Thu, 10 Jun 2021 12:00:51 +0200 Subject: [PATCH 17/22] Return result from SegmentedIndex methods, remove superfluous locking Resolves https://github.com/k6io/k6/pull/1863#discussion_r644140057 --- lib/execution_segment.go | 34 ++++++++++++--------------- lib/executor/constant_arrival_rate.go | 8 ++----- lib/executor/ramping_arrival_rate.go | 8 ++----- lib/executor/ramping_vus.go | 20 +++++++++------- lib/executor/shared_iterations.go | 8 ++----- 5 files changed, 32 insertions(+), 46 deletions(-) diff --git a/lib/execution_segment.go b/lib/execution_segment.go index e7e15600712..db637f866f3 100644 --- a/lib/execution_segment.go +++ b/lib/execution_segment.go @@ -738,10 +738,16 @@ func (et *ExecutionTuple) GetNewExecutionTupleFromValue(value int64) (*Execution type SegmentedIndex struct { start, lcd int64 offsets []int64 - mx sync.RWMutex + mx sync.Mutex scaled, unscaled int64 // for both the first element(vu) is 1 not 0 } +// SegmentedIndexResult holds the new index values after being changed by 
+// Next(), Prev() or GoTo(). +type SegmentedIndexResult struct { + Scaled, Unscaled int64 +} + // NewSegmentedIndex returns a pointer to a new SegmentedIndex instance, // given a starting index, LCD and offsets as returned by GetStripedOffsets(). func NewSegmentedIndex(start, lcd int64, offsets []int64) *SegmentedIndex { @@ -749,7 +755,7 @@ func NewSegmentedIndex(start, lcd int64, offsets []int64) *SegmentedIndex { } // Next goes to the next scaled index and moves the unscaled one accordingly. -func (s *SegmentedIndex) Next() { +func (s *SegmentedIndex) Next() SegmentedIndexResult { s.mx.Lock() defer s.mx.Unlock() if s.scaled == 0 { // the 1 element(VU) is at the start @@ -758,11 +764,13 @@ func (s *SegmentedIndex) Next() { s.unscaled += s.offsets[int(s.scaled-1)%len(s.offsets)] // slice's index start at 0 ours start at 1 } s.scaled++ + + return SegmentedIndexResult{Scaled: s.scaled, Unscaled: s.unscaled} } // Prev goes to the previous scaled value and sets the unscaled one accordingly. // Calling Prev when s.scaled == 0 is undefined. -func (s *SegmentedIndex) Prev() { +func (s *SegmentedIndex) Prev() SegmentedIndexResult { s.mx.Lock() defer s.mx.Unlock() if s.scaled == 1 { // we are the first need to go to the 0th element which means we need to remove the start @@ -771,11 +779,13 @@ func (s *SegmentedIndex) Prev() { s.unscaled -= s.offsets[int(s.scaled-2)%len(s.offsets)] // slice's index start 0 our start at 1 } s.scaled-- + + return SegmentedIndexResult{Scaled: s.scaled, Unscaled: s.unscaled} } // GoTo sets the scaled index to its biggest value for which the corresponding // unscaled index is smaller or equal to value. 
-func (s *SegmentedIndex) GoTo(value int64) int64 { // TODO optimize +func (s *SegmentedIndex) GoTo(value int64) SegmentedIndexResult { // TODO optimize s.mx.Lock() defer s.mx.Unlock() var gi int64 @@ -810,19 +820,5 @@ func (s *SegmentedIndex) GoTo(value int64) int64 { // TODO optimize s.unscaled = 0 // we would've added the start and 1 } - return s.scaled -} - -// GetScaled returns the scaled value. -func (s *SegmentedIndex) GetScaled() int64 { - s.mx.RLock() - defer s.mx.RUnlock() - return s.scaled -} - -// GetUnscaled returns the unscaled value. -func (s *SegmentedIndex) GetUnscaled() int64 { - s.mx.RLock() - defer s.mx.RUnlock() - return s.unscaled + return SegmentedIndexResult{Scaled: s.scaled, Unscaled: s.unscaled} } diff --git a/lib/executor/constant_arrival_rate.go b/lib/executor/constant_arrival_rate.go index d34e3d36a21..9d811624e3e 100644 --- a/lib/executor/constant_arrival_rate.go +++ b/lib/executor/constant_arrival_rate.go @@ -171,7 +171,6 @@ func (carc ConstantArrivalRateConfig) NewExecutor( return &ConstantArrivalRate{ BaseExecutor: NewBaseExecutor(&carc, es, logger), config: carc, - iterMx: &sync.Mutex{}, }, nil } @@ -186,7 +185,6 @@ type ConstantArrivalRate struct { *BaseExecutor config ConstantArrivalRateConfig et *lib.ExecutionTuple - iterMx *sync.Mutex segIdx *lib.SegmentedIndex } @@ -210,11 +208,9 @@ func (car *ConstantArrivalRate) Init(ctx context.Context) error { // Unlike the local iteration number returned by getNextLocalIter(), this // iteration number will be unique across k6 instances. func (car *ConstantArrivalRate) getNextGlobalIter() uint64 { - car.iterMx.Lock() - defer car.iterMx.Unlock() - car.segIdx.Next() + res := car.segIdx.Next() // iterations are 0-based - return uint64(car.segIdx.GetUnscaled() - 1) + return uint64(res.Unscaled - 1) } // Run executes a constant number of iterations per second. 
diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index 13f00dd304e..fe017f979fa 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ -161,7 +161,6 @@ func (varc RampingArrivalRateConfig) NewExecutor( return &RampingArrivalRate{ BaseExecutor: NewBaseExecutor(&varc, es, logger), config: varc, - iterMx: &sync.Mutex{}, }, nil } @@ -177,7 +176,6 @@ type RampingArrivalRate struct { *BaseExecutor config RampingArrivalRateConfig et *lib.ExecutionTuple - iterMx *sync.Mutex segIdx *lib.SegmentedIndex } @@ -201,11 +199,9 @@ func (varr *RampingArrivalRate) Init(ctx context.Context) error { // Unlike the local iteration number returned by getNextLocalIter(), this // iteration number will be unique across k6 instances. func (varr *RampingArrivalRate) getNextGlobalIter() uint64 { - varr.iterMx.Lock() - defer varr.iterMx.Unlock() - varr.segIdx.Next() + res := varr.segIdx.Next() // iterations are 0-based - return uint64(varr.segIdx.GetUnscaled() - 1) + return uint64(res.Unscaled - 1) } // cal calculates the transtitions between stages and gives the next full value produced by the diff --git a/lib/executor/ramping_vus.go b/lib/executor/ramping_vus.go index 9e5da1272e1..2d46dc967e9 100644 --- a/lib/executor/ramping_vus.go +++ b/lib/executor/ramping_vus.go @@ -195,7 +195,8 @@ func (vlvc RampingVUsConfig) getRawExecutionSteps(et *lib.ExecutionTuple, zeroEn ) // Reserve the scaled StartVUs at the beginning - steps = append(steps, lib.ExecutionStep{TimeOffset: 0, PlannedVUs: uint64(index.GoTo(fromVUs))}) + res := index.GoTo(fromVUs) + steps = append(steps, lib.ExecutionStep{TimeOffset: 0, PlannedVUs: uint64(res.Scaled)}) addStep := func(timeOffset time.Duration, plannedVUs uint64) { if steps[len(steps)-1].PlannedVUs != plannedVUs { steps = append(steps, lib.ExecutionStep{TimeOffset: timeOffset, PlannedVUs: plannedVUs}) @@ -212,30 +213,31 @@ func (vlvc RampingVUsConfig) getRawExecutionSteps(et 
*lib.ExecutionTuple, zeroEn continue } if stageDuration == 0 { - addStep(timeTillEnd, uint64(index.GoTo(stageEndVUs))) + res = index.GoTo(stageEndVUs) + addStep(timeTillEnd, uint64(res.Scaled)) fromVUs = stageEndVUs continue } // VU reservation for gracefully ramping down is handled as a // separate method: reserveVUsForGracefulRampDowns() - if index.GetUnscaled() > stageEndVUs { // ramp down + if res.Unscaled > stageEndVUs { // ramp down // here we don't want to emit for the equal to stageEndVUs as it doesn't go below it // it will just go to it - for ; index.GetUnscaled() > stageEndVUs; index.Prev() { + for ; res.Unscaled > stageEndVUs; res = index.Prev() { addStep( // this is the time that we should go up 1 if we are ramping up // but we are ramping down so we should go 1 down, but because we want to not // stop VUs immediately we stop it on the next unscaled VU's time - timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-index.GetUnscaled()+1)/stageVUDiff), - uint64(index.GetScaled()-1), + timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-res.Unscaled+1)/stageVUDiff), + uint64(res.Scaled-1), ) } } else { - for ; index.GetUnscaled() <= stageEndVUs; index.Next() { + for ; res.Unscaled <= stageEndVUs; res = index.Next() { addStep( - timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-index.GetUnscaled())/stageVUDiff), - uint64(index.GetScaled()), + timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-res.Unscaled)/stageVUDiff), + uint64(res.Scaled), ) } } diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go index b886e6010d1..a20ea88f0f8 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -151,7 +151,6 @@ func (sic SharedIterationsConfig) NewExecutor( return &SharedIterations{ BaseExecutor: NewBaseExecutor(sic, es, logger), config: sic, - iterMx: &sync.Mutex{}, }, nil } @@ -161,7 +160,6 @@ type SharedIterations struct { *BaseExecutor config SharedIterationsConfig 
et *lib.ExecutionTuple - iterMx *sync.Mutex segIdx *lib.SegmentedIndex } @@ -190,11 +188,9 @@ func (si *SharedIterations) Init(ctx context.Context) error { // Unlike the local iteration number returned by getNextLocalIter(), this // iteration number will be unique across k6 instances. func (si *SharedIterations) getNextGlobalIter() uint64 { - si.iterMx.Lock() - defer si.iterMx.Unlock() - si.segIdx.Next() + res := si.segIdx.Next() // iterations are 0-based - return uint64(si.segIdx.GetUnscaled() - 1) + return uint64(res.Unscaled - 1) } // Run executes a specific total number of iterations, which are all shared by From 2e02ed4416f1ce44a1411f2ab60cec6f271fbac2 Mon Sep 17 00:00:00 2001 From: Mihail Stoykov Date: Fri, 11 Jun 2021 13:27:35 +0300 Subject: [PATCH 18/22] Use a single segmented index for local and global iterations (#2057) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Use a single segmented index for local and global iterations This removes the need for a lot of the additional synchronization. This also makes it so there are global iterations for all executors. For some of those it means that the global iteration will be *unique* among instances but doesn't necessarily mean that iterations with smaller numbers *have* run or *will* run in the same test run. This is somewhat like how it is for the executors for which we already had it, there is nothing making certain that a shared-iterations executor will not run out of time to run all the iterations on one of the instances or an arrival-rate to not have to drop iterations due to lack of VUs to execute them. 
* Update lib/executor/base_executor.go Co-authored-by: Ivan Mirić * unexport and comment nextIterationCounters * Remove synchronization in SegmentedIndex * Drop SegmentedIndexResult * Fix race in TestSharedIterationsGlobalIters There is nothing stopping 5 VUs starting a shared iteration at the same time and one of them getting "slower" to the actual execution than its iteration counter suggests. Co-authored-by: Ivan Mirić --- js/init_and_modules_test.go | 6 ++-- js/runner.go | 50 ++++++++------------ lib/execution_segment.go | 26 ++++---------- lib/executor/base_executor.go | 31 ++++++++-------- lib/executor/constant_arrival_rate.go | 18 ++-------- lib/executor/constant_vus.go | 2 +- lib/executor/externally_controlled.go | 9 ++--- lib/executor/helpers.go | 21 +++++------ lib/executor/per_vu_iterations.go | 2 +- lib/executor/ramping_arrival_rate.go | 18 ++-------- lib/executor/ramping_vus.go | 26 +++++++------- lib/executor/shared_iterations.go | 17 ++------- lib/executor/shared_iterations_test.go | 4 ++- lib/executor/vu_handle.go | 32 ++++++++--------- lib/executor/vu_handle_test.go | 24 ++++++------- lib/runner.go | 14 ++++---- lib/testutils/minirunner/minirunner.go | 35 +++++------------- 17 files changed, 116 insertions(+), 219 deletions(-) diff --git a/js/init_and_modules_test.go b/js/init_and_modules_test.go index cfc43b02b92..201f7f9a9a9 100644 --- a/js/init_and_modules_test.go +++ b/js/init_and_modules_test.go @@ -28,6 +28,9 @@ import ( "testing" "time" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.k6.io/k6/js" "go.k6.io/k6/js/common" "go.k6.io/k6/js/modules" @@ -35,9 +38,6 @@ import ( "go.k6.io/k6/lib/testutils" "go.k6.io/k6/loader" "go.k6.io/k6/stats" - "github.com/spf13/afero" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v3" ) diff --git a/js/runner.go b/js/runner.go index 05a747d8e9c..4468ee90ea5 100644 --- a/js/runner.go +++ 
b/js/runner.go @@ -562,16 +562,8 @@ type ActiveVU struct { *lib.VUActivationParams busy chan struct{} - scenarioName string - // Used to synchronize iteration increments for scenarios between VUs. - iterSync chan struct{} - // Returns the iteration number across all VUs in the current scenario - // unique to this single k6 instance. - getNextScLocalIter func() uint64 - // Returns the iteration number across all VUs in the current scenario - // unique globally across k6 instances (taking into account execution - // segments). - getNextScGlobalIter func() uint64 + scenarioName string + getNextIterationCounters func() (uint64, uint64) scIterLocal, scIterGlobal uint64 } @@ -633,24 +625,20 @@ func (u *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { } avu := &ActiveVU{ - VU: u, - VUActivationParams: params, - busy: make(chan struct{}, 1), - scenarioName: params.Scenario, - iterSync: params.IterSync, - scIterLocal: ^uint64(0), - scIterGlobal: ^uint64(0), - getNextScLocalIter: params.GetNextScLocalIter, - getNextScGlobalIter: params.GetNextScGlobalIter, + VU: u, + VUActivationParams: params, + busy: make(chan struct{}, 1), + scenarioName: params.Scenario, + scIterLocal: ^uint64(0), + scIterGlobal: ^uint64(0), + getNextIterationCounters: params.GetNextIterationCounters, } u.state.GetScenarioLocalVUIter = func() uint64 { return avu.scIterLocal } - if params.GetNextScGlobalIter != nil { - u.state.GetScenarioGlobalVUIter = func() uint64 { - return avu.scIterGlobal - } + u.state.GetScenarioGlobalVUIter = func() uint64 { + return avu.scIterGlobal } go func() { @@ -784,24 +772,14 @@ func (u *ActiveVU) incrIteration() { u.iteration++ u.state.Iteration = u.iteration - if u.iterSync != nil { - // block other VUs from incrementing scenario iterations - u.iterSync <- struct{}{} - defer func() { - <-u.iterSync // unlock - }() - } - if _, ok := u.scenarioIter[u.scenarioName]; ok { u.scenarioIter[u.scenarioName]++ } else { u.scenarioIter[u.scenarioName] = 0 } - if 
u.getNextScLocalIter != nil { - u.scIterLocal = u.getNextScLocalIter() - } - if u.getNextScGlobalIter != nil { - u.scIterGlobal = u.getNextScGlobalIter() + // TODO remove this + if u.getNextIterationCounters != nil { + u.scIterLocal, u.scIterGlobal = u.getNextIterationCounters() } } diff --git a/lib/execution_segment.go b/lib/execution_segment.go index db637f866f3..b42f3779d17 100644 --- a/lib/execution_segment.go +++ b/lib/execution_segment.go @@ -26,7 +26,6 @@ import ( "math/big" "sort" "strings" - "sync" ) // ExecutionSegment represents a (start, end] partition of the total execution @@ -738,16 +737,9 @@ func (et *ExecutionTuple) GetNewExecutionTupleFromValue(value int64) (*Execution type SegmentedIndex struct { start, lcd int64 offsets []int64 - mx sync.Mutex scaled, unscaled int64 // for both the first element(vu) is 1 not 0 } -// SegmentedIndexResult holds the new index values after being changed by -// Next(), Prev() or GoTo(). -type SegmentedIndexResult struct { - Scaled, Unscaled int64 -} - // NewSegmentedIndex returns a pointer to a new SegmentedIndex instance, // given a starting index, LCD and offsets as returned by GetStripedOffsets(). func NewSegmentedIndex(start, lcd int64, offsets []int64) *SegmentedIndex { @@ -755,9 +747,7 @@ func NewSegmentedIndex(start, lcd int64, offsets []int64) *SegmentedIndex { } // Next goes to the next scaled index and moves the unscaled one accordingly. 
-func (s *SegmentedIndex) Next() SegmentedIndexResult { - s.mx.Lock() - defer s.mx.Unlock() +func (s *SegmentedIndex) Next() (int64, int64) { if s.scaled == 0 { // the 1 element(VU) is at the start s.unscaled += s.start + 1 // the first element of the start 0, but the here we need it to be 1 so we add 1 } else { // if we are not at the first element we need to go through the offsets, looping over them @@ -765,14 +755,12 @@ func (s *SegmentedIndex) Next() SegmentedIndexResult { } s.scaled++ - return SegmentedIndexResult{Scaled: s.scaled, Unscaled: s.unscaled} + return s.scaled, s.unscaled } // Prev goes to the previous scaled value and sets the unscaled one accordingly. // Calling Prev when s.scaled == 0 is undefined. -func (s *SegmentedIndex) Prev() SegmentedIndexResult { - s.mx.Lock() - defer s.mx.Unlock() +func (s *SegmentedIndex) Prev() (int64, int64) { if s.scaled == 1 { // we are the first need to go to the 0th element which means we need to remove the start s.unscaled -= s.start + 1 // this could've been just settign to 0 } else { // not at the first element - need to get the previously added offset so @@ -780,14 +768,12 @@ func (s *SegmentedIndex) Prev() SegmentedIndexResult { } s.scaled-- - return SegmentedIndexResult{Scaled: s.scaled, Unscaled: s.unscaled} + return s.scaled, s.unscaled } // GoTo sets the scaled index to its biggest value for which the corresponding // unscaled index is smaller or equal to value. 
-func (s *SegmentedIndex) GoTo(value int64) SegmentedIndexResult { // TODO optimize - s.mx.Lock() - defer s.mx.Unlock() +func (s *SegmentedIndex) GoTo(value int64) (int64, int64) { // TODO optimize var gi int64 // Because of the cyclical nature of the striping algorithm (with a cycle // length of LCD, the least common denominator), when scaling large values @@ -820,5 +806,5 @@ func (s *SegmentedIndex) GoTo(value int64) SegmentedIndexResult { // TODO optimi s.unscaled = 0 // we would've added the start and 1 } - return SegmentedIndexResult{Scaled: s.scaled, Unscaled: s.unscaled} + return s.scaled, s.unscaled } diff --git a/lib/executor/base_executor.go b/lib/executor/base_executor.go index 5807e437152..0c9de1c0208 100644 --- a/lib/executor/base_executor.go +++ b/lib/executor/base_executor.go @@ -23,6 +23,7 @@ package executor import ( "context" "strconv" + "sync" "sync/atomic" "github.com/sirupsen/logrus" @@ -40,23 +41,23 @@ type BaseExecutor struct { config lib.ExecutorConfig executionState *lib.ExecutionState VUIDLocal *uint64 // counter for assigning executor-specific VU IDs - // Counter for keeping track of all VU iterations completed by this executor - // in the current (local) k6 instance. 
- iterLocal *uint64 - logger *logrus.Entry - progress *pb.ProgressBar + iterSegIndexMx *sync.Mutex + iterSegIndex *lib.SegmentedIndex + logger *logrus.Entry + progress *pb.ProgressBar } // NewBaseExecutor returns an initialized BaseExecutor func NewBaseExecutor(config lib.ExecutorConfig, es *lib.ExecutionState, logger *logrus.Entry) *BaseExecutor { - // Start at max uint64 so that the first iteration can be 0 - startIterLocal := ^uint64(0) + start, offsets, lcd := es.ExecutionTuple.GetStripedOffsets() + segIdx := lib.NewSegmentedIndex(start, lcd, offsets) return &BaseExecutor{ config: config, executionState: es, VUIDLocal: new(uint64), - iterLocal: &startIterLocal, logger: logger, + iterSegIndexMx: new(sync.Mutex), + iterSegIndex: segIdx, progress: pb.New( pb.WithLeft(config.GetName), pb.WithLogger(logger), @@ -64,6 +65,14 @@ func NewBaseExecutor(config lib.ExecutorConfig, es *lib.ExecutionState, logger * } } +// nextIterationCounters next scaled(local) and unscaled(global) iteration counters +func (bs *BaseExecutor) nextIterationCounters() (uint64, uint64) { + bs.iterSegIndexMx.Lock() + defer bs.iterSegIndexMx.Unlock() + scaled, unscaled := bs.iterSegIndex.Next() + return uint64(scaled - 1), uint64(unscaled - 1) +} + // Init doesn't do anything for most executors, since initialization of all // planned VUs is handled by the executor. func (bs *BaseExecutor) Init(_ context.Context) error { @@ -81,12 +90,6 @@ func (bs BaseExecutor) getNextLocalVUID() uint64 { return atomic.AddUint64(bs.VUIDLocal, 1) } -// getNextLocalIter increments and returns the next local iteration number, for -// keeping track of total iterations executed by this scenario/executor. -func (bs *BaseExecutor) getNextLocalIter() uint64 { - return atomic.AddUint64(bs.iterLocal, 1) -} - // GetLogger returns the executor logger entry. 
func (bs BaseExecutor) GetLogger() *logrus.Entry { return bs.logger diff --git a/lib/executor/constant_arrival_rate.go b/lib/executor/constant_arrival_rate.go index 9d811624e3e..c51c34198fd 100644 --- a/lib/executor/constant_arrival_rate.go +++ b/lib/executor/constant_arrival_rate.go @@ -185,7 +185,6 @@ type ConstantArrivalRate struct { *BaseExecutor config ConstantArrivalRateConfig et *lib.ExecutionTuple - segIdx *lib.SegmentedIndex } // Make sure we implement the lib.Executor interface. @@ -198,21 +197,11 @@ func (car *ConstantArrivalRate) Init(ctx context.Context) error { et, err := car.BaseExecutor.executionState.ExecutionTuple.GetNewExecutionTupleFromValue(car.config.MaxVUs.Int64) car.et = et start, offsets, lcd := et.GetStripedOffsets() - car.segIdx = lib.NewSegmentedIndex(start, lcd, offsets) + car.iterSegIndex = lib.NewSegmentedIndex(start, lcd, offsets) return err } -// getNextGlobalIter advances and returns the next global iteration number for -// this executor, taking into account the configured execution segment. -// Unlike the local iteration number returned by getNextLocalIter(), this -// iteration number will be unique across k6 instances. -func (car *ConstantArrivalRate) getNextGlobalIter() uint64 { - res := car.segIdx.Next() - // iterations are 0-based - return uint64(res.Unscaled - 1) -} - // Run executes a constant number of iterations per second. 
// // TODO: Split this up and make an independent component that can be reused @@ -293,15 +282,12 @@ func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- stats.S activeVUsWg.Done() } - // Channel for synchronizing scenario-specific iteration increments - iterSync := make(chan struct{}, 1) runIterationBasic := getIterationRunner(car.executionState, car.logger) activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { activeVUsWg.Add(1) activeVU := initVU.Activate(getVUActivationParams( maxDurationCtx, car.config.BaseConfig, returnVU, - car.getNextLocalVUID, car.getNextLocalIter, car.getNextGlobalIter, - iterSync, + car.getNextLocalVUID, car.nextIterationCounters, )) car.executionState.ModCurrentlyActiveVUsCount(+1) atomic.AddUint64(&activeVUsCount, 1) diff --git a/lib/executor/constant_vus.go b/lib/executor/constant_vus.go index 8fc20ae9b78..86b7bfda25f 100644 --- a/lib/executor/constant_vus.go +++ b/lib/executor/constant_vus.go @@ -194,7 +194,7 @@ func (clv ConstantVUs) Run(parentCtx context.Context, out chan<- stats.SampleCon activeVU := initVU.Activate( getVUActivationParams(ctx, clv.config.BaseConfig, returnVU, - clv.getNextLocalVUID, clv.getNextLocalIter, nil, iterSync)) + clv.getNextLocalVUID, clv.nextIterationCounters)) for { select { diff --git a/lib/executor/externally_controlled.go b/lib/executor/externally_controlled.go index 006d2caac2f..5acf23c24c1 100644 --- a/lib/executor/externally_controlled.go +++ b/lib/executor/externally_controlled.go @@ -362,8 +362,8 @@ func (rs *externallyControlledRunState) newManualVUHandle( ctx, cancel := context.WithCancel(rs.ctx) return &manualVUHandle{ vuHandle: newStoppedVUHandle(ctx, getVU, returnVU, - rs.executor.getNextLocalVUID, rs.executor.getNextLocalIter, - rs.iterSync, &rs.executor.config.BaseConfig, logger), + rs.executor.getNextLocalVUID, rs.executor.nextIterationCounters, + &rs.executor.config.BaseConfig, logger), initVU: initVU, wg: &wg, cancelVU: cancel, @@ -384,8 +384,6 @@ type 
externallyControlledRunState struct { currentlyPaused bool // whether the executor is currently paused runIteration func(context.Context, lib.ActiveVU) bool // a helper closure function that runs a single iteration - // channel for synchronizing scenario-specific iteration increments - iterSync chan struct{} } // retrieveStartMaxVUs gets and initializes the (scaled) number of MaxVUs @@ -524,8 +522,6 @@ func (mex *ExternallyControlled) Run(parentCtx context.Context, out chan<- stats ).Debug("Starting executor run...") startMaxVUs := mex.executionState.Options.ExecutionSegment.Scale(mex.config.MaxVUs.Int64) - // Channel for synchronizing scenario-specific iteration increments - iterSync := make(chan struct{}, 1) runState := &externallyControlledRunState{ ctx: ctx, executor: mex, @@ -536,7 +532,6 @@ func (mex *ExternallyControlled) Run(parentCtx context.Context, out chan<- stats activeVUsCount: new(int64), maxVUs: new(int64), runIteration: getIterationRunner(mex.executionState, mex.logger), - iterSync: iterSync, } *runState.maxVUs = startMaxVUs if err = runState.retrieveStartMaxVUs(); err != nil { diff --git a/lib/executor/helpers.go b/lib/executor/helpers.go index 73c301d9afa..e2a972df988 100644 --- a/lib/executor/helpers.go +++ b/lib/executor/helpers.go @@ -225,19 +225,16 @@ func getArrivalRatePerSec(scaledArrivalRate *big.Rat) *big.Rat { // TODO: Refactor this, maybe move all scenario things to an embedded struct? 
func getVUActivationParams( ctx context.Context, conf BaseConfig, deactivateCallback func(lib.InitializedVU), - getNextScVUID func() uint64, getNextScLocalIter, getNextScGlobalIter func() uint64, - iterSync chan struct{}, + getNextScVUID func() uint64, nextIterationCounters func() (uint64, uint64), ) *lib.VUActivationParams { return &lib.VUActivationParams{ - RunContext: ctx, - Scenario: conf.Name, - Exec: conf.GetExec(), - Env: conf.GetEnv(), - Tags: conf.GetTags(), - DeactivateCallback: deactivateCallback, - GetNextScVUID: getNextScVUID, - IterSync: iterSync, - GetNextScLocalIter: getNextScLocalIter, - GetNextScGlobalIter: getNextScGlobalIter, + RunContext: ctx, + Scenario: conf.Name, + Exec: conf.GetExec(), + Env: conf.GetEnv(), + Tags: conf.GetTags(), + DeactivateCallback: deactivateCallback, + GetNextScVUID: getNextScVUID, + GetNextIterationCounters: nextIterationCounters, } } diff --git a/lib/executor/per_vu_iterations.go b/lib/executor/per_vu_iterations.go index 3ffc5c04e4e..204e41f49fa 100644 --- a/lib/executor/per_vu_iterations.go +++ b/lib/executor/per_vu_iterations.go @@ -220,7 +220,7 @@ func (pvi PerVUIterations) Run(parentCtx context.Context, out chan<- stats.Sampl vuID := initVU.GetID() activeVU := initVU.Activate( getVUActivationParams(ctx, pvi.config.BaseConfig, returnVU, - pvi.getNextLocalVUID, pvi.getNextLocalIter, nil, iterSync)) + pvi.getNextLocalVUID, pvi.nextIterationCounters)) for i := int64(0); i < iterations; i++ { select { diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index fe017f979fa..7b023a3b1e8 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ -176,7 +176,6 @@ type RampingArrivalRate struct { *BaseExecutor config RampingArrivalRateConfig et *lib.ExecutionTuple - segIdx *lib.SegmentedIndex } // Make sure we implement the lib.Executor interface. 
@@ -189,21 +188,11 @@ func (varr *RampingArrivalRate) Init(ctx context.Context) error { et, err := varr.BaseExecutor.executionState.ExecutionTuple.GetNewExecutionTupleFromValue(varr.config.MaxVUs.Int64) varr.et = et start, offsets, lcd := et.GetStripedOffsets() - varr.segIdx = lib.NewSegmentedIndex(start, lcd, offsets) + varr.iterSegIndex = lib.NewSegmentedIndex(start, lcd, offsets) return err //nolint: wrapcheck } -// getNextGlobalIter advances and returns the next global iteration number for -// this executor, taking into account the configured execution segment. -// Unlike the local iteration number returned by getNextLocalIter(), this -// iteration number will be unique across k6 instances. -func (varr *RampingArrivalRate) getNextGlobalIter() uint64 { - res := varr.segIdx.Next() - // iterations are 0-based - return uint64(res.Unscaled - 1) -} - // cal calculates the transtitions between stages and gives the next full value produced by the // stages. In this explanation we are talking about events and in practice those events are starting // of an iteration, but could really be anything that needs to occur at a constant or linear rate. 
@@ -417,15 +406,12 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.S runIterationBasic := getIterationRunner(varr.executionState, varr.logger) - // Channel for synchronizing scenario-specific iteration increments - iterSync := make(chan struct{}, 1) activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { activeVUsWg.Add(1) activeVU := initVU.Activate( getVUActivationParams( maxDurationCtx, varr.config.BaseConfig, returnVU, - varr.getNextLocalVUID, varr.getNextLocalIter, - varr.getNextGlobalIter, iterSync)) + varr.getNextLocalVUID, varr.nextIterationCounters)) varr.executionState.ModCurrentlyActiveVUsCount(+1) atomic.AddUint64(&activeVUsCount, 1) diff --git a/lib/executor/ramping_vus.go b/lib/executor/ramping_vus.go index 2d46dc967e9..29f34a6d15f 100644 --- a/lib/executor/ramping_vus.go +++ b/lib/executor/ramping_vus.go @@ -195,8 +195,8 @@ func (vlvc RampingVUsConfig) getRawExecutionSteps(et *lib.ExecutionTuple, zeroEn ) // Reserve the scaled StartVUs at the beginning - res := index.GoTo(fromVUs) - steps = append(steps, lib.ExecutionStep{TimeOffset: 0, PlannedVUs: uint64(res.Scaled)}) + scaled, unscaled := index.GoTo(fromVUs) + steps = append(steps, lib.ExecutionStep{TimeOffset: 0, PlannedVUs: uint64(scaled)}) addStep := func(timeOffset time.Duration, plannedVUs uint64) { if steps[len(steps)-1].PlannedVUs != plannedVUs { steps = append(steps, lib.ExecutionStep{TimeOffset: timeOffset, PlannedVUs: plannedVUs}) @@ -213,31 +213,31 @@ func (vlvc RampingVUsConfig) getRawExecutionSteps(et *lib.ExecutionTuple, zeroEn continue } if stageDuration == 0 { - res = index.GoTo(stageEndVUs) - addStep(timeTillEnd, uint64(res.Scaled)) + scaled, unscaled = index.GoTo(stageEndVUs) + addStep(timeTillEnd, uint64(scaled)) fromVUs = stageEndVUs continue } // VU reservation for gracefully ramping down is handled as a // separate method: reserveVUsForGracefulRampDowns() - if res.Unscaled > stageEndVUs { // ramp down + if unscaled > stageEndVUs { // ramp 
down // here we don't want to emit for the equal to stageEndVUs as it doesn't go below it // it will just go to it - for ; res.Unscaled > stageEndVUs; res = index.Prev() { + for ; unscaled > stageEndVUs; scaled, unscaled = index.Prev() { addStep( // this is the time that we should go up 1 if we are ramping up // but we are ramping down so we should go 1 down, but because we want to not // stop VUs immediately we stop it on the next unscaled VU's time - timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-res.Unscaled+1)/stageVUDiff), - uint64(res.Scaled-1), + timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-unscaled+1)/stageVUDiff), + uint64(scaled-1), ) } } else { - for ; res.Unscaled <= stageEndVUs; res = index.Next() { + for ; unscaled <= stageEndVUs; scaled, unscaled = index.Next() { addStep( - timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-res.Unscaled)/stageVUDiff), - uint64(res.Scaled), + timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-unscaled)/stageVUDiff), + uint64(scaled), ) } } @@ -572,13 +572,11 @@ func (vlv RampingVUs) Run(parentCtx context.Context, out chan<- stats.SampleCont ProgressFn: progressFn, }) - // Channel for synchronizing scenario-specific iteration increments - iterSync := make(chan struct{}, 1) vuHandles := make([]*vuHandle, maxVUs) for i := uint64(0); i < maxVUs; i++ { vuHandle := newStoppedVUHandle( maxDurationCtx, getVU, returnVU, vlv.getNextLocalVUID, - vlv.getNextLocalIter, iterSync, &vlv.config.BaseConfig, + vlv.nextIterationCounters, &vlv.config.BaseConfig, vlv.logger.WithField("vuNum", i)) go vuHandle.runLoopsIfPossible(runIteration) vuHandles[i] = vuHandle diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go index a20ea88f0f8..fc9107cdda3 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -160,7 +160,6 @@ type SharedIterations struct { *BaseExecutor config SharedIterationsConfig et *lib.ExecutionTuple - segIdx 
*lib.SegmentedIndex } // Make sure we implement the lib.Executor interface. @@ -178,21 +177,11 @@ func (si *SharedIterations) Init(ctx context.Context) error { et, err := si.BaseExecutor.executionState.ExecutionTuple.GetNewExecutionTupleFromValue(si.config.VUs.Int64) si.et = et start, offsets, lcd := et.GetStripedOffsets() - si.segIdx = lib.NewSegmentedIndex(start, lcd, offsets) + si.iterSegIndex = lib.NewSegmentedIndex(start, lcd, offsets) return err } -// getNextGlobalIter advances and returns the next global iteration number for -// this executor, taking into account the configured execution segment. -// Unlike the local iteration number returned by getNextLocalIter(), this -// iteration number will be unique across k6 instances. -func (si *SharedIterations) getNextGlobalIter() uint64 { - res := si.segIdx.Next() - // iterations are 0-based - return uint64(res.Unscaled - 1) -} - // Run executes a specific total number of iterations, which are all shared by // the configured VUs. // nolint:funlen @@ -258,15 +247,13 @@ func (si SharedIterations) Run(parentCtx context.Context, out chan<- stats.Sampl activeVUs.Done() } - // Channel for synchronizing scenario-specific iteration increments - iterSync := make(chan struct{}, 1) handleVU := func(initVU lib.InitializedVU) { ctx, cancel := context.WithCancel(maxDurationCtx) defer cancel() activeVU := initVU.Activate(getVUActivationParams( ctx, si.config.BaseConfig, returnVU, si.getNextLocalVUID, - si.getNextLocalIter, si.getNextGlobalIter, iterSync, + si.nextIterationCounters, )) for { diff --git a/lib/executor/shared_iterations_test.go b/lib/executor/shared_iterations_test.go index bc2e3292abc..692568c0b3c 100644 --- a/lib/executor/shared_iterations_test.go +++ b/lib/executor/shared_iterations_test.go @@ -23,6 +23,7 @@ package executor import ( "context" "fmt" + "sort" "sync" "sync/atomic" "testing" @@ -54,7 +55,7 @@ func TestSharedIterationsRun(t *testing.T) { et, err := lib.NewExecutionTuple(nil, nil) require.NoError(t, 
err) es := lib.NewExecutionState(lib.Options{}, et, 10, 50) - var ctx, cancel, executor, _ = setupExecutor( + ctx, cancel, executor, _ := setupExecutor( t, getTestSharedIterationsConfig(), es, simpleRunner(func(ctx context.Context) error { atomic.AddUint64(&doneIters, 1) @@ -191,6 +192,7 @@ func TestSharedIterationsGlobalIters(t *testing.T) { engineOut := make(chan stats.SampleContainer, 100) err = executor.Run(ctx, engineOut) require.NoError(t, err) + sort.Slice(gotIters, func(i, j int) bool { return gotIters[i] < gotIters[j] }) assert.Equal(t, tc.expIters, gotIters) }) } diff --git a/lib/executor/vu_handle.go b/lib/executor/vu_handle.go index 64812dc8313..815741fa8e0 100644 --- a/lib/executor/vu_handle.go +++ b/lib/executor/vu_handle.go @@ -88,14 +88,13 @@ short names for input: // - it's not required but preferable, if where possible to not reactivate VUs and to reuse context // as this speed ups the execution type vuHandle struct { - mutex *sync.Mutex - parentCtx context.Context - getVU func() (lib.InitializedVU, error) - returnVU func(lib.InitializedVU) - getScenarioVUID func() uint64 - getScenarioLocalIter func() uint64 - iterSync chan struct{} - config *BaseConfig + mutex *sync.Mutex + parentCtx context.Context + getVU func() (lib.InitializedVU, error) + returnVU func(lib.InitializedVU) + getScenarioVUID func() uint64 + nextIterationCounters func() (uint64, uint64) + config *BaseConfig initVU lib.InitializedVU activeVU lib.ActiveVU @@ -112,22 +111,21 @@ type vuHandle struct { func newStoppedVUHandle( parentCtx context.Context, getVU func() (lib.InitializedVU, error), returnVU func(lib.InitializedVU), getScenarioVUID func() uint64, - getScenarioLocalIter func() uint64, iterSync chan struct{}, + nextIterationCounters func() (uint64, uint64), config *BaseConfig, logger *logrus.Entry, ) *vuHandle { ctx, cancel := context.WithCancel(parentCtx) return &vuHandle{ - mutex: &sync.Mutex{}, - parentCtx: parentCtx, - getVU: getVU, - getScenarioVUID: getScenarioVUID, - 
getScenarioLocalIter: getScenarioLocalIter, - config: config, + mutex: &sync.Mutex{}, + parentCtx: parentCtx, + getVU: getVU, + getScenarioVUID: getScenarioVUID, + nextIterationCounters: nextIterationCounters, + config: config, canStartIter: make(chan struct{}), state: stopped, - iterSync: iterSync, ctx: ctx, cancel: cancel, @@ -156,7 +154,7 @@ func (vh *vuHandle) start() (err error) { vh.activeVU = vh.initVU.Activate(getVUActivationParams( vh.ctx, *vh.config, vh.returnVU, vh.getScenarioVUID, - vh.getScenarioLocalIter, nil, vh.iterSync)) + vh.nextIterationCounters)) close(vh.canStartIter) vh.changeState(starting) } diff --git a/lib/executor/vu_handle_test.go b/lib/executor/vu_handle_test.go index e53d7fa34c6..c16e2ee27a5 100644 --- a/lib/executor/vu_handle_test.go +++ b/lib/executor/vu_handle_test.go @@ -17,6 +17,10 @@ import ( "go.k6.io/k6/stats" ) +func mockNextIterations() (uint64, uint64) { + return 12, 15 +} + // this test is mostly interesting when -race is enabled func TestVUHandleRace(t *testing.T) { t.Parallel() @@ -61,8 +65,7 @@ func TestVUHandleRace(t *testing.T) { } } - iterSync := make(chan struct{}, 1) - vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, nil, iterSync, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, mockNextIterations, &BaseConfig{}, logEntry) go vuHandle.runLoopsIfPossible(runIter) var wg sync.WaitGroup wg.Add(3) @@ -154,8 +157,7 @@ func TestVUHandleStartStopRace(t *testing.T) { } } - iterSync := make(chan struct{}, 1) - vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, nil, iterSync, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, mockNextIterations, &BaseConfig{}, logEntry) go vuHandle.runLoopsIfPossible(runIter) for i := 0; i < testIterations; i++ { err := vuHandle.start() @@ -232,8 +234,8 @@ func TestVUHandleSimple(t *testing.T) { test := &handleVUTest{runner: &minirunner.MiniRunner{}} ctx, cancel := context.WithCancel(context.Background()) 
defer cancel() - iterSync := make(chan struct{}, 1) - vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, nil, iterSync, &BaseConfig{}, logEntry) + + vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, mockNextIterations, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { @@ -273,8 +275,7 @@ func TestVUHandleSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - iterSync := make(chan struct{}, 1) - vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, nil, iterSync, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, mockNextIterations, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { @@ -315,8 +316,7 @@ func TestVUHandleSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - iterSync := make(chan struct{}, 1) - vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, nil, iterSync, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, mockNextIterations, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { @@ -394,8 +394,8 @@ func BenchmarkVUHandleIterations(b *testing.B) { reset() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - iterSync := make(chan struct{}, 1) - vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, nil, iterSync, &BaseConfig{}, logEntry) + + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, mockNextIterations, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { diff --git a/lib/runner.go b/lib/runner.go index 488d2b81d84..d77b8909fea 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -51,14 +51,12 @@ type InitializedVU interface { // VUActivationParams are supplied by each executor when it retrieves a VU from // the buffer pool and activates it for use. 
type VUActivationParams struct { - RunContext context.Context - DeactivateCallback func(InitializedVU) - Env, Tags map[string]string - Exec, Scenario string - GetNextScVUID func() uint64 - IterSync chan struct{} - GetNextScLocalIter func() uint64 - GetNextScGlobalIter func() uint64 + RunContext context.Context + DeactivateCallback func(InitializedVU) + Env, Tags map[string]string + Exec, Scenario string + GetNextScVUID func() uint64 + GetNextIterationCounters func() (uint64, uint64) } // A Runner is a factory for VUs. It should precompute as much as possible upon diff --git a/lib/testutils/minirunner/minirunner.go b/lib/testutils/minirunner/minirunner.go index f2863b15d46..015d102523e 100644 --- a/lib/testutils/minirunner/minirunner.go +++ b/lib/testutils/minirunner/minirunner.go @@ -150,9 +150,7 @@ type ActiveVU struct { busy chan struct{} scenarioName string - iterSync chan struct{} - getNextScLocalIter func() uint64 - getNextScGlobalIter func() uint64 + getNextIterations func() (uint64, uint64) scIterLocal, scIterGlobal uint64 } @@ -177,15 +175,13 @@ func (vu *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { } avu := &ActiveVU{ - VU: vu, - VUActivationParams: params, - busy: make(chan struct{}, 1), - scenarioName: params.Scenario, - iterSync: params.IterSync, - scIterLocal: ^uint64(0), - scIterGlobal: ^uint64(0), - getNextScLocalIter: params.GetNextScLocalIter, - getNextScGlobalIter: params.GetNextScGlobalIter, + VU: vu, + VUActivationParams: params, + busy: make(chan struct{}, 1), + scenarioName: params.Scenario, + scIterLocal: ^uint64(0), + scIterGlobal: ^uint64(0), + getNextIterations: params.GetNextIterationCounters, } vu.state.GetScenarioLocalVUIter = func() uint64 { @@ -214,25 +210,12 @@ func (vu *ActiveVU) incrIteration() { vu.Iteration++ vu.state.Iteration = vu.Iteration - if vu.iterSync != nil { - // block other VUs from incrementing scenario iterations - vu.iterSync <- struct{}{} - defer func() { - <-vu.iterSync // unlock - }() - } - if _, 
ok := vu.scenarioIter[vu.scenarioName]; ok { vu.scenarioIter[vu.scenarioName]++ } else { vu.scenarioIter[vu.scenarioName] = 0 } - if vu.getNextScLocalIter != nil { - vu.scIterLocal = vu.getNextScLocalIter() - } - if vu.getNextScGlobalIter != nil { - vu.scIterGlobal = vu.getNextScGlobalIter() - } + vu.scIterLocal, vu.scIterGlobal = vu.getNextIterations() } // RunOnce runs the mock default function once, incrementing its iteration. From 19ae8de89b653b06bc9783ade25dd069f91d090c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Fri, 11 Jun 2021 14:33:47 +0200 Subject: [PATCH 19/22] Accept ExecutionTuple in SegmentedIndex constructor Resolves https://github.com/k6io/k6/pull/1863#discussion_r649899233 --- lib/execution_segment.go | 5 +++-- lib/executor/base_executor.go | 3 +-- lib/executor/constant_arrival_rate.go | 3 +-- lib/executor/ramping_arrival_rate.go | 3 +-- lib/executor/ramping_vus.go | 9 ++++----- lib/executor/shared_iterations.go | 3 +-- 6 files changed, 11 insertions(+), 15 deletions(-) diff --git a/lib/execution_segment.go b/lib/execution_segment.go index b42f3779d17..0cf28adf153 100644 --- a/lib/execution_segment.go +++ b/lib/execution_segment.go @@ -741,8 +741,9 @@ type SegmentedIndex struct { } // NewSegmentedIndex returns a pointer to a new SegmentedIndex instance, -// given a starting index, LCD and offsets as returned by GetStripedOffsets(). -func NewSegmentedIndex(start, lcd int64, offsets []int64) *SegmentedIndex { +// given an ExecutionTuple. 
+func NewSegmentedIndex(et *ExecutionTuple) *SegmentedIndex { + start, offsets, lcd := et.GetStripedOffsets() return &SegmentedIndex{start: start, lcd: lcd, offsets: offsets} } diff --git a/lib/executor/base_executor.go b/lib/executor/base_executor.go index 0c9de1c0208..44943a94dbf 100644 --- a/lib/executor/base_executor.go +++ b/lib/executor/base_executor.go @@ -49,8 +49,7 @@ type BaseExecutor struct { // NewBaseExecutor returns an initialized BaseExecutor func NewBaseExecutor(config lib.ExecutorConfig, es *lib.ExecutionState, logger *logrus.Entry) *BaseExecutor { - start, offsets, lcd := es.ExecutionTuple.GetStripedOffsets() - segIdx := lib.NewSegmentedIndex(start, lcd, offsets) + segIdx := lib.NewSegmentedIndex(es.ExecutionTuple) return &BaseExecutor{ config: config, executionState: es, diff --git a/lib/executor/constant_arrival_rate.go b/lib/executor/constant_arrival_rate.go index c51c34198fd..5303924acf1 100644 --- a/lib/executor/constant_arrival_rate.go +++ b/lib/executor/constant_arrival_rate.go @@ -196,8 +196,7 @@ func (car *ConstantArrivalRate) Init(ctx context.Context) error { // with no work, as determined by their config's HasWork() method. et, err := car.BaseExecutor.executionState.ExecutionTuple.GetNewExecutionTupleFromValue(car.config.MaxVUs.Int64) car.et = et - start, offsets, lcd := et.GetStripedOffsets() - car.iterSegIndex = lib.NewSegmentedIndex(start, lcd, offsets) + car.iterSegIndex = lib.NewSegmentedIndex(et) return err } diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index 7b023a3b1e8..935385c0394 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ -187,8 +187,7 @@ func (varr *RampingArrivalRate) Init(ctx context.Context) error { // with no work, as determined by their config's HasWork() method. 
et, err := varr.BaseExecutor.executionState.ExecutionTuple.GetNewExecutionTupleFromValue(varr.config.MaxVUs.Int64) varr.et = et - start, offsets, lcd := et.GetStripedOffsets() - varr.iterSegIndex = lib.NewSegmentedIndex(start, lcd, offsets) + varr.iterSegIndex = lib.NewSegmentedIndex(et) return err //nolint: wrapcheck } diff --git a/lib/executor/ramping_vus.go b/lib/executor/ramping_vus.go index 29f34a6d15f..8aee2187a85 100644 --- a/lib/executor/ramping_vus.go +++ b/lib/executor/ramping_vus.go @@ -187,11 +187,10 @@ func (vlvc RampingVUsConfig) Validate() []error { // More information: https://github.com/k6io/k6/issues/997#issuecomment-484416866 func (vlvc RampingVUsConfig) getRawExecutionSteps(et *lib.ExecutionTuple, zeroEnd bool) []lib.ExecutionStep { var ( - timeTillEnd time.Duration - fromVUs = vlvc.StartVUs.Int64 - start, offsets, lcd = et.GetStripedOffsets() - steps = make([]lib.ExecutionStep, 0, vlvc.precalculateTheRequiredSteps(et, zeroEnd)) - index = lib.NewSegmentedIndex(start, lcd, offsets) + timeTillEnd time.Duration + fromVUs = vlvc.StartVUs.Int64 + steps = make([]lib.ExecutionStep, 0, vlvc.precalculateTheRequiredSteps(et, zeroEnd)) + index = lib.NewSegmentedIndex(et) ) // Reserve the scaled StartVUs at the beginning diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go index fc9107cdda3..00c835d64a8 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -176,8 +176,7 @@ func (si *SharedIterations) Init(ctx context.Context) error { // with no work, as determined by their config's HasWork() method. 
et, err := si.BaseExecutor.executionState.ExecutionTuple.GetNewExecutionTupleFromValue(si.config.VUs.Int64) si.et = et - start, offsets, lcd := et.GetStripedOffsets() - si.iterSegIndex = lib.NewSegmentedIndex(start, lcd, offsets) + si.iterSegIndex = lib.NewSegmentedIndex(et) return err } From ea2ebb190503d24df80b160ad522e2bfbd0a9afc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Tue, 15 Jun 2021 15:47:33 +0200 Subject: [PATCH 20/22] Add global VU IDs Resolves https://github.com/k6io/k6/pull/1863#discussion_r650981134 --- core/local/local.go | 10 ++-- js/console_test.go | 4 +- js/empty_iteartions_bench_test.go | 2 +- js/http_bench_test.go | 4 +- js/init_and_modules_test.go | 7 ++- js/module_loading_test.go | 20 +++---- js/modules/k6/marshalling_test.go | 2 +- js/runner.go | 47 ++++++++------- js/runner_test.go | 66 ++++++++++---------- js/share_test.go | 2 +- lib/execution.go | 29 +++++---- lib/executor/common_test.go | 3 +- lib/executor/constant_vus_test.go | 4 +- lib/executor/execution_test.go | 73 +++++++++++++++++------ lib/executor/per_vu_iterations_test.go | 10 ++-- lib/executor/ramping_arrival_rate_test.go | 6 +- lib/executor/ramping_vus_test.go | 2 +- lib/executor/shared_iterations_test.go | 8 +-- lib/executor/vu_handle_test.go | 8 +-- lib/netext/httpext/request.go | 2 +- lib/runner.go | 2 +- lib/state.go | 6 +- lib/testutils/minirunner/minirunner.go | 22 +++---- 23 files changed, 196 insertions(+), 143 deletions(-) diff --git a/core/local/local.go b/core/local/local.go index 85cb1e7a497..8a5aec3715e 100644 --- a/core/local/local.go +++ b/core/local/local.go @@ -157,15 +157,15 @@ func (e *ExecutionScheduler) GetExecutionPlan() []lib.ExecutionStep { func (e *ExecutionScheduler) initVU( samplesOut chan<- stats.SampleContainer, logger *logrus.Entry, ) (lib.InitializedVU, error) { - // Get the VU ID here, so that the VUs are (mostly) ordered by their + // Get the VU IDs here, so that the VUs are (mostly) ordered by their // number in the 
channel buffer - vuID := e.state.GetUniqueVUIdentifier() - vu, err := e.runner.NewVU(vuID, samplesOut) + vuIDLocal, vuIDGlobal := e.state.GetUniqueVUIdentifiers() + vu, err := e.runner.NewVU(vuIDLocal, vuIDGlobal, samplesOut) if err != nil { - return nil, errext.WithHint(err, fmt.Sprintf("error while initializing VU #%d", vuID)) + return nil, errext.WithHint(err, fmt.Sprintf("error while initializing VU #%d", vuIDGlobal)) } - logger.Debugf("Initialized VU #%d", vuID) + logger.Debugf("Initialized VU #%d", vuIDGlobal) return vu, nil } diff --git a/js/console_test.go b/js/console_test.go index 49d94ef4b13..a243eeb729b 100644 --- a/js/console_test.go +++ b/js/console_test.go @@ -143,7 +143,7 @@ func TestConsole(t *testing.T) { assert.NoError(t, err) samples := make(chan stats.SampleContainer, 100) - initVU, err := r.newVU(1, samples) + initVU, err := r.newVU(1, 1, samples) assert.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -245,7 +245,7 @@ func TestFileConsole(t *testing.T) { assert.NoError(t, err) samples := make(chan stats.SampleContainer, 100) - initVU, err := r.newVU(1, samples) + initVU, err := r.newVU(1, 1, samples) assert.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) diff --git a/js/empty_iteartions_bench_test.go b/js/empty_iteartions_bench_test.go index 496f0826907..2c9ef01c2a8 100644 --- a/js/empty_iteartions_bench_test.go +++ b/js/empty_iteartions_bench_test.go @@ -26,7 +26,7 @@ func BenchmarkEmptyIteration(b *testing.B) { for range ch { } }() - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) if !assert.NoError(b, err) { return } diff --git a/js/http_bench_test.go b/js/http_bench_test.go index 89ca116b4f6..99415406f13 100644 --- a/js/http_bench_test.go +++ b/js/http_bench_test.go @@ -62,7 +62,7 @@ func BenchmarkHTTPRequests(b *testing.B) { for range ch { } }() - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) if !assert.NoError(b, err) { return } @@ -105,7 +105,7 @@ 
func BenchmarkHTTPRequestsBase(b *testing.B) { for range ch { } }() - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) if !assert.NoError(b, err) { return } diff --git a/js/init_and_modules_test.go b/js/init_and_modules_test.go index 201f7f9a9a9..beef72ccb80 100644 --- a/js/init_and_modules_test.go +++ b/js/init_and_modules_test.go @@ -31,6 +31,8 @@ import ( "github.com/spf13/afero" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + "go.k6.io/k6/js" "go.k6.io/k6/js/common" "go.k6.io/k6/js/modules" @@ -38,7 +40,6 @@ import ( "go.k6.io/k6/lib/testutils" "go.k6.io/k6/loader" "go.k6.io/k6/stats" - "gopkg.in/guregu/null.v3" ) type CheckModule struct { @@ -94,7 +95,7 @@ func TestNewJSRunnerWithCustomModule(t *testing.T) { assert.Equal(t, checkModule.initCtxCalled, 1) assert.Equal(t, checkModule.vuCtxCalled, 0) - vu, err := runner.NewVU(1, make(chan stats.SampleContainer, 100)) + vu, err := runner.NewVU(1, 1, make(chan stats.SampleContainer, 100)) require.NoError(t, err) assert.Equal(t, checkModule.initCtxCalled, 2) assert.Equal(t, checkModule.vuCtxCalled, 0) @@ -118,7 +119,7 @@ func TestNewJSRunnerWithCustomModule(t *testing.T) { require.NoError(t, err) assert.Equal(t, checkModule.initCtxCalled, 3) // changes because we need to get the exported functions assert.Equal(t, checkModule.vuCtxCalled, 2) - vuFromArc, err := runnerFromArc.NewVU(2, make(chan stats.SampleContainer, 100)) + vuFromArc, err := runnerFromArc.NewVU(2, 2, make(chan stats.SampleContainer, 100)) require.NoError(t, err) assert.Equal(t, checkModule.initCtxCalled, 4) assert.Equal(t, checkModule.vuCtxCalled, 2) diff --git a/js/module_loading_test.go b/js/module_loading_test.go index a1f93a4e3e6..d65489e6da1 100644 --- a/js/module_loading_test.go +++ b/js/module_loading_test.go @@ -116,7 +116,7 @@ func TestLoadOnceGlobalVars(t *testing.T) { t.Parallel() ch := newDevNullSampleChannel() defer close(ch) - initVU, err := r.NewVU(1, ch) + initVU, 
err := r.NewVU(1, 1, ch) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -167,7 +167,7 @@ func TestLoadExportsIsUsableInModule(t *testing.T) { t.Parallel() ch := newDevNullSampleChannel() defer close(ch) - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) ctx, cancel := context.WithCancel(context.Background()) defer cancel() vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) @@ -215,7 +215,7 @@ func TestLoadDoesntBreakHTTPGet(t *testing.T) { t.Parallel() ch := newDevNullSampleChannel() defer close(ch) - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -261,7 +261,7 @@ func TestLoadGlobalVarsAreNotSharedBetweenVUs(t *testing.T) { t.Parallel() ch := newDevNullSampleChannel() defer close(ch) - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -270,7 +270,7 @@ func TestLoadGlobalVarsAreNotSharedBetweenVUs(t *testing.T) { require.NoError(t, err) // run a second VU - initVU, err = r.NewVU(2, ch) + initVU, err = r.NewVU(2, 2, ch) require.NoError(t, err) ctx, cancel = context.WithCancel(context.Background()) defer cancel() @@ -326,7 +326,7 @@ func TestLoadCycle(t *testing.T) { t.Parallel() ch := newDevNullSampleChannel() defer close(ch) - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -389,7 +389,7 @@ func TestLoadCycleBinding(t *testing.T) { t.Parallel() ch := newDevNullSampleChannel() defer close(ch) - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -455,7 +455,7 @@ func TestBrowserified(t *testing.T) { t.Parallel() ch := make(chan stats.SampleContainer, 100) defer 
close(ch) - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -499,7 +499,7 @@ func TestLoadingUnexistingModuleDoesntPanic(t *testing.T) { t.Parallel() ch := newDevNullSampleChannel() defer close(ch) - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -534,7 +534,7 @@ func TestLoadingSourceMapsDoesntErrorOut(t *testing.T) { t.Parallel() ch := newDevNullSampleChannel() defer close(ch) - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/js/modules/k6/marshalling_test.go b/js/modules/k6/marshalling_test.go index 6b7acf763f4..b5ebca9417c 100644 --- a/js/modules/k6/marshalling_test.go +++ b/js/modules/k6/marshalling_test.go @@ -136,7 +136,7 @@ func TestSetupDataMarshalling(t *testing.T) { if !assert.NoError(t, runner.Setup(context.Background(), samples)) { return } - initVU, err := runner.NewVU(1, samples) + initVU, err := runner.NewVU(1, 1, samples) if assert.NoError(t, err) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/js/runner.go b/js/runner.go index 4468ee90ea5..f6d89ffd397 100644 --- a/js/runner.go +++ b/js/runner.go @@ -126,8 +126,8 @@ func (r *Runner) MakeArchive() *lib.Archive { } // NewVU returns a new initialized VU. -func (r *Runner) NewVU(id uint64, samplesOut chan<- stats.SampleContainer) (lib.InitializedVU, error) { - vu, err := r.newVU(id, samplesOut) +func (r *Runner) NewVU(idLocal, idGlobal uint64, samplesOut chan<- stats.SampleContainer) (lib.InitializedVU, error) { + vu, err := r.newVU(idLocal, idGlobal, samplesOut) if err != nil { return nil, err } @@ -135,9 +135,9 @@ func (r *Runner) NewVU(id uint64, samplesOut chan<- stats.SampleContainer) (lib. 
} // nolint:funlen -func (r *Runner) newVU(id uint64, samplesOut chan<- stats.SampleContainer) (*VU, error) { +func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- stats.SampleContainer) (*VU, error) { // Instantiate a new bundle, make a VU out of it. - bi, err := r.Bundle.Instantiate(r.Logger, id) + bi, err := r.Bundle.Instantiate(r.Logger, idLocal) if err != nil { return nil, err } @@ -175,8 +175,8 @@ func (r *Runner) newVU(id uint64, samplesOut chan<- stats.SampleContainer) (*VU, } if r.Bundle.Options.LocalIPs.Valid { var ipIndex uint64 - if id > 0 { - ipIndex = uint64(id - 1) + if idLocal > 0 { + ipIndex = idLocal - 1 } dialer.Dialer.LocalAddr = &net.TCPAddr{IP: r.Bundle.Options.LocalIPs.Pool.GetIP(ipIndex)} } @@ -207,7 +207,8 @@ func (r *Runner) newVU(id uint64, samplesOut chan<- stats.SampleContainer) (*VU, } vu := &VU{ - ID: id, + ID: idLocal, + IDGlobal: idGlobal, iteration: int64(-1), BundleInstance: *bi, Runner: r, @@ -223,18 +224,19 @@ func (r *Runner) newVU(id uint64, samplesOut chan<- stats.SampleContainer) (*VU, } vu.state = &lib.State{ - Logger: vu.Runner.Logger, - Options: vu.Runner.Bundle.Options, - Transport: vu.Transport, - Dialer: vu.Dialer, - TLSConfig: vu.TLSConfig, - CookieJar: cookieJar, - RPSLimit: vu.Runner.RPSLimit, - BPool: vu.BPool, - Vu: vu.ID, - Samples: vu.Samples, - Tags: vu.Runner.Bundle.Options.RunTags.CloneTags(), - Group: r.defaultGroup, + Logger: vu.Runner.Logger, + Options: vu.Runner.Bundle.Options, + Transport: vu.Transport, + Dialer: vu.Dialer, + TLSConfig: vu.TLSConfig, + CookieJar: cookieJar, + RPSLimit: vu.Runner.RPSLimit, + BPool: vu.BPool, + VUID: vu.ID, + VUIDGlobal: vu.IDGlobal, + Samples: vu.Samples, + Tags: vu.Runner.Bundle.Options.RunTags.CloneTags(), + Group: r.defaultGroup, } vu.Runtime.Set("console", common.Bind(vu.Runtime, vu.Console, vu.Context)) @@ -325,7 +327,7 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary) (map[s } }() - vu, err := r.newVU(0, out) + vu, err := 
r.newVU(0, 0, out) if err != nil { return nil, err } @@ -463,7 +465,7 @@ func parseTTL(ttlS string) (time.Duration, error) { // Runs an exported function in its own temporary VU, optionally with an argument. Execution is // interrupted if the context expires. No error is returned if the part does not exist. func (r *Runner) runPart(ctx context.Context, out chan<- stats.SampleContainer, name string, arg interface{}) (goja.Value, error) { - vu, err := r.newVU(0, out) + vu, err := r.newVU(0, 0, out) if err != nil { return goja.Undefined(), err } @@ -533,7 +535,8 @@ type VU struct { Dialer *netext.Dialer CookieJar *cookiejar.Jar TLSConfig *tls.Config - ID uint64 + ID uint64 // local to the current instance + IDGlobal uint64 // global across all instances iteration int64 Console *console diff --git a/js/runner_test.go b/js/runner_test.go index 84d9fff35ab..473ee1810a0 100644 --- a/js/runner_test.go +++ b/js/runner_test.go @@ -77,7 +77,7 @@ func TestRunnerNew(t *testing.T) { t.Run("NewVU", func(t *testing.T) { t.Parallel() - initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) assert.NoError(t, err) vuc, ok := initVU.(*VU) assert.True(t, ok) @@ -172,7 +172,7 @@ func TestOptionsSettingToScript(t *testing.T) { require.Equal(t, newOptions, r.GetOptions()) samples := make(chan stats.SampleContainer, 100) - initVU, err := r.NewVU(1, samples) + initVU, err := r.NewVU(1, 1, samples) if assert.NoError(t, err) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -223,7 +223,7 @@ func TestOptionsPropagationToScript(t *testing.T) { t.Parallel() samples := make(chan stats.SampleContainer, 100) - initVU, err := r.NewVU(1, samples) + initVU, err := r.NewVU(1, 1, samples) if assert.NoError(t, err) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -356,7 +356,7 @@ func testSetupDataHelper(t *testing.T, data string) { if !assert.NoError(t, 
r.Setup(context.Background(), samples)) { return } - initVU, err := r.NewVU(1, samples) + initVU, err := r.NewVU(1, 1, samples) if assert.NoError(t, err) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -421,7 +421,7 @@ func TestConsoleInInitContext(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() samples := make(chan stats.SampleContainer, 100) - initVU, err := r.NewVU(1, samples) + initVU, err := r.NewVU(1, 1, samples) if assert.NoError(t, err) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -504,7 +504,7 @@ func TestRunnerIntegrationImports(t *testing.T) { for name, r := range testdata { r := r t.Run(name, func(t *testing.T) { - initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -535,7 +535,7 @@ func TestVURunContext(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - vu, err := r.newVU(1, make(chan stats.SampleContainer, 100)) + vu, err := r.newVU(1, 1, make(chan stats.SampleContainer, 100)) require.NoError(t, err) fnCalled := false @@ -586,7 +586,7 @@ func TestVURunInterrupt(t *testing.T) { } }() - vu, err := r.newVU(1, samples) + vu, err := r.newVU(1, 1, samples) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) @@ -624,7 +624,7 @@ func TestVURunInterruptDoesntPanic(t *testing.T) { }() var wg sync.WaitGroup - initVU, err := r.newVU(1, samples) + initVU, err := r.newVU(1, 1, samples) require.NoError(t, err) for i := 0; i < 1000; i++ { wg.Add(1) @@ -673,7 +673,7 @@ func TestVUIntegrationGroups(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - vu, err := r.newVU(1, make(chan stats.SampleContainer, 100)) + vu, err := r.newVU(1, 1, make(chan stats.SampleContainer, 100)) require.NoError(t, err) fnOuterCalled := false @@ -728,7 
+728,7 @@ func TestVUIntegrationMetrics(t *testing.T) { t.Parallel() samples := make(chan stats.SampleContainer, 100) defer close(samples) - vu, err := r.newVU(1, samples) + vu, err := r.newVU(1, 1, samples) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -803,7 +803,7 @@ func TestVUIntegrationInsecureRequests(t *testing.T) { t.Parallel() r.Logger, _ = logtest.NewNullLogger() - initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -846,7 +846,7 @@ func TestVUIntegrationBlacklistOption(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -881,7 +881,7 @@ func TestVUIntegrationBlacklistScript(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -917,7 +917,7 @@ func TestVUIntegrationBlockHostnamesOption(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - initVu, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVu, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) require.NoError(t, err) vu := initVu.Activate(&lib.VUActivationParams{RunContext: context.Background()}) err = vu.RunOnce() @@ -950,7 +950,7 @@ func TestVUIntegrationBlockHostnamesScript(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - initVu, err := r.NewVU(0, make(chan stats.SampleContainer, 100)) + initVu, err := r.NewVU(0, 0, make(chan 
stats.SampleContainer, 100)) require.NoError(t, err) vu := initVu.Activate(&lib.VUActivationParams{RunContext: context.Background()}) err = vu.RunOnce() @@ -994,7 +994,7 @@ func TestVUIntegrationHosts(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1065,7 +1065,7 @@ func TestVUIntegrationTLSConfig(t *testing.T) { t.Parallel() r.Logger, _ = logtest.NewNullLogger() - initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1090,7 +1090,7 @@ func TestVUIntegrationOpenFunctionError(t *testing.T) { `) assert.NoError(t, err) - initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) assert.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1108,7 +1108,7 @@ func TestVUIntegrationOpenFunctionErrorWhenSneaky(t *testing.T) { `) assert.NoError(t, err) - initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) assert.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1154,7 +1154,7 @@ func TestVUIntegrationCookiesReset(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1208,7 +1208,7 @@ func TestVUIntegrationCookiesNoReset(t *testing.T) { r := r t.Run(name, func(t *testing.T) { 
t.Parallel() - initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1241,7 +1241,7 @@ func TestVUIntegrationVUID(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - initVU, err := r.NewVU(1234, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1234, 1234, make(chan stats.SampleContainer, 100)) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1335,7 +1335,7 @@ func TestVUIntegrationClientCerts(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() r.Logger, _ = logtest.NewNullLogger() - initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) if assert.NoError(t, err) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1392,7 +1392,7 @@ func TestVUIntegrationClientCerts(t *testing.T) { for name, r := range runners { r := r t.Run(name, func(t *testing.T) { - initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1, make(chan stats.SampleContainer, 100)) if assert.NoError(t, err) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1543,7 +1543,7 @@ func TestArchiveRunningIntegrity(t *testing.T) { ch := make(chan stats.SampleContainer, 100) err = r.Setup(context.Background(), ch) require.NoError(t, err) - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1619,7 +1619,7 @@ func TestStuffNotPanicking(t *testing.T) { require.NoError(t, err) ch := make(chan stats.SampleContainer, 1000) - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1653,7 +1653,7 @@ func 
TestPanicOnSimpleHTML(t *testing.T) { require.NoError(t, err) ch := make(chan stats.SampleContainer, 1000) - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -1732,7 +1732,7 @@ func TestSystemTags(t *testing.T) { InsecureSkipTLSVerify: null.BoolFrom(true), }))) - vu, err := r.NewVU(uint64(num), samples) + vu, err := r.NewVU(uint64(num), 0, samples) require.NoError(t, err) activeVU := vu.Activate(&lib.VUActivationParams{ RunContext: context.Background(), @@ -1777,7 +1777,7 @@ func TestVUPanic(t *testing.T) { r := r t.Run(name, func(t *testing.T) { t.Parallel() - initVU, err := r.NewVU(1234, make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, 1234, make(chan stats.SampleContainer, 100)) require.NoError(t, err) logger := logrus.New() @@ -1848,7 +1848,7 @@ func runMultiFileTestCase(t *testing.T, tc multiFileTestCase, tb *httpmultibin.H options := runner.GetOptions() require.Empty(t, options.Validate()) - vu, err := runner.NewVU(1, tc.samples) + vu, err := runner.NewVU(1, 1, tc.samples) require.NoError(t, err) jsVU, ok := vu.(*VU) @@ -1870,7 +1870,7 @@ func runMultiFileTestCase(t *testing.T, tc multiFileTestCase, tb *httpmultibin.H arc := runner.MakeArchive() runnerFromArc, err := NewFromArchive(logger, arc, tc.rtOpts) require.NoError(t, err) - vuFromArc, err := runnerFromArc.NewVU(2, tc.samples) + vuFromArc, err := runnerFromArc.NewVU(2, 2, tc.samples) require.NoError(t, err) jsVUFromArc, ok := vuFromArc.(*VU) require.True(t, ok) @@ -2002,7 +2002,7 @@ func TestMinIterationDurationIsCancellable(t *testing.T) { require.NoError(t, err) ch := make(chan stats.SampleContainer, 1000) - initVU, err := r.NewVU(1, ch) + initVU, err := r.NewVU(1, 1, ch) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) diff --git a/js/share_test.go b/js/share_test.go index bd1593cf46d..4417f19ae38 100644 --- a/js/share_test.go +++ b/js/share_test.go @@ 
-95,7 +95,7 @@ exports.default = function() { t.Run(name, func(t *testing.T) { t.Parallel() samples := make(chan stats.SampleContainer, 100) - initVU, err := r.NewVU(1, samples) + initVU, err := r.NewVU(1, 1, samples) if assert.NoError(t, err) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/lib/execution.go b/lib/execution.go index 7eb94a00171..af23ce13de1 100644 --- a/lib/execution.go +++ b/lib/execution.go @@ -176,10 +176,11 @@ type ExecutionState struct { // MaxTimeToWaitForPlannedVU. vus chan InitializedVU - // The current VU ID, used for the __VU execution context variable. Use the - // GetUniqueVUIdentifier() to get unique values for each VU, starting from 1 - // (for backwards compatibility...) - currentVUIdentifier *uint64 + // The segmented index used to generate unique local (current k6 instance) + // and global (across k6 instances) VU IDs, starting from 1 + // (for backwards compatibility...). + vuIDSegIndexMx *sync.Mutex + vuIDSegIndex *SegmentedIndex // TODO: add something similar, but for iterations? Currently, there isn't // a straightforward way to get a unique sequential identifier per iteration @@ -187,8 +188,8 @@ type ExecutionState struct { // a unique identifier, but it's unwieldy and somewhat cumbersome. // Total number of currently initialized VUs. Generally equal to - // currentVUIdentifier minus 1, since initializedVUs starts from 0 and is - // incremented only after a VU is initialized, while CurrentVUIdentifier is + // the VU ID minus 1, since initializedVUs starts from 0 and is + // incremented only after a VU is initialized, while the VU ID is // incremented before a VU is initialized. It should always be greater than // or equal to 0, but int64 is used for simplification of the used atomic // arithmetic operations. 
@@ -277,12 +278,14 @@ func NewExecutionState(options Options, et *ExecutionTuple, maxPlannedVUs, maxPo maxUnplannedUninitializedVUs := int64(maxPossibleVUs - maxPlannedVUs) + segIdx := NewSegmentedIndex(et) return &ExecutionState{ Options: options, vus: make(chan InitializedVU, maxPossibleVUs), executionStatus: new(uint32), - currentVUIdentifier: new(uint64), + vuIDSegIndexMx: new(sync.Mutex), + vuIDSegIndex: segIdx, initializedVUs: new(int64), uninitializedUnplannedVUs: &maxUnplannedUninitializedVUs, activeVUs: new(int64), @@ -298,10 +301,14 @@ func NewExecutionState(options Options, et *ExecutionTuple, maxPlannedVUs, maxPo } } -// GetUniqueVUIdentifier returns an auto-incrementing unique VU ID, used for __VU. -// It starts from 1 (for backwards compatibility...) -func (es *ExecutionState) GetUniqueVUIdentifier() uint64 { - return atomic.AddUint64(es.currentVUIdentifier, 1) +// GetUniqueVUIdentifiers returns the next unique VU IDs, both local (for the +// current instance, exposed as __VU) and global (across k6 instances, exposed +// in the k6/execution module). It starts from 1, for backwards compatibility. +func (es *ExecutionState) GetUniqueVUIdentifiers() (uint64, uint64) { + es.vuIDSegIndexMx.Lock() + defer es.vuIDSegIndexMx.Unlock() + scaled, unscaled := es.vuIDSegIndex.Next() + return uint64(scaled), uint64(unscaled) } // GetInitializedVUsCount returns the total number of currently initialized VUs. 
diff --git a/lib/executor/common_test.go b/lib/executor/common_test.go index a2aba342460..08d4c2a7a60 100644 --- a/lib/executor/common_test.go +++ b/lib/executor/common_test.go @@ -55,7 +55,8 @@ func setupExecutor(t testing.TB, config lib.ExecutorConfig, es *lib.ExecutionSta logEntry := logrus.NewEntry(testLog) initVUFunc := func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { - return runner.NewVU(es.GetUniqueVUIdentifier(), engineOut) //nolint: wrapcheck + idl, idg := es.GetUniqueVUIdentifiers() + return runner.NewVU(idl, idg, engineOut) } es.SetInitVUFunc(initVUFunc) diff --git a/lib/executor/constant_vus_test.go b/lib/executor/constant_vus_test.go index 06c8ce1ce06..4ee339c314a 100644 --- a/lib/executor/constant_vus_test.go +++ b/lib/executor/constant_vus_test.go @@ -57,8 +57,8 @@ func TestConstantVUsRun(t *testing.T) { default: } state := lib.GetState(ctx) - currIter, _ := result.LoadOrStore(state.Vu, uint64(0)) - result.Store(state.Vu, currIter.(uint64)+1) + currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) + result.Store(state.VUID, currIter.(uint64)+1) time.Sleep(210 * time.Millisecond) return nil }), diff --git a/lib/executor/execution_test.go b/lib/executor/execution_test.go index d2724ee42c3..91c9c3029b4 100644 --- a/lib/executor/execution_test.go +++ b/lib/executor/execution_test.go @@ -22,6 +22,7 @@ package executor import ( "context" + "fmt" "io/ioutil" "math/rand" "sync" @@ -39,24 +40,62 @@ import ( func TestExecutionStateVUIDs(t *testing.T) { t.Parallel() - et, err := lib.NewExecutionTuple(nil, nil) - require.NoError(t, err) - es := lib.NewExecutionState(lib.Options{}, et, 0, 0) - assert.Equal(t, uint64(1), es.GetUniqueVUIdentifier()) - assert.Equal(t, uint64(2), es.GetUniqueVUIdentifier()) - assert.Equal(t, uint64(3), es.GetUniqueVUIdentifier()) - wg := sync.WaitGroup{} - rand.Seed(time.Now().UnixNano()) - count := 100 + rand.Intn(50) - wg.Add(count) - for i := 0; i < count; i++ { - go func() { - 
es.GetUniqueVUIdentifier() - wg.Done() - }() + + testCases := []struct { + seq, seg string + }{ + {}, + {seq: "0,1/4,3/4,1", seg: "0:1/4"}, + {seq: "0,0.3,0.5,0.6,0.7,0.8,0.9,1", seg: "0.5:0.6"}, + } + + for _, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("seq:%s;segment:%s", tc.seq, tc.seg), func(t *testing.T) { + t.Parallel() + ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) + require.NoError(t, err) + segment, err := lib.NewExecutionSegmentFromString(tc.seg) + require.NoError(t, err) + et, err := lib.NewExecutionTuple(segment, &ess) + require.NoError(t, err) + + start, offsets, _ := et.GetStripedOffsets() + es := lib.NewExecutionState(lib.Options{}, et, 0, 0) + + idl, idg := es.GetUniqueVUIdentifiers() + assert.Equal(t, uint64(1), idl) + expGlobal := start + 1 + assert.Equal(t, uint64(expGlobal), idg) + + idl, idg = es.GetUniqueVUIdentifiers() + assert.Equal(t, uint64(2), idl) + expGlobal += offsets[0] + assert.Equal(t, uint64(expGlobal), idg) + + idl, idg = es.GetUniqueVUIdentifiers() + assert.Equal(t, uint64(3), idl) + expGlobal += offsets[0] + assert.Equal(t, uint64(expGlobal), idg) + + seed := time.Now().UnixNano() + r := rand.New(rand.NewSource(seed)) //nolint:gosec + t.Logf("Random source seeded with %d\n", seed) + count := 100 + r.Intn(50) + wg := sync.WaitGroup{} + wg.Add(count) + for i := 0; i < count; i++ { + go func() { + es.GetUniqueVUIdentifiers() + wg.Done() + }() + } + wg.Wait() + idl, idg = es.GetUniqueVUIdentifiers() + assert.Equal(t, uint64(4+count), idl) + assert.Equal(t, uint64((3+count)*int(offsets[0])+int(start+1)), idg) + }) } - wg.Wait() - assert.Equal(t, uint64(4+count), es.GetUniqueVUIdentifier()) } func TestExecutionStateGettingVUsWhenNonAreAvailable(t *testing.T) { diff --git a/lib/executor/per_vu_iterations_test.go b/lib/executor/per_vu_iterations_test.go index 06895035051..4f97e14f6a2 100644 --- a/lib/executor/per_vu_iterations_test.go +++ b/lib/executor/per_vu_iterations_test.go @@ -57,8 +57,8 @@ func 
TestPerVUIterationsRun(t *testing.T) { t, getTestPerVUIterationsConfig(), es, simpleRunner(func(ctx context.Context) error { state := lib.GetState(ctx) - currIter, _ := result.LoadOrStore(state.Vu, uint64(0)) - result.Store(state.Vu, currIter.(uint64)+1) + currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) + result.Store(state.VUID, currIter.(uint64)+1) return nil }), ) @@ -92,11 +92,11 @@ func TestPerVUIterationsRunVariableVU(t *testing.T) { t, getTestPerVUIterationsConfig(), es, simpleRunner(func(ctx context.Context) error { state := lib.GetState(ctx) - if state.Vu == slowVUID { + if state.VUID == slowVUID { time.Sleep(200 * time.Millisecond) } - currIter, _ := result.LoadOrStore(state.Vu, uint64(0)) - result.Store(state.Vu, currIter.(uint64)+1) + currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) + result.Store(state.VUID, currIter.(uint64)+1) return nil }), ) diff --git a/lib/executor/ramping_arrival_rate_test.go b/lib/executor/ramping_arrival_rate_test.go index 15572607a6a..1403f351f9d 100644 --- a/lib/executor/ramping_arrival_rate_test.go +++ b/lib/executor/ramping_arrival_rate_test.go @@ -185,7 +185,8 @@ func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) { time.Sleep(time.Millisecond * 200) cur = atomic.LoadInt64(&count) require.NotEqual(t, cur, int64(2)) - return runner.NewVU(es.GetUniqueVUIdentifier(), engineOut) //nolint: wrapcheck + idl, idg := es.GetUniqueVUIdentifiers() + return runner.NewVU(idl, idg, engineOut) }) err = executor.Run(ctx, engineOut) assert.NoError(t, err) @@ -235,7 +236,8 @@ func TestRampingArrivalRateRunCorrectRateWithSlowRate(t *testing.T) { cur = atomic.LoadInt64(&count) require.NotEqual(t, cur, int64(1)) - return runner.NewVU(es.GetUniqueVUIdentifier(), engineOut) //nolint: wrapcheck + idl, idg := es.GetUniqueVUIdentifiers() + return runner.NewVU(idl, idg, engineOut) }) err = executor.Run(ctx, engineOut) assert.NoError(t, err) diff --git a/lib/executor/ramping_vus_test.go b/lib/executor/ramping_vus_test.go index 
28af6f1366c..22fcaf06a1a 100644 --- a/lib/executor/ramping_vus_test.go +++ b/lib/executor/ramping_vus_test.go @@ -248,7 +248,7 @@ func TestRampingVUsGracefulRampDown(t *testing.T) { ctx, cancel, executor, _ := setupExecutor( t, config, es, simpleRunner(func(ctx context.Context) error { - if lib.GetState(ctx).Vu == 1 { // the first VU will wait here to do stuff + if lib.GetState(ctx).VUID == 1 { // the first VU will wait here to do stuff close(started) defer close(stopped) select { diff --git a/lib/executor/shared_iterations_test.go b/lib/executor/shared_iterations_test.go index 692568c0b3c..ec2fed47715 100644 --- a/lib/executor/shared_iterations_test.go +++ b/lib/executor/shared_iterations_test.go @@ -87,13 +87,13 @@ func TestSharedIterationsRunVariableVU(t *testing.T) { // Pick one VU randomly and always slow it down. sid := atomic.LoadUint64(&slowVUID) if sid == uint64(0) { - atomic.StoreUint64(&slowVUID, state.Vu) + atomic.StoreUint64(&slowVUID, state.VUID) } - if sid == state.Vu { + if sid == state.VUID { time.Sleep(200 * time.Millisecond) } - currIter, _ := result.LoadOrStore(state.Vu, uint64(0)) - result.Store(state.Vu, currIter.(uint64)+1) + currIter, _ := result.LoadOrStore(state.VUID, uint64(0)) + result.Store(state.VUID, currIter.(uint64)+1) return nil }), ) diff --git a/lib/executor/vu_handle_test.go b/lib/executor/vu_handle_test.go index c16e2ee27a5..425d6647a66 100644 --- a/lib/executor/vu_handle_test.go +++ b/lib/executor/vu_handle_test.go @@ -42,7 +42,7 @@ func TestVUHandleRace(t *testing.T) { var getVUCount int64 var returnVUCount int64 getVU := func() (lib.InitializedVU, error) { - return runner.NewVU(uint64(atomic.AddInt64(&getVUCount, 1)), nil) + return runner.NewVU(uint64(atomic.AddInt64(&getVUCount, 1)), 0, nil) } returnVU := func(_ lib.InitializedVU) { @@ -134,7 +134,7 @@ func TestVUHandleStartStopRace(t *testing.T) { getVU := func() (lib.InitializedVU, error) { returned = make(chan struct{}) - return runner.NewVU(atomic.AddUint64(&vuID, 1), 
nil) + return runner.NewVU(atomic.AddUint64(&vuID, 1), 0, nil) } returnVU := func(v lib.InitializedVU) { @@ -196,7 +196,7 @@ type handleVUTest struct { } func (h *handleVUTest) getVU() (lib.InitializedVU, error) { - return h.runner.NewVU(uint64(atomic.AddUint32(&h.getVUCount, 1)), nil) + return h.runner.NewVU(uint64(atomic.AddUint32(&h.getVUCount, 1)), 0, nil) } func (h *handleVUTest) returnVU(_ lib.InitializedVU) { @@ -371,7 +371,7 @@ func BenchmarkVUHandleIterations(b *testing.B) { return nil } getVU := func() (lib.InitializedVU, error) { - return runner.NewVU(uint64(atomic.AddUint32(&getVUCount, 1)), nil) + return runner.NewVU(uint64(atomic.AddUint32(&getVUCount, 1)), 0, nil) } returnVU := func(_ lib.InitializedVU) { diff --git a/lib/netext/httpext/request.go b/lib/netext/httpext/request.go index b48cd45a17b..ee37215a0e7 100644 --- a/lib/netext/httpext/request.go +++ b/lib/netext/httpext/request.go @@ -210,7 +210,7 @@ func MakeRequest(ctx context.Context, preq *ParsedHTTPRequest) (*Response, error var transport http.RoundTripper = tracerTransport // Combine tags with common log fields - combinedLogFields := map[string]interface{}{"source": "http-debug", "vu": state.Vu, "iter": state.Iteration} + combinedLogFields := map[string]interface{}{"source": "http-debug", "vu": state.VUID, "iter": state.Iteration} for k, v := range tags { if _, present := combinedLogFields[k]; !present { combinedLogFields[k] = v diff --git a/lib/runner.go b/lib/runner.go index d77b8909fea..12522dd6f98 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -75,7 +75,7 @@ type Runner interface { // Spawns a new VU. It's fine to make this function rather heavy, if it means a performance // improvement at runtime. Remember, this is called once per VU and normally only at the start // of a test - RunOnce() may be called hundreds of thousands of times, and must be fast. 
- NewVU(id uint64, out chan<- stats.SampleContainer) (InitializedVU, error) + NewVU(idLocal, idGlobal uint64, out chan<- stats.SampleContainer) (InitializedVU, error) // Runs pre-test setup, if applicable. Setup(ctx context.Context, out chan<- stats.SampleContainer) error diff --git a/lib/state.go b/lib/state.go index ec286f121b1..8c271e6043b 100644 --- a/lib/state.go +++ b/lib/state.go @@ -67,9 +67,9 @@ type State struct { // TODO: maybe use https://golang.org/pkg/sync/#Pool ? BPool *bpool.BufferPool - Vu, VUIDScenario uint64 - Iteration int64 - Tags map[string]string + VUID, VUIDGlobal, VUIDScenario uint64 + Iteration int64 + Tags map[string]string // These will be assigned on VU activation. // Returns the iteration number of this VU in the current scenario. GetScenarioVUIter func() uint64 diff --git a/lib/testutils/minirunner/minirunner.go b/lib/testutils/minirunner/minirunner.go index 015d102523e..834cb15990c 100644 --- a/lib/testutils/minirunner/minirunner.go +++ b/lib/testutils/minirunner/minirunner.go @@ -46,9 +46,8 @@ type MiniRunner struct { SetupData []byte - NextVUID uint64 - Group *lib.Group - Options lib.Options + Group *lib.Group + Options lib.Options } // MakeArchive isn't implemented, it always returns nil and is just here to @@ -58,12 +57,13 @@ func (r MiniRunner) MakeArchive() *lib.Archive { } // NewVU returns a new VU with an incremental ID. 
-func (r *MiniRunner) NewVU(id uint64, out chan<- stats.SampleContainer) (lib.InitializedVU, error) { - state := &lib.State{Vu: id, Iteration: int64(-1)} +func (r *MiniRunner) NewVU(idLocal, idGlobal uint64, out chan<- stats.SampleContainer) (lib.InitializedVU, error) { + state := &lib.State{VUID: idLocal, VUIDGlobal: idGlobal, Iteration: int64(-1)} return &VU{ R: r, Out: out, - ID: id, + ID: idLocal, + IDGlobal: idGlobal, state: state, scenarioID: make(map[string]uint64), scenarioIter: make(map[string]uint64), @@ -132,11 +132,11 @@ func (r *MiniRunner) HandleSummary(ctx context.Context, s *lib.Summary) (map[str // VU is a mock VU, spawned by a MiniRunner. type VU struct { - R *MiniRunner - Out chan<- stats.SampleContainer - ID uint64 - Iteration int64 - state *lib.State + R *MiniRunner + Out chan<- stats.SampleContainer + ID, IDGlobal uint64 + Iteration int64 + state *lib.State // ID of this VU in each scenario scenarioID map[string]uint64 // count of iterations executed by this VU in each scenario From b515245a0f8e94207767f2e4184b6c49731c984e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Wed, 16 Jun 2021 16:32:09 +0200 Subject: [PATCH 21/22] Fix unlikely data race when calling BaseExecutor methods Calling these methods defined on a value receiver copies the BaseExecutor instance and could cause a data race if another goroutine is writing to it. 
In practice this wasn't a problem and unlikely anyone would ever run into it on master, but it did appear on the feat/1320-execution-api branch (#1863) when running: go run -race main.go run --quiet -u 5 -i 5 'github.com/k6io/k6/samples/http_get.js' Clipped stack trace: WARNING: DATA RACE Write at 0x00c00050d7a8 by main goroutine: go.k6.io/k6/lib/executor.(*SharedIterations).Init() /home/ivan/Projects/k6io/k6/lib/executor/shared_iterations.go:179 +0x384 go.k6.io/k6/core/local.(*ExecutionScheduler).Init() /home/ivan/Projects/k6io/k6/core/local/local.go:276 +0xc1c go.k6.io/k6/core.(*Engine).Init() /home/ivan/Projects/k6io/k6/core/engine.go:190 +0x148 go.k6.io/k6/cmd.getRunCmd.func1() /home/ivan/Projects/k6io/k6/cmd/run.go:248 +0x1ad7 ... Previous read at 0x00c00050d7a8 by goroutine 32: go.k6.io/k6/lib/executor.(*SharedIterations).GetProgress() :1 +0x85 go.k6.io/k6/cmd.getRunCmd.func1.1() /home/ivan/Projects/k6io/k6/cmd/run.go:180 +0x13d Also see https://github.com/k6io/k6/pull/1863#discussion_r652612344 . --- lib/executor/base_executor.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/executor/base_executor.go b/lib/executor/base_executor.go index 44943a94dbf..14126ca351c 100644 --- a/lib/executor/base_executor.go +++ b/lib/executor/base_executor.go @@ -79,29 +79,29 @@ func (bs *BaseExecutor) Init(_ context.Context) error { } // GetConfig returns the configuration with which this executor was launched. -func (bs BaseExecutor) GetConfig() lib.ExecutorConfig { +func (bs *BaseExecutor) GetConfig() lib.ExecutorConfig { return bs.config } // getNextLocalVUID increments and returns the next VU ID that's specific for // this executor (i.e. not global like __VU). -func (bs BaseExecutor) getNextLocalVUID() uint64 { +func (bs *BaseExecutor) getNextLocalVUID() uint64 { return atomic.AddUint64(bs.VUIDLocal, 1) } // GetLogger returns the executor logger entry. 
-func (bs BaseExecutor) GetLogger() *logrus.Entry { +func (bs *BaseExecutor) GetLogger() *logrus.Entry { return bs.logger } // GetProgress just returns the progressbar pointer. -func (bs BaseExecutor) GetProgress() *pb.ProgressBar { +func (bs *BaseExecutor) GetProgress() *pb.ProgressBar { return bs.progress } // getMetricTags returns a tag set that can be used to emit metrics by the // executor. The VU ID is optional. -func (bs BaseExecutor) getMetricTags(vuID *uint64) *stats.SampleTags { +func (bs *BaseExecutor) getMetricTags(vuID *uint64) *stats.SampleTags { tags := bs.executionState.Options.RunTags.CloneTags() if bs.executionState.Options.SystemTags.Has(stats.TagScenario) { tags["scenario"] = bs.config.GetName() From 0279c6796cc43974155bf4bd95c1795b1fc92b64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ivan=20Miri=C4=87?= Date: Thu, 17 Jun 2021 16:06:04 +0200 Subject: [PATCH 22/22] Remove scenario specific VU IDs We might want to eventually add this back once we determine the usefulness of it, possibly with a global (across instances) variant. 
Resolves https://github.com/k6io/k6/pull/1863#discussion_r653558709 --- js/runner.go | 9 --------- lib/executor/base_executor.go | 9 --------- lib/executor/constant_arrival_rate.go | 2 +- lib/executor/constant_vus.go | 2 +- lib/executor/externally_controlled.go | 2 +- lib/executor/helpers.go | 3 +-- lib/executor/per_vu_iterations.go | 2 +- lib/executor/ramping_arrival_rate.go | 2 +- lib/executor/ramping_vus.go | 5 ++--- lib/executor/shared_iterations.go | 4 +--- lib/executor/vu_handle.go | 7 ++----- lib/executor/vu_handle_test.go | 12 ++++++------ lib/runner.go | 1 - lib/state.go | 6 +++--- lib/testutils/minirunner/minirunner.go | 10 ---------- 15 files changed, 20 insertions(+), 56 deletions(-) diff --git a/js/runner.go b/js/runner.go index f6d89ffd397..c91e9fcb235 100644 --- a/js/runner.go +++ b/js/runner.go @@ -219,7 +219,6 @@ func (r *Runner) newVU(idLocal, idGlobal uint64, samplesOut chan<- stats.SampleC Console: r.console, BPool: bpool.NewBufferPool(100), Samples: samplesOut, - scenarioID: make(map[string]uint64), scenarioIter: make(map[string]uint64), } @@ -547,8 +546,6 @@ type VU struct { setupData goja.Value state *lib.State - // ID of this VU in each scenario - scenarioID map[string]uint64 // count of iterations executed by this VU in each scenario scenarioIter map[string]uint64 } @@ -616,12 +613,6 @@ func (u *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { ctx = lib.WithState(ctx, u.state) params.RunContext = ctx *u.Context = ctx - if params.GetNextScVUID != nil { - if _, ok := u.scenarioID[params.Scenario]; !ok { - u.state.VUIDScenario = params.GetNextScVUID() - u.scenarioID[params.Scenario] = u.state.VUIDScenario - } - } u.state.GetScenarioVUIter = func() uint64 { return u.scenarioIter[params.Scenario] diff --git a/lib/executor/base_executor.go b/lib/executor/base_executor.go index 14126ca351c..9a65d4e5e9b 100644 --- a/lib/executor/base_executor.go +++ b/lib/executor/base_executor.go @@ -24,7 +24,6 @@ import ( "context" "strconv" "sync" - 
"sync/atomic" "github.com/sirupsen/logrus" @@ -40,7 +39,6 @@ import ( type BaseExecutor struct { config lib.ExecutorConfig executionState *lib.ExecutionState - VUIDLocal *uint64 // counter for assigning executor-specific VU IDs iterSegIndexMx *sync.Mutex iterSegIndex *lib.SegmentedIndex logger *logrus.Entry @@ -53,7 +51,6 @@ func NewBaseExecutor(config lib.ExecutorConfig, es *lib.ExecutionState, logger * return &BaseExecutor{ config: config, executionState: es, - VUIDLocal: new(uint64), logger: logger, iterSegIndexMx: new(sync.Mutex), iterSegIndex: segIdx, @@ -83,12 +80,6 @@ func (bs *BaseExecutor) GetConfig() lib.ExecutorConfig { return bs.config } -// getNextLocalVUID increments and returns the next VU ID that's specific for -// this executor (i.e. not global like __VU). -func (bs *BaseExecutor) getNextLocalVUID() uint64 { - return atomic.AddUint64(bs.VUIDLocal, 1) -} - // GetLogger returns the executor logger entry. func (bs *BaseExecutor) GetLogger() *logrus.Entry { return bs.logger diff --git a/lib/executor/constant_arrival_rate.go b/lib/executor/constant_arrival_rate.go index 5303924acf1..9d4111b66e6 100644 --- a/lib/executor/constant_arrival_rate.go +++ b/lib/executor/constant_arrival_rate.go @@ -286,7 +286,7 @@ func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- stats.S activeVUsWg.Add(1) activeVU := initVU.Activate(getVUActivationParams( maxDurationCtx, car.config.BaseConfig, returnVU, - car.getNextLocalVUID, car.nextIterationCounters, + car.nextIterationCounters, )) car.executionState.ModCurrentlyActiveVUsCount(+1) atomic.AddUint64(&activeVUsCount, 1) diff --git a/lib/executor/constant_vus.go b/lib/executor/constant_vus.go index 86b7bfda25f..515ef29ba57 100644 --- a/lib/executor/constant_vus.go +++ b/lib/executor/constant_vus.go @@ -194,7 +194,7 @@ func (clv ConstantVUs) Run(parentCtx context.Context, out chan<- stats.SampleCon activeVU := initVU.Activate( getVUActivationParams(ctx, clv.config.BaseConfig, returnVU, - 
clv.getNextLocalVUID, clv.nextIterationCounters)) + clv.nextIterationCounters)) for { select { diff --git a/lib/executor/externally_controlled.go b/lib/executor/externally_controlled.go index 5acf23c24c1..f356eac3003 100644 --- a/lib/executor/externally_controlled.go +++ b/lib/executor/externally_controlled.go @@ -362,7 +362,7 @@ func (rs *externallyControlledRunState) newManualVUHandle( ctx, cancel := context.WithCancel(rs.ctx) return &manualVUHandle{ vuHandle: newStoppedVUHandle(ctx, getVU, returnVU, - rs.executor.getNextLocalVUID, rs.executor.nextIterationCounters, + rs.executor.nextIterationCounters, &rs.executor.config.BaseConfig, logger), initVU: initVU, wg: &wg, diff --git a/lib/executor/helpers.go b/lib/executor/helpers.go index e2a972df988..f127cec44c0 100644 --- a/lib/executor/helpers.go +++ b/lib/executor/helpers.go @@ -225,7 +225,7 @@ func getArrivalRatePerSec(scaledArrivalRate *big.Rat) *big.Rat { // TODO: Refactor this, maybe move all scenario things to an embedded struct? 
func getVUActivationParams( ctx context.Context, conf BaseConfig, deactivateCallback func(lib.InitializedVU), - getNextScVUID func() uint64, nextIterationCounters func() (uint64, uint64), + nextIterationCounters func() (uint64, uint64), ) *lib.VUActivationParams { return &lib.VUActivationParams{ RunContext: ctx, @@ -234,7 +234,6 @@ func getVUActivationParams( Env: conf.GetEnv(), Tags: conf.GetTags(), DeactivateCallback: deactivateCallback, - GetNextScVUID: getNextScVUID, GetNextIterationCounters: nextIterationCounters, } } diff --git a/lib/executor/per_vu_iterations.go b/lib/executor/per_vu_iterations.go index 204e41f49fa..0194b2ea846 100644 --- a/lib/executor/per_vu_iterations.go +++ b/lib/executor/per_vu_iterations.go @@ -220,7 +220,7 @@ func (pvi PerVUIterations) Run(parentCtx context.Context, out chan<- stats.Sampl vuID := initVU.GetID() activeVU := initVU.Activate( getVUActivationParams(ctx, pvi.config.BaseConfig, returnVU, - pvi.getNextLocalVUID, pvi.nextIterationCounters)) + pvi.nextIterationCounters)) for i := int64(0); i < iterations; i++ { select { diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go index 935385c0394..adfef535ba3 100644 --- a/lib/executor/ramping_arrival_rate.go +++ b/lib/executor/ramping_arrival_rate.go @@ -410,7 +410,7 @@ func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.S activeVU := initVU.Activate( getVUActivationParams( maxDurationCtx, varr.config.BaseConfig, returnVU, - varr.getNextLocalVUID, varr.nextIterationCounters)) + varr.nextIterationCounters)) varr.executionState.ModCurrentlyActiveVUsCount(+1) atomic.AddUint64(&activeVUsCount, 1) diff --git a/lib/executor/ramping_vus.go b/lib/executor/ramping_vus.go index 8aee2187a85..af5aa554efd 100644 --- a/lib/executor/ramping_vus.go +++ b/lib/executor/ramping_vus.go @@ -574,9 +574,8 @@ func (vlv RampingVUs) Run(parentCtx context.Context, out chan<- stats.SampleCont vuHandles := make([]*vuHandle, maxVUs) for i := 
uint64(0); i < maxVUs; i++ { vuHandle := newStoppedVUHandle( - maxDurationCtx, getVU, returnVU, vlv.getNextLocalVUID, - vlv.nextIterationCounters, &vlv.config.BaseConfig, - vlv.logger.WithField("vuNum", i)) + maxDurationCtx, getVU, returnVU, vlv.nextIterationCounters, + &vlv.config.BaseConfig, vlv.logger.WithField("vuNum", i)) go vuHandle.runLoopsIfPossible(runIteration) vuHandles[i] = vuHandle } diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go index 00c835d64a8..9e2e8226dd8 100644 --- a/lib/executor/shared_iterations.go +++ b/lib/executor/shared_iterations.go @@ -251,9 +251,7 @@ func (si SharedIterations) Run(parentCtx context.Context, out chan<- stats.Sampl defer cancel() activeVU := initVU.Activate(getVUActivationParams( - ctx, si.config.BaseConfig, returnVU, si.getNextLocalVUID, - si.nextIterationCounters, - )) + ctx, si.config.BaseConfig, returnVU, si.nextIterationCounters)) for { select { diff --git a/lib/executor/vu_handle.go b/lib/executor/vu_handle.go index 815741fa8e0..7a25ca32ad2 100644 --- a/lib/executor/vu_handle.go +++ b/lib/executor/vu_handle.go @@ -92,7 +92,6 @@ type vuHandle struct { parentCtx context.Context getVU func() (lib.InitializedVU, error) returnVU func(lib.InitializedVU) - getScenarioVUID func() uint64 nextIterationCounters func() (uint64, uint64) config *BaseConfig @@ -110,7 +109,7 @@ type vuHandle struct { func newStoppedVUHandle( parentCtx context.Context, getVU func() (lib.InitializedVU, error), - returnVU func(lib.InitializedVU), getScenarioVUID func() uint64, + returnVU func(lib.InitializedVU), nextIterationCounters func() (uint64, uint64), config *BaseConfig, logger *logrus.Entry, ) *vuHandle { @@ -120,7 +119,6 @@ func newStoppedVUHandle( mutex: &sync.Mutex{}, parentCtx: parentCtx, getVU: getVU, - getScenarioVUID: getScenarioVUID, nextIterationCounters: nextIterationCounters, config: config, @@ -153,8 +151,7 @@ func (vh *vuHandle) start() (err error) { } vh.activeVU = 
vh.initVU.Activate(getVUActivationParams( - vh.ctx, *vh.config, vh.returnVU, vh.getScenarioVUID, - vh.nextIterationCounters)) + vh.ctx, *vh.config, vh.returnVU, vh.nextIterationCounters)) close(vh.canStartIter) vh.changeState(starting) } diff --git a/lib/executor/vu_handle_test.go b/lib/executor/vu_handle_test.go index 425d6647a66..4dd1cb7ff08 100644 --- a/lib/executor/vu_handle_test.go +++ b/lib/executor/vu_handle_test.go @@ -65,7 +65,7 @@ func TestVUHandleRace(t *testing.T) { } } - vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, mockNextIterations, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, mockNextIterations, &BaseConfig{}, logEntry) go vuHandle.runLoopsIfPossible(runIter) var wg sync.WaitGroup wg.Add(3) @@ -157,7 +157,7 @@ func TestVUHandleStartStopRace(t *testing.T) { } } - vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, mockNextIterations, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, mockNextIterations, &BaseConfig{}, logEntry) go vuHandle.runLoopsIfPossible(runIter) for i := 0; i < testIterations; i++ { err := vuHandle.start() @@ -235,7 +235,7 @@ func TestVUHandleSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, mockNextIterations, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, mockNextIterations, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { @@ -275,7 +275,7 @@ func TestVUHandleSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, mockNextIterations, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, mockNextIterations, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { @@ -316,7 +316,7 @@ func TestVUHandleSimple(t *testing.T) 
{ ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, nil, mockNextIterations, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, test.getVU, test.returnVU, mockNextIterations, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { @@ -395,7 +395,7 @@ func BenchmarkVUHandleIterations(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, nil, mockNextIterations, &BaseConfig{}, logEntry) + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, mockNextIterations, &BaseConfig{}, logEntry) var wg sync.WaitGroup wg.Add(1) go func() { diff --git a/lib/runner.go b/lib/runner.go index 12522dd6f98..9cad5f6c420 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -55,7 +55,6 @@ type VUActivationParams struct { DeactivateCallback func(InitializedVU) Env, Tags map[string]string Exec, Scenario string - GetNextScVUID func() uint64 GetNextIterationCounters func() (uint64, uint64) } diff --git a/lib/state.go b/lib/state.go index 8c271e6043b..37933910ba2 100644 --- a/lib/state.go +++ b/lib/state.go @@ -67,9 +67,9 @@ type State struct { // TODO: maybe use https://golang.org/pkg/sync/#Pool ? BPool *bpool.BufferPool - VUID, VUIDGlobal, VUIDScenario uint64 - Iteration int64 - Tags map[string]string + VUID, VUIDGlobal uint64 + Iteration int64 + Tags map[string]string // These will be assigned on VU activation. // Returns the iteration number of this VU in the current scenario. 
GetScenarioVUIter func() uint64 diff --git a/lib/testutils/minirunner/minirunner.go b/lib/testutils/minirunner/minirunner.go index 834cb15990c..a70fdaa6906 100644 --- a/lib/testutils/minirunner/minirunner.go +++ b/lib/testutils/minirunner/minirunner.go @@ -65,7 +65,6 @@ func (r *MiniRunner) NewVU(idLocal, idGlobal uint64, out chan<- stats.SampleCont ID: idLocal, IDGlobal: idGlobal, state: state, - scenarioID: make(map[string]uint64), scenarioIter: make(map[string]uint64), }, nil } @@ -137,8 +136,6 @@ type VU struct { ID, IDGlobal uint64 Iteration int64 state *lib.State - // ID of this VU in each scenario - scenarioID map[string]uint64 // count of iterations executed by this VU in each scenario scenarioIter map[string]uint64 } @@ -163,13 +160,6 @@ func (vu *VU) GetID() uint64 { func (vu *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { ctx := lib.WithState(params.RunContext, vu.state) - if params.GetNextScVUID != nil { - if _, ok := vu.scenarioID[params.Scenario]; !ok { - vu.state.VUIDScenario = params.GetNextScVUID() - vu.scenarioID[params.Scenario] = vu.state.VUIDScenario - } - } - vu.state.GetScenarioVUIter = func() uint64 { return vu.scenarioIter[params.Scenario] }