Config refactoring and tests #935
@@ -1,7 +1,7 @@
/*
 *
 * k6 - a next-generation load testing tool
- * Copyright (C) 2016 Load Impact
+ * Copyright (C) 2019 Load Impact
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as

@@ -21,11 +21,25 @@
package cmd

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/loadimpact/k6/lib/scheduler"
	"github.com/loadimpact/k6/lib/types"

	"github.com/spf13/afero"
	"github.com/spf13/pflag"

	"github.com/kelseyhightower/envconfig"
	"github.com/loadimpact/k6/lib"
	log "github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"gopkg.in/guregu/null.v3"
)

@@ -83,8 +97,239 @@ func TestConfigCmd(t *testing.T) {
	}
}

//TODO: write end-to-end configuration tests - how the config file, in-script options, environment variables and CLI flags are parsed and interact... and how the end result is then propagated back into the script

// A simple logrus hook that can be used to check whether log messages were emitted
type simpleLogrusHook struct {
	mutex        sync.Mutex
	levels       []log.Level
	messageCache []log.Entry
}

func (smh *simpleLogrusHook) Levels() []log.Level {
	return smh.levels
}

func (smh *simpleLogrusHook) Fire(e *log.Entry) error {
	smh.mutex.Lock()
	defer smh.mutex.Unlock()
	smh.messageCache = append(smh.messageCache, *e)
	return nil
}

func (smh *simpleLogrusHook) drain() []log.Entry {
	smh.mutex.Lock()
	defer smh.mutex.Unlock()
	res := smh.messageCache
	smh.messageCache = []log.Entry{}
	return res
}

var _ log.Hook = &simpleLogrusHook{}

// A helper function for setting arbitrary environment variables and
// restoring the old ones at the end, usually by deferring the returned callback
//TODO: remove these hacks when we improve the configuration... we shouldn't
// have to mess with the global environment at all...
func setEnv(t *testing.T, newEnv []string) (restoreEnv func()) {
	actuallySetEnv := func(env []string) {
		os.Clearenv()
		for _, e := range env {
			val := ""
			pair := strings.SplitN(e, "=", 2)
			if len(pair) > 1 {
				val = pair[1]
			}
			require.NoError(t, os.Setenv(pair[0], val))
		}
	}
	oldEnv := os.Environ()
	actuallySetEnv(newEnv)

	return func() {
		actuallySetEnv(oldEnv)
	}
}

var verifyOneIterPerOneVU = func(t *testing.T, c Config) {
	// No config anywhere should result in a config with 1 VU and 1 uninterruptible iteration
	sched := c.Execution[lib.DefaultSchedulerName]
	require.NotEmpty(t, sched)
	require.IsType(t, scheduler.PerVUIteationsConfig{}, sched)
	perVuIters, ok := sched.(scheduler.PerVUIteationsConfig)
	require.True(t, ok)
	assert.Equal(t, null.NewBool(false, false), perVuIters.Interruptible)
	assert.Equal(t, null.NewInt(1, false), perVuIters.Iterations)
	assert.Equal(t, null.NewInt(1, false), perVuIters.VUs)
	//TODO: verify shortcut options as well?
}

var verifySharedIters = func(vus, iters int64) func(t *testing.T, c Config) {
	return func(t *testing.T, c Config) {
		sched := c.Execution[lib.DefaultSchedulerName]
		require.NotEmpty(t, sched)
		require.IsType(t, scheduler.SharedIteationsConfig{}, sched)
		sharedIterConfig, ok := sched.(scheduler.SharedIteationsConfig)
		require.True(t, ok)
		assert.Equal(t, null.NewInt(vus, true), sharedIterConfig.VUs)
		assert.Equal(t, null.NewInt(iters, true), sharedIterConfig.Iterations)
		//TODO: verify shortcut options as well?
	}
}

var verifyConstantLoopingVUs = func(vus int64, duration time.Duration) func(t *testing.T, c Config) {
	return func(t *testing.T, c Config) {
		sched := c.Execution[lib.DefaultSchedulerName]
		require.NotEmpty(t, sched)
		require.IsType(t, scheduler.ConstantLoopingVUsConfig{}, sched)
		clvc, ok := sched.(scheduler.ConstantLoopingVUsConfig)
		require.True(t, ok)
		assert.Equal(t, null.NewBool(true, false), clvc.Interruptible)
		assert.Equal(t, null.NewInt(vus, true), clvc.VUs)
		assert.Equal(t, types.NullDurationFrom(duration), clvc.Duration)
		//TODO: verify shortcut options as well?
	}
}

func mostFlagSets() []*pflag.FlagSet {
	// sigh... compromises...
	return []*pflag.FlagSet{runCmdFlagSet(), archiveCmdFlagSet(), cloudCmdFlagSet()}
}

type opts struct {
	cliFlagSets []*pflag.FlagSet
	cli         []string
	env         []string
	runner      *lib.Options
	//TODO: test the JSON config as well... after most of https://github.com/loadimpact/k6/issues/883#issuecomment-468646291 is fixed
}

// exp contains the different events or errors we expect our test case to trigger.
// For space and clarity, we use the fact that, by default, all of the struct values are false.
type exp struct {
	cliError           bool
	consolidationError bool
	validationErrors   bool
	logWarning         bool //TODO: remove in the next version?
}

// A hell of a complicated test case, that still doesn't test things fully...
type configConsolidationTestCase struct {
	options         opts
	expected        exp
	customValidator func(t *testing.T, c Config)
}

var configConsolidationTestCases = []configConsolidationTestCase{
	// Check that no options will result in 1 VU 1 iter value for execution
	{opts{}, exp{}, verifyOneIterPerOneVU},
	// Verify some CLI errors
	{opts{cli: []string{"--blah", "blah"}}, exp{cliError: true}, nil},
	{opts{cli: []string{"--duration", "blah"}}, exp{cliError: true}, nil},
	{opts{cli: []string{"--iterations", "blah"}}, exp{cliError: true}, nil},
	{opts{cli: []string{"--execution", ""}}, exp{cliError: true}, nil},
	{opts{cli: []string{"--stage", "10:20s"}}, exp{cliError: true}, nil},
	// Check if CLI shortcuts generate correct execution values
	{opts{cli: []string{"--vus", "1", "--iterations", "5"}}, exp{}, verifySharedIters(1, 5)},
	{opts{cli: []string{"-u", "2", "-i", "6"}}, exp{}, verifySharedIters(2, 6)},
	{opts{cli: []string{"-u", "3", "-d", "30s"}}, exp{}, verifyConstantLoopingVUs(3, 30*time.Second)},
	{opts{cli: []string{"-u", "4", "--duration", "60s"}}, exp{}, verifyConstantLoopingVUs(4, 1*time.Minute)},
	//TODO: verify stages
	// This should get a validation error since VUs are more than the shared iterations
	{opts{cli: []string{"--vus", "10", "-i", "6"}}, exp{validationErrors: true}, verifySharedIters(10, 6)},
	// These should emit a warning
	{opts{cli: []string{"-u", "1", "-i", "6", "-d", "10s"}}, exp{logWarning: true}, nil},
	{opts{cli: []string{"-u", "2", "-d", "10s", "-s", "10s:20"}}, exp{logWarning: true}, nil},
	{opts{cli: []string{"-u", "3", "-i", "5", "-s", "10s:20"}}, exp{logWarning: true}, nil},
	{opts{cli: []string{"-u", "3", "-d", "0"}}, exp{logWarning: true}, nil},
	// Test if environment variable shortcuts are working as expected
	{opts{env: []string{"K6_VUS=5", "K6_ITERATIONS=15"}}, exp{}, verifySharedIters(5, 15)},
	{opts{env: []string{"K6_VUS=10", "K6_DURATION=20s"}}, exp{}, verifyConstantLoopingVUs(10, 20*time.Second)},

	//TODO: test combinations between options and levels
	//TODO: test the future full overwriting of the duration/iterations/stages/execution options

	// Just in case, verify that no options will result in the same 1 vu 1 iter config
	{opts{}, exp{}, verifyOneIterPerOneVU},
	//TODO: test for differences between flagsets
	//TODO: more tests in general...
}

func TestConfigConsolidation(t *testing.T) {
	logHook := simpleLogrusHook{levels: []log.Level{log.WarnLevel}}
	log.AddHook(&logHook)
	log.SetOutput(ioutil.Discard)
	defer log.SetOutput(os.Stderr)

[Review comment] This once again makes me think we should not just be calling some global logging functions...

[Reply] Probably... And, to be fair, we have some logrus instances that we pass around in some places. For example, each VU has its own logger, IIRC. But that's far from enough.
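A minimal sketch of the injection idea being discussed (the consolidator and newTestLogger names are hypothetical illustrations, not this PR's API): if the pieces that emit warnings received their own *logrus.Logger, a test could attach the hook to a private instance instead of mutating the global logger.

```go
package cmd

import (
	"io/ioutil"

	log "github.com/sirupsen/logrus"
)

// hypothetical: a component that gets its logger injected instead of
// calling the package-level logrus functions
type consolidator struct {
	logger *log.Logger
}

func (c *consolidator) warnOnShortcutConflict() {
	c.logger.Warn("execution settings overridden by shortcut options")
}

// newTestLogger builds a test-local logger; hooks and discarded output
// then never leak between tests, and there is no global state to restore
func newTestLogger(hook log.Hook) *log.Logger {
	logger := log.New()
	logger.AddHook(hook)
	logger.SetOutput(ioutil.Discard)
	return logger
}
```

A test could then build &consolidator{logger: newTestLogger(&logHook)} and drop the whole AddHook/SetOutput/defer dance on the global logger.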
	runTestCase := func(t *testing.T, testCase configConsolidationTestCase, flagSet *pflag.FlagSet) {
		t.Logf("Test with opts=%#v and exp=%#v\n", testCase.options, testCase.expected)
		logHook.drain()

		restoreEnv := setEnv(t, testCase.options.env)
		defer restoreEnv()

		//TODO: also remove these hacks when we improve the configuration...
		getTestCaseCliConf := func() (Config, error) {
			if err := flagSet.Parse(testCase.options.cli); err != nil {
				return Config{}, err
			}
			if flagSet.Lookup("out") != nil {
				return getConfig(flagSet)
			}
			opts, errOpts := getOptions(flagSet)
			return Config{Options: opts}, errOpts
		}

		cliConf, err := getTestCaseCliConf()
		if testCase.expected.cliError {
			require.Error(t, err)
			return
		}
		require.NoError(t, err)

		var runner lib.Runner
		if testCase.options.runner != nil {
			runner = &lib.MiniRunner{Options: *testCase.options.runner}
		}
		fs := afero.NewMemMapFs() //TODO: test JSON configs as well!
		result, err := getConsolidatedConfig(fs, cliConf, runner)
		if testCase.expected.consolidationError {
			require.Error(t, err)
			return
		}
		require.NoError(t, err)

		warnings := logHook.drain()
		if testCase.expected.logWarning {
			assert.NotEmpty(t, warnings)
		} else {
			assert.Empty(t, warnings)
		}

		validationErrors := result.Validate()
		if testCase.expected.validationErrors {
			assert.NotEmpty(t, validationErrors)
		} else {
			assert.Empty(t, validationErrors)
		}

		if testCase.customValidator != nil {
			testCase.customValidator(t, result)
		}
	}

	for tcNum, testCase := range configConsolidationTestCases {
		flagSets := testCase.options.cliFlagSets
		if flagSets == nil { // handle the most common case
			flagSets = mostFlagSets()
		}
		for fsNum, flagSet := range flagSets {
			// I want to parallelize this, but I cannot... due to global variables and other
			// questionable architectural choices... :|
			t.Run(
				fmt.Sprintf("TestCase#%d_FlagSet#%d", tcNum, fsNum),
				func(t *testing.T) { runTestCase(t, testCase, flagSet) },
			)
		}
	}
}

func TestConfigEnv(t *testing.T) {
	testdata := map[struct{ Name, Key string }]map[string]func(Config){
		{"Linger", "K6_LINGER"}: {

@@ -134,8 +379,3 @@ func TestConfigApply(t *testing.T) {
		assert.Equal(t, []string{"influxdb", "json"}, conf.Out)
	})
}

-func TestBuildExecutionConfig(t *testing.T) {
-	//TODO: test the current config building and constructing of the execution plan, and the emitted warnings
-	//TODO: test the future full overwriting of the duration/iterations/stages/execution options
-}
[Review comment] Well, you can use local variables, but you will need to not use init() :). I suppose at some point we will rewrite it, but this seems like too small of a problem atm.
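To illustrate the suggestion (a hypothetical before/after sketch, not code from this PR): flag definitions move out of init() and package-level state into a constructor that builds a fresh pflag.FlagSet on each call, which is roughly the pattern the runCmdFlagSet() helpers in the diff already follow.

```go
package cmd

import "github.com/spf13/pflag"

// Before (the pattern being discouraged): a package-global FlagSet that
// init() mutates, so every test shares the same parsed state:
//
//	var runFlags = pflag.NewFlagSet("run", pflag.ContinueOnError)
//	func init() { runFlags.Int64P("vus", "u", 1, "number of VUs") }
//
// After: a constructor returns a fresh, fully configured FlagSet on each
// call, so tests can parse isolated copies without touching globals.
func exampleRunFlagSet() *pflag.FlagSet {
	flags := pflag.NewFlagSet("run", pflag.ContinueOnError)
	flags.Int64P("vus", "u", 1, "number of VUs")
	flags.DurationP("duration", "d", 0, "test duration")
	return flags
}
```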
[Reply] It likely needs to be rewritten as we tackle the big config refactoring... Using global variables is somewhat fine, until you need to test them. And it can hide issues, like the fact that we currently somehow have 2 different variables for saving the value of the --config flag (details).

But in my head, we have at least 2 other similar issues that are more problematic than this, both in terms of improving test coverage and correctness, and in their potential for hiding bugs...
The first problem is the liberal use of os.Getenv() to read environment variables directly into those same global variables. grep -FR --exclude-dir=vendor "os.Getenv" produces a long list of matches, and that's on top of the other issues we also have due to using envconfig... Ideally, we shouldn't directly call os.Getenv() or other os functions more than once. Instead, we should capture the environment variables once per run, in a single place in the code, and then pass them through to the configuration parsing and setting logic. That way we can actually mock the process environment and write simple and sensible tests that are able to check all of the configuration parsing and management logic...
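A sketch of that capture-once idea (readEnvConfig is a hypothetical stand-in for the actual parsing logic): read os.Environ() a single time, convert it to a map, and pass the map down, so tests can substitute any environment without setEnv()-style global mutation.

```go
package cmd

import "strings"

// buildEnvMap converts an os.Environ()-style "KEY=value" slice into a map,
// so nothing below this point ever needs to call os.Getenv() itself.
func buildEnvMap(environ []string) map[string]string {
	env := make(map[string]string, len(environ))
	for _, kv := range environ {
		if idx := strings.IndexRune(kv, '='); idx != -1 {
			env[kv[:idx]] = kv[idx+1:]
		}
	}
	return env
}

// The entry point would capture the environment exactly once:
//
//	conf, err := readEnvConfig(buildEnvMap(os.Environ()))
//
// while a test simply passes a literal map, with no setEnv()-style hacks:
//
//	conf, err := readEnvConfig(map[string]string{"K6_VUS": "5"})
```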
The second problem lies in the way we're dealing with the CLI flags that don't save their values into global variables, i.e. the majority of the CLI flags. And I'm not even talking about how we use them to store both the default option values and the highest-priority user-supplied ones... The problem I've hit a few times recently is that a lot of the potential developer errors in setting CLI flags won't happen at compile time, but rather at runtime.

The problematic code can be seen here and here (and in a few other places as well). Because we're using string identifiers in multiple places, and we panic() if we don't have the expected flag, and we don't have tests for the different cobra.Command global variables (i.e. runCmd, cloudCmd, etc.), minor developer errors can lead to releasing broken code, especially in rarely used commands...

So yeah, I won't fix this specific TODO with this pull request, but it and the others I mentioned above should definitely be fixed sometime soon...
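One way to catch that class of errors at test time instead of at runtime would be a small guard test along these lines (a sketch; the exact list of flag names is illustrative, and mostFlagSets() is the helper defined in this PR's diff):

```go
package cmd

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestFlagsExist asserts that every flag name the code later looks up by
// string is actually registered, so a typo in an identifier fails in CI
// instead of panicking at runtime in a rarely used command.
func TestFlagsExist(t *testing.T) {
	// illustrative list; the real one would mirror the string identifiers
	// used by the config consolidation code
	expectedFlags := []string{"vus", "iterations", "duration", "stage", "execution"}
	for _, flagSet := range mostFlagSets() {
		for _, name := range expectedFlags {
			assert.NotNilf(t, flagSet.Lookup(name), "flag %q is not registered", name)
		}
	}
}
```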