From 8c7d302b11fb5d54c42801bbd610d65197ac1b16 Mon Sep 17 00:00:00 2001
From: Dongsu Park
Date: Fri, 8 Jul 2016 09:58:10 +0200
Subject: [PATCH] functional: retrieve stdout/stderr after running command

Make every call to cluster.Fleetctl() (or run()) return both stdout and
stderr, so that they are included in the resulting error message. This
ensures that every potential error message gets printed out, which will
help us investigate occasional failures in the functional tests.
---
 functional/client_test.go            |  4 +--
 functional/cluster_test.go           | 18 +++++++------
 functional/connectivity-loss_test.go |  8 +++---
 functional/fleetctl_test.go          | 10 ++++---
 functional/node_test.go              | 16 +++++------
 functional/platform/nspawn.go        | 28 +++++++++----------
 functional/scheduling_test.go        | 40 +++++++++++++++-------------
 functional/server_test.go            |  5 ++--
 functional/shutdown_test.go          | 17 +++++++-----
 functional/unit_action_test.go       | 39 ++++++++++++++-------------
 10 files changed, 98 insertions(+), 87 deletions(-)

diff --git a/functional/client_test.go b/functional/client_test.go
index 487adbd0a..be2ead8a8 100644
--- a/functional/client_test.go
+++ b/functional/client_test.go
@@ -66,8 +66,8 @@ func TestKnownHostsVerification(t *testing.T) {
 	}
 
 	// SSH'ing to the cluster member should now fail with a host key mismatch
-	if _, _, err := cluster.Fleetctl(m0, "--strict-host-key-checking=true", fmt.Sprintf("--known-hosts-file=%s", khFile), "ssh", m1.ID(), "uptime"); err == nil {
-		t.Errorf("Expected error while SSH'ing to fleet machine")
+	if stdout, stderr, err := cluster.Fleetctl(m0, "--strict-host-key-checking=true", fmt.Sprintf("--known-hosts-file=%s", khFile), "ssh", m1.ID(), "uptime"); err == nil {
+		t.Errorf("Expected error while SSH'ing to fleet machine\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 
 	// Overwrite the known-hosts file to simulate removing the old host key
diff --git a/functional/cluster_test.go b/functional/cluster_test.go
index 38b3df683..6716c585a 100644
--- a/functional/cluster_test.go
+++ b/functional/cluster_test.go
@@ -53,9 +53,9 @@ func TestDynamicClusterNewMemberUnitMigration(t *testing.T) {
 
 	// All 3 services should be visible immediately, and all of them should
 	// become ACTIVE shortly thereafter
-	stdout, _, err = cluster.Fleetctl(m0, "list-units", "--no-legend")
+	stdout, stderr, err = cluster.Fleetctl(m0, "list-units", "--no-legend")
 	if err != nil {
-		t.Fatalf("Failed to run list-units: %v", err)
+		t.Fatalf("Failed to run list-units:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 	units := strings.Split(strings.TrimSpace(stdout), "\n")
 	if len(units) != 3 {
@@ -75,8 +75,10 @@ func TestDynamicClusterNewMemberUnitMigration(t *testing.T) {
 	// Kill one of the machines and make sure the unit migrates somewhere else
 	unit := "conflict.1.service"
 	oldMach := states[unit].Machine
-	if _, _, err = cluster.Fleetctl(m0, "--strict-host-key-checking=false", "ssh", oldMach, "sudo", "systemctl", "stop", "fleet"); err != nil {
-		t.Fatal(err)
+	stdout, stderr, err = cluster.Fleetctl(m0, "--strict-host-key-checking=false", "ssh", oldMach,
+		"sudo", "systemctl", "stop", "fleet")
+	if err != nil {
+		t.Fatalf("Failed to stop fleet service:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 	var mN platform.Member
 	if m0.ID() == oldMach {
@@ -131,20 +133,20 @@ func TestDynamicClusterMemberReboot(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	_, _, err = cluster.Fleetctl(m0, "start",
+	stdout, stderr, err := cluster.Fleetctl(m0, "start",
 		"fixtures/units/conflict.0.service",
"fixtures/units/conflict.1.service", "fixtures/units/conflict.2.service", ) if err != nil { - t.Errorf("Failed starting units: %v", err) + t.Errorf("Failed starting units:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err) } // All 3 services should be visible immediately, and all of them should // become ACTIVE shortly thereafter - stdout, _, err := cluster.Fleetctl(m0, "list-units", "--no-legend") + stdout, stderr, err = cluster.Fleetctl(m0, "list-units", "--no-legend") if err != nil { - t.Fatalf("Failed to run list-units: %v", err) + t.Fatalf("Failed to run list-units:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err) } units := strings.Split(strings.TrimSpace(stdout), "\n") if len(units) != 3 { diff --git a/functional/connectivity-loss_test.go b/functional/connectivity-loss_test.go index ca66de2c4..612beeac4 100644 --- a/functional/connectivity-loss_test.go +++ b/functional/connectivity-loss_test.go @@ -93,9 +93,9 @@ func TestSingleNodeConnectivityLoss(t *testing.T) { checkExpectedStates := func() (isExpected bool, expected, actual map[string]string) { // First check unit files. // These shouldn't change at all after intital submit -- but better safe than sorry... - stdout, _, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend", "--full", "--fields", "unit,dstate") + stdout, stderr, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend", "--full", "--fields", "unit,dstate") if err != nil { - t.Errorf("Failed listing unit files: %v", err) + t.Errorf("Failed listing unit files:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err) } stdout = strings.TrimSpace(stdout) @@ -113,9 +113,9 @@ func TestSingleNodeConnectivityLoss(t *testing.T) { } // Now check the actual unit states. - stdout, _, err = cluster.Fleetctl(m0, "list-units", "--no-legend", "--full", "--fields", "unit,active") + stdout, stderr, err = cluster.Fleetctl(m0, "list-units", "--no-legend", "--full", "--fields", "unit,active") if err != nil { - t.Errorf("Failed listing units: %v", err) + t.Errorf("Failed listing units:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err) } stdout = strings.TrimSpace(stdout) diff --git a/functional/fleetctl_test.go b/functional/fleetctl_test.go index d5f5e2afc..47a15333c 100644 --- a/functional/fleetctl_test.go +++ b/functional/fleetctl_test.go @@ -24,9 +24,10 @@ import ( ) func TestClientVersionFlag(t *testing.T) { - stdout, _, err := util.RunFleetctl("version") + stdout, stderr, err := util.RunFleetctl("version") if err != nil { - t.Fatalf("Unexpected error while executing fleetctl: %v", err) + t.Fatalf("Unexpected error while executing fleetctl:\nstdout: %s\nstderr: %s\nerr: %v", + stdout, stderr, err) } if strings.TrimSpace(stdout) != fmt.Sprintf("fleetctl version %s", version.Version) { @@ -35,9 +36,10 @@ func TestClientVersionFlag(t *testing.T) { } func TestClientVersionHelpOutput(t *testing.T) { - stdout, _, err := util.RunFleetctl("help") + stdout, stderr, err := util.RunFleetctl("help") if err != nil { - t.Fatalf("Unexpected error while executing fleetctl: %v", err) + t.Fatalf("Unexpected error while executing fleetctl:\nstdout: %s\nstderr: %s\nerr: %v", + stdout, stderr, err) } if !strings.Contains(stdout, fmt.Sprintf("%s", version.Version)) { diff --git a/functional/node_test.go b/functional/node_test.go index c14479874..6f3e9695a 100644 --- a/functional/node_test.go +++ b/functional/node_test.go @@ -62,8 +62,8 @@ func TestNodeShutdown(t *testing.T) { } // Stop the fleet process on the first member - if _, err = cluster.MemberCommand(m0, "sudo", 
"systemctl", "stop", "fleet"); err != nil { - t.Fatal(err) + if stdout, err = cluster.MemberCommand(m0, "sudo", "systemctl", "stop", "fleet"); err != nil { + t.Fatalf("Failed stopping fleet service: %v\nstdout: %s\n", err, stdout) } // The first member should quickly remove itself from the published @@ -118,13 +118,13 @@ func TestDetectMachineId(t *testing.T) { return fmt.Errorf("Failed to restart fleet service\nstdout: %s\nerr: %v", stdout, err) } - stdout, _ = cluster.MemberCommand(m, "systemctl", "show", "--property=ActiveState", "fleet") + stdout, err = cluster.MemberCommand(m, "systemctl", "show", "--property=ActiveState", "fleet") if strings.TrimSpace(stdout) != "ActiveState=active" { - return fmt.Errorf("Fleet unit not reported as active: %s", stdout) + return fmt.Errorf("Fleet unit not reported as active:\nstdout:%s\nerr: %v", stdout, err) } - stdout, _ = cluster.MemberCommand(m, "systemctl", "show", "--property=Result", "fleet") + stdout, err = cluster.MemberCommand(m, "systemctl", "show", "--property=Result", "fleet") if strings.TrimSpace(stdout) != "Result=success" { - return fmt.Errorf("Result for fleet unit not reported as success: %s", stdout) + return fmt.Errorf("Result for fleet unit not reported as success:\nstdout:%s\nerr: %v", stdout, err) } return nil } @@ -155,12 +155,12 @@ func TestDetectMachineId(t *testing.T) { if err != nil { if !strings.Contains(err.Error(), "exit status 1") || !strings.Contains(stderr, "fleet server unable to communicate with etcd") { - t.Fatalf("m1: Failed to get list of machines. err: %v\nstderr: %s", err, stderr) + t.Fatalf("m1: Failed to get list of machines. err: %v\nstdout: %s\nstderr: %s", err, stdout, stderr) } // If both conditions are satisfied, "exit status 1" and // "...unable to communicate...", then it's an expected error. PASS. } else { - t.Fatalf("m1: should get an error, but got success.\nstderr: %s", stderr) + t.Fatalf("m1: should get an error, but got success.\nstdout: %s\nstderr: %s", stdout, stderr) } // Trigger another test case of m0's ID getting different from m1's. 
diff --git a/functional/platform/nspawn.go b/functional/platform/nspawn.go
index d8f8494c2..36729ba8b 100644
--- a/functional/platform/nspawn.go
+++ b/functional/platform/nspawn.go
@@ -294,39 +294,39 @@ func (nc *nspawnCluster) prepCluster() (err error) {
 		return
 	}
 
-	stdout, _, err := run("brctl show")
+	stdout, stderr, err := run("brctl show")
 	if err != nil {
-		log.Printf("Failed enumerating bridges: %v", err)
+		log.Printf("Failed enumerating bridges: %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
 		return
 	}
 
 	if !strings.Contains(stdout, "fleet0") {
-		_, _, err = run("brctl addbr fleet0")
+		stdout, stderr, err = run("brctl addbr fleet0")
 		if err != nil {
-			log.Printf("Failed adding fleet0 bridge: %v", err)
+			log.Printf("Failed adding fleet0 bridge: %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
 			return
 		}
 	} else {
 		log.Printf("Bridge fleet0 already exists")
 	}
 
-	stdout, _, err = run("ip addr list fleet0")
+	stdout, stderr, err = run("ip addr list fleet0")
 	if err != nil {
-		log.Printf("Failed listing fleet0 addresses: %v", err)
+		log.Printf("Failed listing fleet0 addresses: %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
 		return
 	}
 
 	if !strings.Contains(stdout, "172.18.0.1/16") {
-		_, _, err = run("ip addr add 172.18.0.1/16 dev fleet0")
+		stdout, stderr, err = run("ip addr add 172.18.0.1/16 dev fleet0")
 		if err != nil {
-			log.Printf("Failed adding 172.18.0.1/16 to fleet0: %v", err)
+			log.Printf("Failed adding 172.18.0.1/16 to fleet0: %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
 			return
 		}
 	}
 
-	_, _, err = run("ip link set fleet0 up")
+	stdout, stderr, err = run("ip link set fleet0 up")
 	if err != nil {
-		log.Printf("Failed bringing up fleet0 bridge: %v", err)
+		log.Printf("Failed bringing up fleet0 bridge: %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
 		return
 	}
 
@@ -603,8 +603,8 @@ func (nc *nspawnCluster) ReplaceMember(m Member) (Member, error) {
 	label := fmt.Sprintf("%s%s", nc.name, m.ID())
 	cmd := fmt.Sprintf("machinectl poweroff %s", label)
-	if _, _, err := run(cmd); err != nil {
-		return nil, fmt.Errorf("poweroff failed: %v", err)
+	if stdout, stderr, err := run(cmd); err != nil {
+		return nil, fmt.Errorf("poweroff failed: %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
 	}
 
 	var mN Member
@@ -708,13 +708,13 @@ func (nc *nspawnCluster) systemd(unitName, exec string) error {
 func (nc *nspawnCluster) machinePID(name string) (int, error) {
 	for i := 0; i < 100; i++ {
 		mach := fmt.Sprintf("%s%s", nc.name, name)
-		stdout, _, err := run(fmt.Sprintf("machinectl show -p Leader %s", mach))
+		stdout, stderr, err := run(fmt.Sprintf("machinectl show -p Leader %s", mach))
 		if err != nil {
 			if i != -1 {
 				time.Sleep(100 * time.Millisecond)
 				continue
 			}
-			return -1, fmt.Errorf("failed detecting machine %s status: %v", mach, err)
+			return -1, fmt.Errorf("failed detecting machine %s status: %v\nstdout: %s\nstderr: %s", mach, err, stdout, stderr)
 		}
 
 		out := strings.SplitN(strings.TrimSpace(stdout), "=", 2)
diff --git a/functional/scheduling_test.go b/functional/scheduling_test.go
index d21cead53..8eb450001 100644
--- a/functional/scheduling_test.go
+++ b/functional/scheduling_test.go
@@ -66,9 +66,9 @@ func TestScheduleMachineOf(t *testing.T) {
 
 	// All 6 services should be visible immediately and become ACTIVE
 	// shortly thereafter
-	stdout, _, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
+	stdout, stderr, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
 	if err != nil {
-		t.Fatalf("Failed to run list-unit-files: %v", err)
+		t.Fatalf("Failed to run list-unit-files:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 	units := strings.Split(strings.TrimSpace(stdout), "\n")
 	if len(units) != 6 {
@@ -113,8 +113,8 @@ func TestScheduleMachineOf(t *testing.T) {
 
 	// Ensure a pair of units migrate together when their host goes down
 	mach := states["ping.1.service"].Machine
-	if _, _, err = cluster.Fleetctl(m0, "--strict-host-key-checking=false", "ssh", mach, "sudo", "systemctl", "stop", "fleet"); err != nil {
-		t.Fatal(err)
+	if stdout, stderr, err = cluster.Fleetctl(m0, "--strict-host-key-checking=false", "ssh", mach, "sudo", "systemctl", "stop", "fleet"); err != nil {
+		t.Fatalf("Failed to stop fleet service:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 
 	var mN platform.Member
@@ -184,9 +184,9 @@ func TestScheduleConflicts(t *testing.T) {
 
 	// All 5 services should be visible immediately and 3 should become
 	// ACTIVE shortly thereafter
-	stdout, _, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
+	stdout, stderr, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
 	if err != nil {
-		t.Fatalf("Failed to run list-unit-files: %v", err)
+		t.Fatalf("Failed to run list-unit-files:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 	units := strings.Split(strings.TrimSpace(stdout), "\n")
 	if len(units) != 5 {
@@ -256,9 +256,9 @@ func TestScheduleOneWayConflict(t *testing.T) {
 
 	// Both units should show up, but only conflicts-with-hello.service
 	// should report ACTIVE
-	stdout, _, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
+	stdout, stderr, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
 	if err != nil {
-		t.Fatalf("Failed to run list-unit-files: %v", err)
+		t.Fatalf("Failed to run list-unit-files:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 	units := strings.Split(strings.TrimSpace(stdout), "\n")
 	if len(units) != 2 {
@@ -281,8 +281,8 @@ func TestScheduleOneWayConflict(t *testing.T) {
 
 	// Destroying the conflicting unit should allow the other to start
 	name = "conflicts-with-hello.service"
-	if _, _, err := cluster.Fleetctl(m0, "destroy", name); err != nil {
-		t.Fatalf("Failed destroying %s", name)
+	if stdout, stderr, err := cluster.Fleetctl(m0, "destroy", name); err != nil {
+		t.Fatalf("Failed destroying %s:\nstdout: %s\nstderr: %s\nerr: %v", name, stdout, stderr, err)
 	}
 
 	// NOTE: we need to sleep here shortly to avoid occasional errors of
@@ -295,13 +295,14 @@ func TestScheduleOneWayConflict(t *testing.T) {
 	time.Sleep(1 * time.Second)
 
 	// Wait for the destroyed unit to actually disappear
+	var stdoutBuf, stderrBuf string
 	timeout, err := util.WaitForState(
 		func() bool {
-			stdout, _, err := cluster.Fleetctl(m0, "list-units", "--no-legend", "--full", "--fields", "unit,active,machine")
+			stdoutBuf, stderrBuf, err = cluster.Fleetctl(m0, "list-units", "--no-legend", "--full", "--fields", "unit,active,machine")
 			if err != nil {
 				return false
 			}
-			lines := strings.Split(strings.TrimSpace(stdout), "\n")
+			lines := strings.Split(strings.TrimSpace(stdoutBuf), "\n")
 			states := util.ParseUnitStates(lines)
 			for _, state := range states {
 				if state.Name == name {
@@ -312,7 +313,8 @@ func TestScheduleOneWayConflict(t *testing.T) {
 		},
 	)
 	if err != nil {
-		t.Fatalf("Destroyed unit %s not gone within %v", name, timeout)
+		t.Fatalf("Destroyed unit %s not gone within %v\nstdout: %s\nstderr: %s\nerr: %v",
+			name, timeout, stdoutBuf, stderrBuf, err)
 	}
 
 	active, err = cluster.WaitForNActiveUnits(m0, 1)
@@ -374,9 +376,9 @@ func TestScheduleReplace(t *testing.T) {
 	}
 
 	// Check that both units should show up
-	stdout, _, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
+	stdout, stderr, err := cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
 	if err != nil {
-		t.Fatalf("Failed to run list-unit-files: %v", err)
+		t.Fatalf("Failed to run list-unit-files:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 	units := strings.Split(strings.TrimSpace(stdout), "\n")
 	if len(units) != 2 {
@@ -450,9 +452,9 @@ func TestScheduleCircularReplace(t *testing.T) {
 			uName0tmp, stdout, stderr, err)
 	}
 
-	stdout, _, err = cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
+	stdout, stderr, err = cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
 	if err != nil {
-		t.Fatalf("Failed to run list-unit-files: %v", err)
+		t.Fatalf("Failed to run list-unit-files:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 	units := strings.Split(strings.TrimSpace(stdout), "\n")
 	if len(units) != nActiveUnits {
@@ -472,9 +474,9 @@ func TestScheduleCircularReplace(t *testing.T) {
 	if stdout, stderr, err := cluster.Fleetctl(m0, "start", "--no-block", uNames[1]); err != nil {
 		t.Fatalf("Failed starting unit %s: \nstdout: %s\nstderr: %s\nerr: %v", uNames[1], stdout, stderr, err)
 	}
-	stdout, _, err = cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
+	stdout, stderr, err = cluster.Fleetctl(m0, "list-unit-files", "--no-legend")
 	if err != nil {
-		t.Fatalf("Failed to run list-unit-files: %v", err)
+		t.Fatalf("Failed to run list-unit-files:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 	units = strings.Split(strings.TrimSpace(stdout), "\n")
 	if len(units) != nUnits {
diff --git a/functional/server_test.go b/functional/server_test.go
index a8647530b..eeb07eee7 100644
--- a/functional/server_test.go
+++ b/functional/server_test.go
@@ -78,9 +78,10 @@ func TestReconfigureServer(t *testing.T) {
 
 	// check if fleetd is still running correctly, by running fleetctl status
 	// Even if the log message does not show up, this test may catch the error.
-	stdout, _, err = cluster.Fleetctl(m0, "list-units")
+	stdout, stderr, err = cluster.Fleetctl(m0, "list-units")
 	if err != nil {
-		t.Fatalf("Unable to check list-units. Please check for fleetd socket. err:%v", err)
+		t.Fatalf("Unable to check list-units. Please check for fleetd socket.\nstdout: %s\nstderr: %s\nerr: %v",
+			stdout, stderr, err)
 	}
 
 	// Ensure that fleet received SIGHUP, if not then just skip this test
diff --git a/functional/shutdown_test.go b/functional/shutdown_test.go
index 3b5ea8e02..46514237e 100644
--- a/functional/shutdown_test.go
+++ b/functional/shutdown_test.go
@@ -74,24 +74,27 @@ func TestShutdownVsMonitor(t *testing.T) {
 	// Cut connection to etcd.
 	//
 	// This will result in a failed health check, and consequently the monitor will attempt a restart.
-	if _, err = cluster.MemberCommand(m0, "sudo", "iptables", "-I", "OUTPUT", "-p", "tcp", "-m", "multiport", "--dports=2379,4001", "-j", "DROP"); err != nil {
-		t.Fatal(err)
+	stdout, err := cluster.MemberCommand(m0, "sudo", "iptables", "-I", "OUTPUT", "-p", "tcp", "-m", "multiport", "--dports=2379,4001", "-j", "DROP")
+	if err != nil {
+		t.Fatalf("Failed inserting iptables rule:\nstdout: %s\nerr: %v", stdout, err)
 	}
 
 	// Wait for the monitor to trigger the restart.
 	//
 	// This will never complete, as long as there is no connectivity.
-	if _, err = cluster.MemberCommand(m0, "sudo", "sh", "-c", `'until journalctl -u fleet | grep -q "Server monitor triggered: Monitor timed out before successful heartbeat"; do sleep 1; done'`); err != nil {
-		t.Fatal(err)
+	stdout, err = cluster.MemberCommand(m0, "sudo", "sh", "-c", `'until journalctl -u fleet | grep -q "Server monitor triggered: Monitor timed out before successful heartbeat"; do sleep 1; done'`)
+	if err != nil {
+		t.Fatalf("Failed checking journal message:\nstdout: %s\nerr: %v", stdout, err)
 	}
 
 	// Stop fleetd while the restart is still in progress.
-	if _, err = cluster.MemberCommand(m0, "sudo", "systemctl", "stop", "fleet"); err != nil {
-		t.Fatal(err)
+	stdout, err = cluster.MemberCommand(m0, "sudo", "systemctl", "stop", "fleet")
+	if err != nil {
+		t.Fatalf("Failed stopping fleet service:\nstdout: %s\nerr: %v", stdout, err)
 	}
 
 	// Verify that fleetd was shut down cleanly in spite of the concurrent restart.
-	stdout, _ := cluster.MemberCommand(m0, "systemctl", "show", "--property=ActiveState", "fleet")
+	stdout, _ = cluster.MemberCommand(m0, "systemctl", "show", "--property=ActiveState", "fleet")
 	if strings.TrimSpace(stdout) != "ActiveState=inactive" {
 		t.Fatalf("Fleet unit not reported as inactive: %s", stdout)
 	}
diff --git a/functional/unit_action_test.go b/functional/unit_action_test.go
index be5ee4c02..51fc4fd80 100644
--- a/functional/unit_action_test.go
+++ b/functional/unit_action_test.go
@@ -250,9 +250,9 @@ func TestUnitCat(t *testing.T) {
 	fileBody := strings.TrimSpace(string(fileBuf))
 
 	// submit a unit and assert it shows up
-	_, _, err = cluster.Fleetctl(m, "submit", unitFile)
+	stdout, stderr, err := cluster.Fleetctl(m, "submit", unitFile)
 	if err != nil {
-		t.Fatalf("Unable to submit fleet unit: %v", err)
+		t.Fatalf("Unable to submit fleet unit:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 	// wait until the unit gets submitted up to 15 seconds
 	_, err = cluster.WaitForNUnitFiles(m, 1)
@@ -261,9 +261,9 @@
 	}
 
 	// cat the unit file and compare it with the original unit body
-	stdout, _, err := cluster.Fleetctl(m, "cat", path.Base(unitFile))
+	stdout, stderr, err = cluster.Fleetctl(m, "cat", path.Base(unitFile))
 	if err != nil {
-		t.Fatalf("Unable to submit fleet unit: %v", err)
+		t.Fatalf("Unable to cat fleet unit:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 	catBody := strings.TrimSpace(stdout)
 
@@ -293,9 +293,9 @@ func TestUnitStatus(t *testing.T) {
 
 	// Load a unit and print out status.
 	// Without loading a unit, it's impossible to run fleetctl status
-	_, _, err = cluster.Fleetctl(m, "load", unitFile)
+	stdout, stderr, err := cluster.Fleetctl(m, "load", unitFile)
 	if err != nil {
-		t.Fatalf("Unable to load a fleet unit: %v", err)
+		t.Fatalf("Unable to load a fleet unit:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 
 	// wait until the unit gets loaded up to 15 seconds
@@ -304,7 +304,7 @@
 		t.Fatalf("Failed to run list-units: %v", err)
 	}
 
-	stdout, stderr, err := cluster.Fleetctl(m,
+	stdout, stderr, err = cluster.Fleetctl(m,
 		"--strict-host-key-checking=false", "status", path.Base(unitFile))
 	if !strings.Contains(stdout, "Loaded: loaded") {
 		t.Errorf("Could not find expected string in status output:\n%s\nstderr:\n%s",
@@ -348,9 +348,9 @@ func TestListUnitFilesOrder(t *testing.T) {
 		t.Fatal("Failed to run list-unit-files: %v", err)
 	}
 
-	stdout, _, err := cluster.Fleetctl(m, "list-unit-files", "--no-legend", "--fields", "unit")
+	stdout, stderr, err := cluster.Fleetctl(m, "list-unit-files", "--no-legend", "--fields", "unit")
 	if err != nil {
-		t.Fatal("Failed to run list-unit-files: %v", err)
+		t.Fatalf("Failed to run list-unit-files:\nstdout: %s\nstderr: %s\nerr: %v", stdout, stderr, err)
 	}
 
 	outUnits := strings.Split(strings.TrimSpace(stdout), "\n")
@@ -453,9 +453,9 @@ func replaceUnitCommon(t *testing.T, cmd string, numRUnits int) error {
 		}
 
 		// retrieve content of hello.service, and append to bodiesOrig[]
-		bodyCur, _, err := cluster.Fleetctl(m, "cat", helloFilename)
+		bodyCur, stderr, err := cluster.Fleetctl(m, "cat", helloFilename)
 		if err != nil {
-			return nil, fmt.Errorf("Failed to run cat %s: %v", helloFilename, err)
+			return nil, fmt.Errorf("Failed to run cat %s: %v\nstderr: %s", helloFilename, err, stderr)
 		}
 		bodiesOrig = append(bodiesOrig, bodyCur)
 
@@ -474,8 +474,8 @@ func replaceUnitCommon(t *testing.T, cmd string, numRUnits int) error {
 		curHelloService := path.Join("/tmp", helloFilename)
 
 		// replace the unit and assert it shows up
-		if _, _, err = cluster.Fleetctl(m, cmd, "--replace", curHelloService); err != nil {
-			return fmt.Errorf("Unable to replace fleet unit: %v", err)
+		if stdout, stderr, err := cluster.Fleetctl(m, cmd, "--replace", curHelloService); err != nil {
+			return fmt.Errorf("Unable to replace fleet unit: %v\nstdout: %s\nstderr: %s", err, stdout, stderr)
 		}
 		if err := waitForNUnitsCmd(cluster, m, cmd, numUnits); err != nil {
 			return fmt.Errorf("Did not find %d units in cluster", numUnits)
@@ -483,9 +483,9 @@ func replaceUnitCommon(t *testing.T, cmd string, numRUnits int) error {
 
 		// retrieve content of hello.service, and compare it with the
 		// correspondent entry in bodiesOrig[]
-		bodyCur, _, err := cluster.Fleetctl(m, "cat", helloFilename)
+		bodyCur, stderr, err := cluster.Fleetctl(m, "cat", helloFilename)
 		if err != nil {
-			return fmt.Errorf("Failed to run cat %s: %v", helloFilename, err)
+			return fmt.Errorf("Failed to run cat %s: %v\nstderr: %s", helloFilename, err, stderr)
 		}
 
 		if bodiesOrig[i] == bodyCur {
@@ -561,8 +561,8 @@ func launchUnitsCmd(cluster platform.Cluster, m platform.Member, cmd string, num
 
 func cleanUnits(cl platform.Cluster, m platform.Member, cmd string, ufs []string, nu int) (err error) {
 	for i := 0; i < nu; i++ {
-		if _, _, err := cl.Fleetctl(m, cmd, ufs[i]); err != nil {
-			return fmt.Errorf("Failed to %s unit: %v", cmd, err)
+		if stdout, stderr, err := cl.Fleetctl(m, cmd, ufs[i]); err != nil {
+			return fmt.Errorf("Failed to %s unit: %v\nstdout: %s\nstderr: %s", cmd, err, stdout, stderr)
 		}
 	}
 	return nil
@@ -717,9 +717,10 @@ func TestReplaceSerialization(t *testing.T) {
 	// the ExecStartPre is running at the same time, if it failed
 	// then we probably will catch it later when we check its status
 	tmpService := path.Base(tmpSyncService)
+	var stdoutBuf string
 	timeout, err := util.WaitForState(
 		func() bool {
-			_, err = cluster.MemberCommand(m, syncNew)
+			stdoutBuf, err = cluster.MemberCommand(m, syncNew)
 			if err != nil {
 				return false
 			}
@@ -727,7 +728,8 @@ func TestReplaceSerialization(t *testing.T) {
 		},
 	)
 	if err != nil {
-		t.Fatalf("Failed to check if file %s exists within %v", tmpSyncFile, timeout)
+		t.Fatalf("Failed to check if file %s exists within %v\nerr: %v\nstdout: %s", tmpSyncFile, timeout, err, stdoutBuf)
 	}
 
 	timeout, err = util.WaitForState(
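
For illustration only (not part of the patch): a minimal, self-contained sketch of the
error-reporting convention the patch applies throughout the tests. The runCommand helper
below is hypothetical, standing in for cluster.Fleetctl()/run(); the point is that a
command runner should always hand both stdout and stderr back to the caller, so that a
failure report contains every potential error message instead of discarding it with "_".

package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

// runCommand is a hypothetical stand-in for cluster.Fleetctl()/run().
// It executes a command and always returns stdout and stderr, even on
// failure, so callers can embed both in any error they report.
func runCommand(name string, args ...string) (stdout, stderr string, err error) {
	var outBuf, errBuf bytes.Buffer
	cmd := exec.Command(name, args...)
	cmd.Stdout = &outBuf
	cmd.Stderr = &errBuf
	err = cmd.Run()
	return outBuf.String(), errBuf.String(), err
}

func main() {
	stdout, stderr, err := runCommand("ls", "/nonexistent")
	if err != nil {
		// Mirror the patch's convention: never discard stdout/stderr;
		// print them alongside err so the failure is diagnosable.
		fmt.Printf("Failed running command:\nstdout: %s\nstderr: %s\nerr: %v\n",
			stdout, stderr, err)
	}
}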