Add test-report check - capture metrics from JUnit XML (#51)
* Add test-report check - capture metrics from JUnit XML
* Add pass rate, refine example.
jonog authored Sep 8, 2016
1 parent 8df5227 commit 5a6a85c
Showing 5 changed files with 345 additions and 0 deletions.
19 changes: 19 additions & 0 deletions README.md
@@ -18,6 +18,7 @@ For monitoring your infrastructure and sending notifications if stuff is not ok.
* *TCP connectivity monitoring* & latency measurement (check type: `tcp`)
* *Execute local commands* & capture output (check type: `command`)
* *Execute remote commands via SSH* & capture output (check type: `remote-command`)
* *Run a test suite & capture report metrics* via `JUnit XML` format (check type: `test-report`)

#### Dashboard and Alerts
* Alert notifications available on several channels:
@@ -370,6 +371,24 @@ Configure servers to monitor & alert settings via `config.json`.
"target": "0"
}
]
},
{
"name": "Run Smoke Tests",
"type": "test-report",
"config": {
"command": "./run-smoke-tests.sh"
},
"send_alerts": [
"stderr"
],
"assertions": [
{
"comparison": "==",
"identifier": "status",
"source": "metadata",
"target": "PASSING"
}
]
}
],
"notifications": [
24 changes: 24 additions & 0 deletions checks/fixtures/sample_junit_failing.xml
@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="rspec" tests="9" failures="2" errors="0" time="3.257417" timestamp="2016-09-07T22:50:20+10:00">
  <!-- Randomized with seed 47975 -->
  <properties/>
  <testcase classname="my_spec_fail" name="Test 1 is ..." file="./my_spec_fail.rb" time="0.251198"/>
  <testcase classname="my_spec_fail" name="Test 2 is ..." file="./my_spec_fail.rb" time="0.000426">
    <failure message="fail" type="RuntimeError">
<![CDATA[fail
./my_spec_fail.rb:24:in `block (3 levels) in <top (required)>']]>
    </failure>
  </testcase>
  <testcase classname="my_spec_fail" name="Test 3 is ..." file="./my_spec_fail.rb" time="0.133883"/>
  <testcase classname="my_spec_fail" name="Test 4 is ..." file="./my_spec_fail.rb" time="0.149151"/>
  <testcase classname="my_spec_fail" name="Test 5 is ..." file="./my_spec_fail.rb" time="0.168329"/>
  <testcase classname="my_spec_fail" name="Test 6 is ..." file="./my_spec_fail.rb" time="0.140436"/>
  <testcase classname="my_spec_fail" name="Test 7 is ..." file="./my_spec_fail.rb" time="0.132665"/>
  <testcase classname="my_spec_fail" name="Test 8 is ..." file="./my_spec_fail.rb" time="0.000460">
    <failure message="fail" type="RuntimeError">
<![CDATA[fail
./my_spec_fail.rb:64:in `block (3 levels) in <top (required)>']]>
    </failure>
  </testcase>
  <testcase classname="my_spec_fail" name="Test 9" file="./my_spec_fail.rb" time="2.279250"/>
</testsuite>
14 changes: 14 additions & 0 deletions checks/fixtures/sample_junit_passing.xml
@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="rspec" tests="9" failures="0" errors="0" time="4.179975" timestamp="2016-09-07T22:48:34+10:00">
  <!-- Randomized with seed 43628 -->
  <properties/>
  <testcase classname="my_spec" name="Test 1 is ..." file="./my_spec.rb" time="0.646604"/>
  <testcase classname="my_spec" name="Test 2 is ..." file="./my_spec.rb" time="0.144441"/>
  <testcase classname="my_spec" name="Test 3 is ..." file="./my_spec.rb" time="0.144228"/>
  <testcase classname="my_spec" name="Test 4 is ..." file="./my_spec.rb" time="0.154773"/>
  <testcase classname="my_spec" name="Test 5 is ..." file="./my_spec.rb" time="0.174368"/>
  <testcase classname="my_spec" name="Test 6 is ..." file="./my_spec.rb" time="0.213206"/>
  <testcase classname="my_spec" name="Test 7 is ..." file="./my_spec.rb" time="0.131378"/>
  <testcase classname="my_spec" name="Test 8 is ..." file="./my_spec.rb" time="0.310525"/>
  <testcase classname="my_spec" name="Test 9 is ..." file="./my_spec.rb" time="2.258925"/>
</testsuite>
147 changes: 147 additions & 0 deletions checks/test_report.go
@@ -0,0 +1,147 @@
package checks

import (
	"encoding/json"
	"encoding/xml"
	"errors"
	"fmt"
	"log"
	"os/exec"
	"time"

	"github.com/jonog/redalert/data"
	"github.com/jonog/redalert/utils"
)

func init() {
	Register("test-report", NewTestReport)
}

type TestReport struct {
	Command string
	Shell   string
	log     *log.Logger
}

var TestReportMetrics = map[string]MetricInfo{
	"execution_time": {
		Unit: "ms",
	},
}

type TestReportConfig struct {
	Command string `json:"command"`
	Shell   string `json:"shell"`
}

var NewTestReport = func(config Config, logger *log.Logger) (Checker, error) {
	var testReportConfig TestReportConfig
	err := json.Unmarshal([]byte(config.Config), &testReportConfig)
	if err != nil {
		return nil, err
	}
	if testReportConfig.Command == "" {
		return nil, errors.New("command: command to run cannot be blank")
	}
	if testReportConfig.Shell == "" {
		testReportConfig.Shell = "sh"
	}
	return Checker(&TestReport{
		testReportConfig.Command,
		testReportConfig.Shell,
		logger}), nil
}

func (c *TestReport) Check() (data.CheckResponse, error) {

	response := data.CheckResponse{
		Metrics:  data.Metrics(make(map[string]*float64)),
		Metadata: make(map[string]string),
	}
	executionTime := float64(0)

	c.log.Println("Run test-suite via:", c.Command, "using shell:", c.Shell)

	startTime := time.Now()
	// ignore error here and rely on xml parsing error
	out, _ := exec.Command(c.Shell, "-c", c.Command).Output()
	response.Response = out
	endTime := time.Now()

	executionTimeCalc := endTime.Sub(startTime)
	executionTime = float64(executionTimeCalc.Seconds() * 1e3)
	c.log.Println("Execution Time", utils.White, executionTime, utils.Reset)
	response.Metrics["execution_time"] = &executionTime

	var testReport Testsuite
	xmlErr := xml.Unmarshal(out, &testReport)
	if xmlErr != nil {
		return response, errors.New("test-suite: invalid junit xml: " + xmlErr.Error())
	}
	testCount := float64(testReport.Tests)
	response.Metrics["test_count"] = &testCount
	failureCount := float64(testReport.Failures)
	if failureCount > 0 {
		response.Metadata["status"] = "FAILING"
	} else {
		response.Metadata["status"] = "PASSING"
	}
	response.Metrics["failure_count"] = &failureCount

	skippedCountInt := 0
	for _, test := range testReport.Testcases {
		testCase := *test
		if testCase.Skipped != nil {
			skippedCountInt++
		}
	}
	skippedCount := float64(skippedCountInt)
	response.Metrics["skipped_count"] = &skippedCount

	passCount := float64(testCount - failureCount - skippedCount)
	response.Metrics["pass_count"] = &passCount

	if (passCount + failureCount) > 0 {
		passRate := float64(100 * passCount / (passCount + failureCount))
		response.Metrics["pass_rate"] = &passRate
	} else {
		zeroPassRate := float64(0)
		response.Metrics["pass_rate"] = &zeroPassRate
	}

	c.log.Println("Report: ", fmt.Sprintf("%s", out))

	return response, nil
}

func (c *TestReport) MetricInfo(metric string) MetricInfo {
	return TestReportMetrics[metric]
}

func (c *TestReport) MessageContext() string {
	return c.Command
}

type Testsuite struct {
	Name      string      `xml:"name,attr"`
	Tests     int         `xml:"tests,attr"`
	Failures  int         `xml:"failures,attr"`
	Errors    int         `xml:"errors,attr"`
	Timestamp string      `xml:"timestamp,attr"`
	Time      float64     `xml:"time,attr"`
	Hostname  string      `xml:"hostname,attr"`
	Testcases []*TestCase `xml:"testcase"`
}

type TestCase struct {
	Name      string    `xml:"name,attr"`
	Time      float64   `xml:"time,attr"`
	Classname string    `xml:"classname,attr"`
	Failure   *Failure  `xml:"failure"`
	Skipped   *struct{} `xml:"skipped"`
}

type Failure struct {
	Type    string `xml:"type,attr"`
	Message string `xml:"message,attr"`
}
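
For reference, the parsing and metric derivation above can be exercised in isolation. The following is a minimal, self-contained sketch (standard library only); the trimmed-down structs, the inline sample report, and the `main` wrapper are illustrative and not part of this commit, but the struct tags and the pass-rate arithmetic mirror `Check()`:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"log"
)

// Trimmed-down versions of the Testsuite/TestCase types defined in this commit:
// only the attributes needed for the metric calculations are kept.
type suite struct {
	Tests     int         `xml:"tests,attr"`
	Failures  int         `xml:"failures,attr"`
	Testcases []*testcase `xml:"testcase"`
}

type testcase struct {
	Name    string    `xml:"name,attr"`
	Skipped *struct{} `xml:"skipped"` // non-nil when a <skipped/> element is present
}

func main() {
	// Illustrative JUnit XML report: 3 tests, 1 failure, 1 skipped.
	report := []byte(`<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="example" tests="3" failures="1" errors="0">
  <testcase name="ok"/>
  <testcase name="broken"><failure message="boom" type="RuntimeError"/></testcase>
  <testcase name="pending"><skipped/></testcase>
</testsuite>`)

	var s suite
	if err := xml.Unmarshal(report, &s); err != nil {
		log.Fatal(err)
	}

	// Same derivation as Check(): counts come from the testsuite attributes,
	// skips are counted per <skipped/> element, and pass_rate excludes skips.
	testCount := float64(s.Tests)
	failureCount := float64(s.Failures)
	skipped := 0.0
	for _, tc := range s.Testcases {
		if tc.Skipped != nil {
			skipped++
		}
	}
	passCount := testCount - failureCount - skipped
	passRate := 0.0
	if passCount+failureCount > 0 {
		passRate = 100 * passCount / (passCount + failureCount)
	}

	fmt.Printf("test_count=%v failure_count=%v skipped_count=%v pass_count=%v pass_rate=%v\n",
		testCount, failureCount, skipped, passCount, passRate)
	// Prints: test_count=3 failure_count=1 skipped_count=1 pass_count=1 pass_rate=50
}
```

As in `Check()`, skipped cases reduce `pass_count` but are excluded from the `pass_rate` denominator, so a suite with skips and no failures still reports a 100% pass rate.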
141 changes: 141 additions & 0 deletions checks/test_report_test.go
@@ -0,0 +1,141 @@
package checks

import (
	"encoding/json"
	"testing"
)

func testTestReportConfig(cmd string) []byte {
	json := `
	{
		"name": "Smoke Tests",
		"type": "test-report",
		"config": {
			"command": "` + cmd + `"
		},
		"send_alerts": [
			"stderr"
		],
		"backoff": {
			"interval": 10,
			"type": "constant"
		}
	}`
	return []byte(json)
}

func TestTestReport_ParseAndInitialise(t *testing.T) {
	var config Config
	err := json.Unmarshal(testTestReportConfig("./scripts/test"), &config)
	if err != nil {
		t.Fatalf("error: %#v", err)
	}
	_, err = New(config, testLog())
	if err != nil {
		t.Fatalf("error: %#v", err)
	}
}

func TestTestReport_Check_PassingTests(t *testing.T) {
	var config Config
	err := json.Unmarshal(testTestReportConfig("cat fixtures/sample_junit_passing.xml"), &config)
	if err != nil {
		t.Fatalf("error: %#v", err)
	}
	checker, err := New(config, testLog())
	if err != nil {
		t.Fatalf("error: %#v", err)
	}
	data, err := checker.Check()
	if err != nil {
		t.Fatalf("error: %#v", err)
	}
	if data.Metadata["status"] != "PASSING" {
		t.Fatalf("expect: %#v, got: %#v", "PASSING", data.Metadata["status"])
	}

	testCount, ok := data.Metrics["test_count"]
	if !ok || testCount == nil {
		t.Fatalf("Expected metric test_count does not exist. metrics: %#v", data.Metrics)
	}
	if *testCount != 9 {
		t.Fatalf("Invalid test_count")
	}

	failureCount, ok := data.Metrics["failure_count"]
	if !ok || failureCount == nil {
		t.Fatalf("Expected metric failure_count does not exist. metrics: %#v", data.Metrics)
	}
	if *failureCount != 0 {
		t.Fatalf("Invalid failure_count")
	}

	passCount, ok := data.Metrics["pass_count"]
	if !ok || passCount == nil {
		t.Fatalf("Expected metric pass_count does not exist. metrics: %#v", data.Metrics)
	}
	if *passCount != 9 {
		t.Fatalf("Invalid pass_count")
	}

	passRate, ok := data.Metrics["pass_rate"]
	if !ok || passRate == nil {
		t.Fatalf("Expected metric pass_rate does not exist. metrics: %#v", data.Metrics)
	}
	if *passRate != 100 {
		t.Fatalf("Invalid pass_rate")
	}

}

func TestTestReport_Check_FailingTests(t *testing.T) {
	var config Config
	err := json.Unmarshal(testTestReportConfig("cat fixtures/sample_junit_failing.xml && exit 1"), &config)
	if err != nil {
		t.Fatalf("error: %#v", err)
	}
	checker, err := New(config, testLog())
	if err != nil {
		t.Fatalf("error: %#v", err)
	}
	data, err := checker.Check()
	if err != nil {
		t.Fatalf("error: %#v", err)
	}
	if data.Metadata["status"] != "FAILING" {
		t.Fatalf("expect: %#v, got: %#v", "FAILING", data.Metadata["status"])
	}

	testCount, ok := data.Metrics["test_count"]
	if !ok || testCount == nil {
		t.Fatalf("Expected metric test_count does not exist. metrics: %#v", data.Metrics)
	}
	if *testCount != 9 {
		t.Fatalf("Invalid test_count")
	}

	failureCount, ok := data.Metrics["failure_count"]
	if !ok || failureCount == nil {
		t.Fatalf("Expected metric failure_count does not exist. metrics: %#v", data.Metrics)
	}
	if *failureCount != 2 {
		t.Fatalf("Invalid failure_count")
	}

	passCount, ok := data.Metrics["pass_count"]
	if !ok || passCount == nil {
		t.Fatalf("Expected metric pass_count does not exist. metrics: %#v", data.Metrics)
	}
	if *passCount != 7 {
		t.Fatalf("Invalid pass_count")
	}

	passRate, ok := data.Metrics["pass_rate"]
	if !ok || passRate == nil {
		t.Fatalf("Expected metric pass_rate does not exist. metrics: %#v", data.Metrics)
	}
	if *passRate != 100*float64(7)/float64(9) {
		t.Fatalf("Invalid pass_rate")
	}

}
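
Beyond `go test`, the check can be driven through the package's exported constructor in the same way the tests above do. A rough usage sketch follows; it assumes it is run from the `checks/` directory so the fixture path resolves, and the import path simply follows the `github.com/jonog/redalert/...` paths used elsewhere in this commit:

```go
package main

import (
	"encoding/json"
	"log"
	"os"

	"github.com/jonog/redalert/checks"
)

func main() {
	// Same config shape as the testTestReportConfig helper above; the command
	// replays a fixture instead of running a real test suite.
	raw := []byte(`{
		"name": "Smoke Tests",
		"type": "test-report",
		"config": {"command": "cat fixtures/sample_junit_passing.xml"},
		"send_alerts": ["stderr"],
		"backoff": {"interval": 10, "type": "constant"}
	}`)

	var config checks.Config
	if err := json.Unmarshal(raw, &config); err != nil {
		log.Fatal(err)
	}

	checker, err := checks.New(config, log.New(os.Stderr, "", log.LstdFlags))
	if err != nil {
		log.Fatal(err)
	}

	result, err := checker.Check()
	if err != nil {
		log.Fatal(err)
	}

	// Metric and metadata names are the ones populated by Check() above.
	log.Println("status:", result.Metadata["status"])
	if rate := result.Metrics["pass_rate"]; rate != nil {
		log.Println("pass_rate:", *rate)
	}
}
```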
