diff --git a/README.md b/README.md
index c0c7bbe..d6ef217 100644
--- a/README.md
+++ b/README.md
@@ -18,6 +18,7 @@ For monitoring your infrastructure and sending notifications if stuff is not ok.
* *TCP connectivity monitoring* & latency measurement (check type: `tcp`)
* *Execute local commands* & capture output (check type: `command`)
* *Execute remote commands via SSH* & capture output (check type: `remote-command`)
+* *Run a test suite* & capture report metrics in `JUnit XML` format (check type: `test-report`)
#### Dashboard and Alerts
* Alert notifications available on several channels:
@@ -370,6 +371,24 @@ Configure servers to monitor & alert settings via `config.json`.
"target": "0"
}
]
+ },
+ {
+ "name": "Run Smoke Tests",
+ "type": "test-report",
+ "config": {
+ "command": "./run-smoke-tests.sh"
+ },
+ "send_alerts": [
+ "stderr"
+ ],
+ "assertions": [
+ {
+ "comparison": "==",
+ "identifier": "status",
+ "source": "metadata",
+ "target": "PASSING"
+ }
+ ]
}
],
"notifications": [
diff --git a/checks/fixtures/sample_junit_failing.xml b/checks/fixtures/sample_junit_failing.xml
new file mode 100644
index 0000000..af21321
--- /dev/null
+++ b/checks/fixtures/sample_junit_failing.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuite name="smoke-tests" tests="9" failures="2" errors="0" timestamp="2016-05-22T10:05:00" time="0.517" hostname="localhost">
+  <properties/>
+  <testcase classname="app.Smoke" name="test_homepage_loads" time="0.052"/>
+  <testcase classname="app.Smoke" name="test_login" time="0.061"/>
+  <testcase classname="app.Smoke" name="test_signup" time="0.048"/>
+  <testcase classname="app.Smoke" name="test_search" time="0.055">
+    <failure type="AssertionError" message="unexpected response code"><![CDATA[
+expected response code 200, got '500']]>
+    </failure>
+  </testcase>
+  <testcase classname="app.Smoke" name="test_checkout" time="0.070"/>
+  <testcase classname="app.Smoke" name="test_profile" time="0.044"/>
+  <testcase classname="app.Smoke" name="test_logout" time="0.039"/>
+  <testcase classname="app.Smoke" name="test_api_health" time="0.052">
+    <failure type="AssertionError" message="unexpected health status"><![CDATA[
+expected health status 'ok', got 'degraded']]>
+    </failure>
+  </testcase>
+  <testcase classname="app.Smoke" name="test_static_assets" time="0.061"/>
+  <system-out/>
+</testsuite>
diff --git a/checks/fixtures/sample_junit_passing.xml b/checks/fixtures/sample_junit_passing.xml
new file mode 100644
index 0000000..a6b93ed
--- /dev/null
+++ b/checks/fixtures/sample_junit_passing.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuite name="smoke-tests" tests="9" failures="0" errors="0" timestamp="2016-05-22T10:00:00" time="0.482" hostname="localhost">
+  <properties/>
+  <testcase classname="app.Smoke" name="test_homepage_loads" time="0.052"/>
+  <testcase classname="app.Smoke" name="test_login" time="0.061"/>
+  <testcase classname="app.Smoke" name="test_signup" time="0.048"/>
+  <testcase classname="app.Smoke" name="test_search" time="0.055"/>
+  <testcase classname="app.Smoke" name="test_checkout" time="0.070"/>
+  <testcase classname="app.Smoke" name="test_profile" time="0.044"/>
+  <testcase classname="app.Smoke" name="test_logout" time="0.039"/>
+  <testcase classname="app.Smoke" name="test_api_health" time="0.052"/>
+  <testcase classname="app.Smoke" name="test_static_assets" time="0.061"/>
+  <system-out/>
+</testsuite>
diff --git a/checks/test_report.go b/checks/test_report.go
new file mode 100644
index 0000000..19975dc
--- /dev/null
+++ b/checks/test_report.go
@@ -0,0 +1,147 @@
+package checks
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "log"
+ "os/exec"
+ "time"
+
+ "github.com/jonog/redalert/data"
+ "github.com/jonog/redalert/utils"
+)
+
+func init() {
+ Register("test-report", NewTestReport)
+}
+
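+// TestReport runs a test-suite command and parses its JUnit XML report.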
+type TestReport struct {
+ Command string
+ Shell string
+ log *log.Logger
+}
+
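+// TestReportMetrics holds display units for metrics reported by this check.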
+var TestReportMetrics = map[string]MetricInfo{
+ "execution_time": {
+ Unit: "ms",
+ },
+}
+
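+// TestReportConfig is the JSON configuration accepted by the test-report check.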
+type TestReportConfig struct {
+ Command string `json:"command"`
+ Shell string `json:"shell"`
+}
+
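+// NewTestReport builds a test-report Checker from the raw check config, defaulting the shell to sh.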
+var NewTestReport = func(config Config, logger *log.Logger) (Checker, error) {
+ var testReportConfig TestReportConfig
+ err := json.Unmarshal([]byte(config.Config), &testReportConfig)
+ if err != nil {
+ return nil, err
+ }
+ if testReportConfig.Command == "" {
+ return nil, errors.New("command: command to run cannot be blank")
+ }
+ if testReportConfig.Shell == "" {
+ testReportConfig.Shell = "sh"
+ }
+ return Checker(&TestReport{
+ testReportConfig.Command,
+ testReportConfig.Shell,
+ logger}), nil
+}
+
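+// Check runs the configured command, parses its output as a JUnit XML testsuite and derives test metrics.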
+func (c *TestReport) Check() (data.CheckResponse, error) {
+
+ response := data.CheckResponse{
+ Metrics: data.Metrics(make(map[string]*float64)),
+ Metadata: make(map[string]string),
+ }
+ executionTime := float64(0)
+
+ c.log.Println("Run test-suite via:", c.Command, "using shell:", c.Shell)
+
+ startTime := time.Now()
+ // ignore the command's exit status here: failing suites typically exit non-zero, and an unusable report surfaces as an XML parsing error below
+ out, _ := exec.Command(c.Shell, "-c", c.Command).Output()
+ response.Response = out
+ endTime := time.Now()
+
+ executionTimeCalc := endTime.Sub(startTime)
+ executionTime = executionTimeCalc.Seconds() * 1e3
+ c.log.Println("Execution Time", utils.White, executionTime, utils.Reset)
+ response.Metrics["execution_time"] = &executionTime
+
+ var testReport Testsuite
+ xmlErr := xml.Unmarshal(out, &testReport)
+ if xmlErr != nil {
+ return response, errors.New("test-suite: invalid junit xml: " + xmlErr.Error())
+ }
+ testCount := float64(testReport.Tests)
+ response.Metrics["test_count"] = &testCount
+ failureCount := float64(testReport.Failures)
+ if failureCount > 0 {
+ response.Metadata["status"] = "FAILING"
+ } else {
+ response.Metadata["status"] = "PASSING"
+ }
+ response.Metrics["failure_count"] = &failureCount
+
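+ // a test case counts as skipped when it carries a <skipped/> element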
+ skippedCountInt := 0
+ for _, test := range testReport.Testcases {
+ if test.Skipped != nil {
+ skippedCountInt++
+ }
+ }
+ skippedCount := float64(skippedCountInt)
+ response.Metrics["skipped_count"] = &skippedCount
+
+ passCount := testCount - failureCount - skippedCount
+ response.Metrics["pass_count"] = &passCount
+
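+ // pass rate is computed over executed (non-skipped) tests only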
+ if (passCount + failureCount) > 0 {
+ passRate := 100 * passCount / (passCount + failureCount)
+ response.Metrics["pass_rate"] = &passRate
+ } else {
+ zeroPassRate := float64(0)
+ response.Metrics["pass_rate"] = &zeroPassRate
+ }
+
+ c.log.Println("Report: ", fmt.Sprintf("%s", out))
+
+ return response, nil
+}
+
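+// MetricInfo returns unit information for the named metric.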
+func (c *TestReport) MetricInfo(metric string) MetricInfo {
+ return TestReportMetrics[metric]
+}
+
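+// MessageContext identifies the check in alert messages.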
+func (c *TestReport) MessageContext() string {
+ return c.Command
+}
+
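+// Testsuite maps the root <testsuite> element of a JUnit XML report.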
+type Testsuite struct {
+ Name string `xml:"name,attr"`
+ Tests int `xml:"tests,attr"`
+ Failures int `xml:"failures,attr"`
+ Errors int `xml:"errors,attr"`
+ Timestamp string `xml:"timestamp,attr"`
+ Time float64 `xml:"time,attr"`
+ Hostname string `xml:"hostname,attr"`
+ Testcases []*TestCase `xml:"testcase"`
+}
+
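+// TestCase maps a <testcase> element, with optional failure and skipped markers.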
+type TestCase struct {
+ Name string `xml:"name,attr"`
+ Time float64 `xml:"time,attr"`
+ Classname string `xml:"classname,attr"`
+ Failure *Failure `xml:"failure"`
+ Skipped *struct{} `xml:"skipped"`
+}
+
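+// Failure maps the <failure> element of a failing test case.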
+type Failure struct {
+ Type string `xml:"type,attr"`
+ Message string `xml:"message,attr"`
+}
diff --git a/checks/test_report_test.go b/checks/test_report_test.go
new file mode 100644
index 0000000..3918fb5
--- /dev/null
+++ b/checks/test_report_test.go
@@ -0,0 +1,141 @@
+package checks
+
+import (
+ "encoding/json"
+ "testing"
+)
+
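+// testTestReportConfig builds a minimal test-report check config around the given command.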
+func testTestReportConfig(cmd string) []byte {
+ json := `
+ {
+ "name": "Smoke Tests",
+ "type": "test-report",
+ "config": {
+ "command": "` + cmd + `"
+ },
+ "send_alerts": [
+ "stderr"
+ ],
+ "backoff": {
+ "interval": 10,
+ "type": "constant"
+ }
+ }`
+ return []byte(json)
+}
+
+func TestTestReport_ParseAndInitialise(t *testing.T) {
+ var config Config
+ err := json.Unmarshal(testTestReportConfig("./scripts/test"), &config)
+ if err != nil {
+ t.Fatalf("error: %#v", err)
+ }
+ _, err = New(config, testLog())
+ if err != nil {
+ t.Fatalf("error: %#v", err)
+ }
+}
+
+func TestTestReport_Check_PassingTests(t *testing.T) {
+ var config Config
+ err := json.Unmarshal(testTestReportConfig("cat fixtures/sample_junit_passing.xml"), &config)
+ if err != nil {
+ t.Fatalf("error: %#v", err)
+ }
+ checker, err := New(config, testLog())
+ if err != nil {
+ t.Fatalf("error: %#v", err)
+ }
+ data, err := checker.Check()
+ if err != nil {
+ t.Fatalf("error: %#v", err)
+ }
+ if data.Metadata["status"] != "PASSING" {
+ t.Fatalf("expect: %#v, got: %#v", "PASSING", data.Metadata["status"])
+ }
+
+ testCount, ok := data.Metrics["test_count"]
+ if !ok || testCount == nil {
+ t.Fatalf("Expected metric test_count does not exist. metrics: %#v", data.Metrics)
+ }
+ if *testCount != 9 {
+ t.Fatalf("Invalid test_count")
+ }
+
+ failureCount, ok := data.Metrics["failure_count"]
+ if !ok || failureCount == nil {
+ t.Fatalf("Expected metric failure_count does not exist. metrics: %#v", data.Metrics)
+ }
+ if *failureCount != 0 {
+ t.Fatalf("Invalid failure_count")
+ }
+
+ passCount, ok := data.Metrics["pass_count"]
+ if !ok || passCount == nil {
+ t.Fatalf("Expected metric pass_count does not exist. metrics: %#v", data.Metrics)
+ }
+ if *passCount != 9 {
+ t.Fatalf("Invalid pass_count")
+ }
+
+ passRate, ok := data.Metrics["pass_rate"]
+ if !ok || passRate == nil {
+ t.Fatalf("Expected metric pass_rate does not exist. metrics: %#v", data.Metrics)
+ }
+ if *passRate != 100 {
+ t.Fatalf("Invalid pass_rate")
+ }
+
+}
+
+func TestTestReport_Check_FailingTests(t *testing.T) {
+ var config Config
+ err := json.Unmarshal(testTestReportConfig("cat fixtures/sample_junit_failing.xml && exit 1"), &config)
+ if err != nil {
+ t.Fatalf("error: %#v", err)
+ }
+ checker, err := New(config, testLog())
+ if err != nil {
+ t.Fatalf("error: %#v", err)
+ }
+ data, err := checker.Check()
+ if err != nil {
+ t.Fatalf("error: %#v", err)
+ }
+ if data.Metadata["status"] != "FAILING" {
+ t.Fatalf("expect: %#v, got: %#v", "FAILING", data.Metadata["status"])
+ }
+
+ testCount, ok := data.Metrics["test_count"]
+ if !ok || testCount == nil {
+ t.Fatalf("Expected metric test_count does not exist. metrics: %#v", data.Metrics)
+ }
+ if *testCount != 9 {
+ t.Fatalf("Invalid test_count")
+ }
+
+ failureCount, ok := data.Metrics["failure_count"]
+ if !ok || failureCount == nil {
+ t.Fatalf("Expected metric failure_count does not exist. metrics: %#v", data.Metrics)
+ }
+ if *failureCount != 2 {
+ t.Fatalf("Invalid failure_count")
+ }
+
+ passCount, ok := data.Metrics["pass_count"]
+ if !ok || passCount == nil {
+ t.Fatalf("Expected metric pass_count does not exist. metrics: %#v", data.Metrics)
+ }
+ if *passCount != 7 {
+ t.Fatalf("Invalid pass_count")
+ }
+
+ passRate, ok := data.Metrics["pass_rate"]
+ if !ok || passRate == nil {
+ t.Fatalf("Expected metric pass_rate does not exist. metrics: %#v", data.Metrics)
+ }
+ if *passRate != 100*float64(7)/float64(9) {
+ t.Fatalf("Invalid pass_rate")
+ }
+
+}